mirror of
https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 05:14:13 +01:00
Compare commits
71 Commits
node-utili
...
release-1.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1de26e82a1 | ||
|
|
5032e508a4 | ||
|
|
f06640cb0c | ||
|
|
e2844b8c7a | ||
|
|
a6e0ebff10 | ||
|
|
4c4c31f456 | ||
|
|
ea3e48396a | ||
|
|
0dd9328870 | ||
|
|
c98d340cbd | ||
|
|
8ec655b0de | ||
|
|
f68d06ad4a | ||
|
|
0b68495a17 | ||
|
|
8e6be70ff9 | ||
|
|
d536cf8ed0 | ||
|
|
48aede9fde | ||
|
|
bd5b95dbf9 | ||
|
|
71726c8c85 | ||
|
|
32e29973d8 | ||
|
|
d0fd115747 | ||
|
|
da65808f77 | ||
|
|
29ff28cbb5 | ||
|
|
d653537ee6 | ||
|
|
c3b9c97827 | ||
|
|
75c5c75e13 | ||
|
|
b66b5d35f0 | ||
|
|
5c3a3bdcf1 | ||
|
|
46fa370ede | ||
|
|
4e8c7e6702 | ||
|
|
bc6323611b | ||
|
|
51a004c848 | ||
|
|
44bde42b63 | ||
|
|
bbffb830b9 | ||
|
|
73fecfb7c4 | ||
|
|
f4c3fdf418 | ||
|
|
2c11481856 | ||
|
|
e6deb65299 | ||
|
|
677c6a60ce | ||
|
|
a2fd3aa1eb | ||
|
|
697ecc79e4 | ||
|
|
e619ec6c41 | ||
|
|
be9e971cda | ||
|
|
a8e14ec14d | ||
|
|
00b6e3528f | ||
|
|
18e3d17c29 | ||
|
|
a962cca90d | ||
|
|
6567f01e86 | ||
|
|
c86416612e | ||
|
|
a4c09bf560 | ||
|
|
7d4ec60e2d | ||
|
|
3a1a3ff9d8 | ||
|
|
343ebb9ff9 | ||
|
|
d1c64c48cd | ||
|
|
7b1178be9f | ||
|
|
23a6d26209 | ||
|
|
cd408dd785 | ||
|
|
9950b8a55d | ||
|
|
f115e780d8 | ||
|
|
af8a7445a4 | ||
|
|
5ba11e09c7 | ||
|
|
d41981644a | ||
|
|
67d3d52de8 | ||
|
|
e9f43856a9 | ||
|
|
e655a7eb27 | ||
|
|
da52983b27 | ||
|
|
1e48cfe6f8 | ||
|
|
fb4b8746ec | ||
|
|
269f16cf73 | ||
|
|
7eeb07d96a | ||
|
|
a18425a18d | ||
|
|
0c552b667f | ||
|
|
ef0c2c1c47 |
6
.github/workflows/manifests.yaml
vendored
6
.github/workflows/manifests.yaml
vendored
@@ -7,8 +7,8 @@ jobs:
|
||||
deploy:
|
||||
strategy:
|
||||
matrix:
|
||||
k8s-version: ["v1.31.0"]
|
||||
descheduler-version: ["v0.31.0"]
|
||||
k8s-version: ["v1.32.0"]
|
||||
descheduler-version: ["v0.32.2"]
|
||||
descheduler-api: ["v1alpha2"]
|
||||
manifest: ["deployment"]
|
||||
runs-on: ubuntu-latest
|
||||
@@ -16,7 +16,7 @@ jobs:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
- name: Create kind cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@v1.12.0
|
||||
with:
|
||||
node_image: kindest/node:${{ matrix.k8s-version }}
|
||||
kubectl_version: ${{ matrix.k8s-version }}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
run:
|
||||
timeout: 2m
|
||||
timeout: 5m
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
FROM golang:1.22.5
|
||||
FROM golang:1.23.3
|
||||
|
||||
WORKDIR /go/src/sigs.k8s.io/descheduler
|
||||
COPY . .
|
||||
|
||||
4
Makefile
4
Makefile
@@ -26,7 +26,7 @@ ARCHS = amd64 arm arm64
|
||||
|
||||
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
|
||||
|
||||
GOLANGCI_VERSION := v1.61.0
|
||||
GOLANGCI_VERSION := v1.62.2
|
||||
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
|
||||
|
||||
GOFUMPT_VERSION := v0.7.0
|
||||
@@ -148,7 +148,7 @@ lint:
|
||||
ifndef HAS_GOLANGCI
|
||||
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
|
||||
endif
|
||||
./_output/bin/golangci-lint run
|
||||
./_output/bin/golangci-lint run -v
|
||||
|
||||
fmt:
|
||||
ifndef HAS_GOFUMPT
|
||||
|
||||
43
README.md
43
README.md
@@ -38,10 +38,10 @@ that version's release branch, as listed below:
|
||||
|
||||
|Descheduler Version|Docs link|
|
||||
|---|---|
|
||||
|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
|
||||
|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
|
||||
|v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
|
||||
|v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
|
||||
|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
|
||||
|
||||
The
|
||||
[`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)
|
||||
@@ -93,17 +93,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
|
||||
|
||||
Run As A Job
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.31' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.32' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A CronJob
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.31' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.32' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A Deployment
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.31' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.32' | kubectl apply -f -
|
||||
```
|
||||
|
||||
## User Guide
|
||||
@@ -124,23 +124,26 @@ These are top level keys in the Descheduler Policy that you can use to configure
|
||||
| `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
|
||||
| `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
|
||||
| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |
|
||||
| `metricsCollector` |`object`| `nil` | configures collection of metrics for actual resource utilization |
|
||||
| `metricsCollector.enabled` |`bool`| `false` | enables kubernetes [metrics server](https://kubernetes-sigs.github.io/metrics-server/) collection |
|
||||
|
||||
### Evictor Plugin configuration (Default Evictor)
|
||||
|
||||
The Default Evictor Plugin is used by default for filtering pods before processing them in an strategy plugin, or for applying a PreEvictionFilter of pods before eviction. You can also create your own Evictor Plugin or use the Default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate or group pods by different criteria, and that's why this is handled by a plugin and not configured in the top level config.
|
||||
|
||||
| Name |type| Default Value | Description |
|
||||
|------|----|---------------|-------------|
|
||||
|---------------------------|----|---------------|-----------------------------------------------------------------------------------------------------------------------------|
|
||||
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
|
||||
| `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
|
||||
| `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
|
||||
| `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
|
||||
| `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
|
||||
|`labelSelector`|`metav1.LabelSelector`||(see [label filtering](#label-filtering))|
|
||||
|`priorityThreshold`|`priorityThreshold`||(see [priority filtering](#priority-filtering))|
|
||||
|`nodeFit`|`bool`|`false`|(see [node fit filtering](#node-fit-filtering))|
|
||||
|`minReplicas`|`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
|
||||
|`minPodAge`|`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
|
||||
| `labelSelector` |`metav1.LabelSelector`|| (see [label filtering](#label-filtering)) |
|
||||
| `priorityThreshold` |`priorityThreshold`|| (see [priority filtering](#priority-filtering)) |
|
||||
| `nodeFit` |`bool`|`false`| (see [node fit filtering](#node-fit-filtering)) |
|
||||
| `minReplicas` |`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
|
||||
| `minPodAge` |`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
|
||||
| `ignorePodsWithoutPDB` |`bool`|`false`| set whether pods without PodDisruptionBudget should be evicted or ignored |
|
||||
|
||||
### Example policy
|
||||
|
||||
@@ -157,6 +160,8 @@ nodeSelector: "node=node1" # you don't need to set this, if not set all will be
|
||||
maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
|
||||
maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
|
||||
maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
|
||||
metricsCollector:
|
||||
enabled: true # you don't need to set this, metrics are not collected if not set
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
@@ -276,11 +281,13 @@ If that parameter is set to `true`, the thresholds are considered as percentage
|
||||
`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
|
||||
A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
|
||||
|
||||
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
|
||||
**NOTE:** By default node resource consumption is determined by the requests and limits of pods, not actual usage.
|
||||
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
|
||||
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
|
||||
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
|
||||
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
|
||||
actual usage metrics. Metrics-based descheduling can be enabled by setting `metricsUtilization.metricsServer` field.
|
||||
In order to have the plugin consume the metrics the metric collector needs to be configured as well.
|
||||
See `metricsCollector` field at [Top Level configuration](#top-level-configuration) for available options.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
@@ -291,6 +298,9 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
|
||||
|`targetThresholds`|map(string:int)|
|
||||
|`numberOfNodes`|int|
|
||||
|`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
|`metricsUtilization`|object|
|
||||
|`metricsUtilization.metricsServer`|bool|
|
||||
|
||||
|
||||
**Example:**
|
||||
|
||||
@@ -310,6 +320,8 @@ profiles:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
metricsUtilization:
|
||||
metricsServer: true
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
@@ -858,7 +870,7 @@ does not exist, descheduler won't create it and will throw an error.
|
||||
|
||||
### Label filtering
|
||||
|
||||
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)
|
||||
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta)
|
||||
to filter pods by their labels:
|
||||
|
||||
* `PodLifeTime`
|
||||
@@ -997,6 +1009,11 @@ packages that it is compiled with.
|
||||
|
||||
| Descheduler | Supported Kubernetes Version |
|
||||
|-------------|------------------------------|
|
||||
| v0.32 | v1.32 |
|
||||
| v0.31 | v1.31 |
|
||||
| v0.30 | v1.30 |
|
||||
| v0.29 | v1.29 |
|
||||
| v0.28 | v1.28 |
|
||||
| v0.27 | v1.27 |
|
||||
| v0.26 | v1.26 |
|
||||
| v0.25 | v1.25 |
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: v1
|
||||
name: descheduler
|
||||
version: 0.31.0
|
||||
appVersion: 0.31.0
|
||||
version: 0.32.2
|
||||
appVersion: 0.32.2
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||
keywords:
|
||||
- kubernetes
|
||||
|
||||
@@ -64,7 +64,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
|
||||
| `replicas` | The replica count for Deployment | `1` |
|
||||
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
|
||||
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
|
||||
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
|
||||
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
|
||||
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
|
||||
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
|
||||
|
||||
@@ -24,6 +24,9 @@ rules:
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
@@ -33,4 +36,9 @@ rules:
|
||||
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
|
||||
verbs: ["get", "patch", "delete"]
|
||||
{{- end }}
|
||||
{{- if and .Values.deschedulerPolicy .Values.deschedulerPolicy.metricsCollector .Values.deschedulerPolicy.metricsCollector.enabled }}
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list"]
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
@@ -15,10 +15,10 @@ spec:
|
||||
{{- if .Values.startingDeadlineSeconds }}
|
||||
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
|
||||
{{- end }}
|
||||
{{- if .Values.successfulJobsHistoryLimit }}
|
||||
{{- if ne .Values.successfulJobsHistoryLimit nil }}
|
||||
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
|
||||
{{- end }}
|
||||
{{- if .Values.failedJobsHistoryLimit }}
|
||||
{{- if ne .Values.failedJobsHistoryLimit nil }}
|
||||
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
|
||||
{{- end }}
|
||||
{{- if .Values.timeZone }}
|
||||
@@ -89,6 +89,8 @@ spec:
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
{{- toYaml .Values.livenessProbe | nindent 16 }}
|
||||
ports:
|
||||
{{- toYaml .Values.ports | nindent 16 }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 16 }}
|
||||
{{- if .Values.securityContext }}
|
||||
|
||||
@@ -61,8 +61,7 @@ spec:
|
||||
{{- end }}
|
||||
{{- include "descheduler.leaderElection" . | nindent 12 }}
|
||||
ports:
|
||||
- containerPort: 10258
|
||||
protocol: TCP
|
||||
{{- toYaml .Values.ports | nindent 12 }}
|
||||
livenessProbe:
|
||||
{{- toYaml .Values.livenessProbe | nindent 12 }}
|
||||
resources:
|
||||
|
||||
@@ -18,9 +18,13 @@ resources:
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 256Mi
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 256Mi
|
||||
|
||||
ports:
|
||||
- containerPort: 10258
|
||||
protocol: TCP
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -92,6 +96,8 @@ deschedulerPolicy:
|
||||
# nodeSelector: "key1=value1,key2=value2"
|
||||
# maxNoOfPodsToEvictPerNode: 10
|
||||
# maxNoOfPodsToEvictPerNamespace: 10
|
||||
# metricsCollector:
|
||||
# enabled: true
|
||||
# ignorePvcPods: true
|
||||
# evictLocalStoragePods: true
|
||||
# evictDaemonSetPods: true
|
||||
|
||||
@@ -18,17 +18,28 @@ limitations under the License.
|
||||
package options
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apiserver "k8s.io/apiserver/pkg/server"
|
||||
apiserveroptions "k8s.io/apiserver/pkg/server/options"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
restclient "k8s.io/client-go/rest"
|
||||
cliflag "k8s.io/component-base/cli/flag"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
componentbaseoptions "k8s.io/component-base/config/options"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
)
|
||||
|
||||
@@ -42,9 +53,15 @@ type DeschedulerServer struct {
|
||||
|
||||
Client clientset.Interface
|
||||
EventClient clientset.Interface
|
||||
MetricsClient metricsclient.Interface
|
||||
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
|
||||
SecureServingInfo *apiserver.SecureServingInfo
|
||||
DisableMetrics bool
|
||||
EnableHTTP2 bool
|
||||
// FeatureGates enabled by the user
|
||||
FeatureGates map[string]bool
|
||||
// DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
|
||||
DefaultFeatureGates featuregate.FeatureGate
|
||||
}
|
||||
|
||||
// NewDeschedulerServer creates a new DeschedulerServer with default parameters
|
||||
@@ -102,8 +119,31 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
|
||||
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
|
||||
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
|
||||
fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
|
||||
"Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))
|
||||
|
||||
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
|
||||
|
||||
rs.SecureServing.AddFlags(fs)
|
||||
}
|
||||
|
||||
func (rs *DeschedulerServer) Apply() error {
|
||||
err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rs.DefaultFeatureGates = features.DefaultMutableFeatureGate
|
||||
|
||||
// loopbackClientConfig is a config for a privileged loopback connection
|
||||
var loopbackClientConfig *restclient.Config
|
||||
var secureServing *apiserver.SecureServingInfo
|
||||
if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
|
||||
klog.ErrorS(err, "failed to apply secure server configuration")
|
||||
return err
|
||||
}
|
||||
|
||||
secureServing.DisableHTTP2 = !rs.EnableHTTP2
|
||||
rs.SecureServingInfo = secureServing
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -23,19 +23,16 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
apiserver "k8s.io/apiserver/pkg/server"
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
"k8s.io/apiserver/pkg/server/mux"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/component-base/logs"
|
||||
logsapi "k8s.io/component-base/logs/api/v1"
|
||||
@@ -67,40 +64,16 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// loopbackClientConfig is a config for a privileged loopback connection
|
||||
var loopbackClientConfig *restclient.Config
|
||||
var secureServing *apiserver.SecureServingInfo
|
||||
if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
|
||||
klog.ErrorS(err, "failed to apply secure server configuration")
|
||||
if err = s.Apply(); err != nil {
|
||||
klog.ErrorS(err, "failed to apply")
|
||||
return err
|
||||
}
|
||||
|
||||
secureServing.DisableHTTP2 = !s.EnableHTTP2
|
||||
|
||||
ctx, done := signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
|
||||
if !s.DisableMetrics {
|
||||
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
|
||||
}
|
||||
|
||||
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
|
||||
|
||||
stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
if err = Run(cmd.Context(), s); err != nil {
|
||||
klog.ErrorS(err, "failed to run descheduler server")
|
||||
return err
|
||||
}
|
||||
|
||||
if err = Run(ctx, s); err != nil {
|
||||
klog.ErrorS(err, "descheduler server")
|
||||
return err
|
||||
}
|
||||
|
||||
done()
|
||||
// wait for metrics server to close
|
||||
<-stoppedCh
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@@ -114,8 +87,23 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
|
||||
func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
|
||||
ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
|
||||
if !rs.DisableMetrics {
|
||||
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
|
||||
}
|
||||
|
||||
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
|
||||
|
||||
stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "failed to create tracer provider")
|
||||
}
|
||||
@@ -124,5 +112,14 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
// increase the fake watch channel so the dry-run mode can be run
|
||||
// over a cluster with thousands of pods
|
||||
watch.DefaultChanSize = 100000
|
||||
return descheduler.Run(ctx, rs)
|
||||
err = descheduler.Run(ctx, rs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done()
|
||||
// wait for metrics server to close
|
||||
<-stoppedCh
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -23,13 +23,17 @@ descheduler [flags]
|
||||
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
|
||||
--dry-run Execute descheduler in dry run mode.
|
||||
--enable-http2 If http/2 should be enabled for the metrics and health check
|
||||
--feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
|
||||
AllAlpha=true|false (ALPHA - default=false)
|
||||
AllBeta=true|false (BETA - default=false)
|
||||
EvictionsInBackground=true|false (ALPHA - default=false)
|
||||
-h, --help help for descheduler
|
||||
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
|
||||
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
|
||||
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
|
||||
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
|
||||
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
|
||||
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
|
||||
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
|
||||
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
|
||||
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
|
||||
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
## Required Tools
|
||||
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
- [Go 1.16+](https://golang.org/dl/)
|
||||
- [Go 1.23+](https://golang.org/dl/)
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
|
||||
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
|
||||
|
||||
@@ -4,12 +4,13 @@ Starting with descheduler release v0.10.0 container images are available in the
|
||||
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |-------------------------------------------------|-------------------------|
|
||||
v0.32.2 | registry.k8s.io/descheduler/descheduler:v0.32.2 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.32.1 | registry.k8s.io/descheduler/descheduler:v0.32.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
|
||||
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
|
||||
starting with descheduler release v0.20.0 use the below process to download the official descheduler
|
||||
|
||||
97
go.mod
97
go.mod
@@ -1,6 +1,6 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.22.5
|
||||
go 1.23.3
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
@@ -13,20 +13,25 @@ require (
|
||||
go.opentelemetry.io/otel/sdk v1.28.0
|
||||
go.opentelemetry.io/otel/trace v1.28.0
|
||||
google.golang.org/grpc v1.65.0
|
||||
k8s.io/api v0.31.0
|
||||
k8s.io/apimachinery v0.31.0
|
||||
k8s.io/apiserver v0.31.0
|
||||
k8s.io/client-go v0.31.0
|
||||
k8s.io/code-generator v0.31.0
|
||||
k8s.io/component-base v0.31.0
|
||||
k8s.io/component-helpers v0.31.0
|
||||
k8s.io/api v0.32.0
|
||||
k8s.io/apimachinery v0.32.0
|
||||
k8s.io/apiserver v0.32.0
|
||||
k8s.io/client-go v0.32.0
|
||||
k8s.io/code-generator v0.32.0
|
||||
k8s.io/component-base v0.32.0
|
||||
k8s.io/component-helpers v0.32.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
k8s.io/metrics v0.32.0
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758
|
||||
kubevirt.io/api v1.3.0
|
||||
kubevirt.io/client-go v1.3.0
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.18.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
@@ -43,23 +48,27 @@ require (
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-kit/kit v0.13.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.4 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/glog v1.2.1 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
|
||||
github.com/google/cel-go v0.20.1 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/cel-go v0.22.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
@@ -68,49 +77,55 @@ require (
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/openshift/custom-resource-status v1.1.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.16 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/oauth2 v0.21.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
|
||||
k8s.io/kms v0.31.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/kms v0.32.0 // indirect
|
||||
k8s.io/kube-openapi v0.30.0 // indirect
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
|
||||
|
||||
replace golang.org/x/net => golang.org/x/net v0.33.0
|
||||
|
||||
replace golang.org/x/crypto => golang.org/x/crypto v0.31.0
|
||||
|
||||
509
go.sum
509
go.sum
@@ -1,11 +1,13 @@
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
|
||||
cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
@@ -16,12 +18,20 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
|
||||
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
@@ -33,61 +43,121 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
|
||||
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
|
||||
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
||||
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
|
||||
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
|
||||
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
|
||||
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
@@ -98,18 +168,22 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
@@ -117,21 +191,79 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
|
||||
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
|
||||
github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
|
||||
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
|
||||
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
|
||||
github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
|
||||
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
|
||||
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
|
||||
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
|
||||
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
|
||||
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
|
||||
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
|
||||
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
|
||||
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
|
||||
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
|
||||
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
|
||||
github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
|
||||
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
|
||||
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
|
||||
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
|
||||
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
|
||||
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
|
||||
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
|
||||
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
|
||||
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
|
||||
github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
|
||||
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
|
||||
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
|
||||
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
|
||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
|
||||
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
|
||||
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
|
||||
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
@@ -139,6 +271,7 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
@@ -153,49 +286,56 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
|
||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
|
||||
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||
go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
|
||||
go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
|
||||
go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8=
|
||||
go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
|
||||
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
|
||||
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
|
||||
go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok=
|
||||
go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
@@ -216,113 +356,268 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
||||
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
|
||||
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
|
||||
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
|
||||
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
|
||||
k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
|
||||
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
|
||||
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
|
||||
k8s.io/code-generator v0.31.0 h1:w607nrMi1KeDKB3/F/J4lIoOgAwc+gV9ZKew4XRfMp8=
|
||||
k8s.io/code-generator v0.31.0/go.mod h1:84y4w3es8rOJOUUP1rLsIiGlO1JuEaPFXQPA9e/K6U0=
|
||||
k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
|
||||
k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
|
||||
k8s.io/component-helpers v0.31.0 h1:jyRUKA+GX+q19o81k4x94imjNICn+e6Gzi6T89va1/A=
|
||||
k8s.io/component-helpers v0.31.0/go.mod h1:MrNIvT4iB7wXIseYSWfHUJB/aNUiFvbilp4qDfBQi6s=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
|
||||
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
|
||||
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
|
||||
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
|
||||
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
|
||||
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
|
||||
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs=
|
||||
k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag=
|
||||
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
|
||||
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
|
||||
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU=
|
||||
k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s=
|
||||
k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU=
|
||||
k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM=
|
||||
k8s.io/component-helpers v0.32.0 h1:pQEEBmRt3pDJJX98cQvZshDgJFeKRM4YtYkMmfOlczw=
|
||||
k8s.io/component-helpers v0.32.0/go.mod h1:9RuClQatbClcokXOcDWSzFKQm1huIf0FzQlPRpizlMc=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.31.0 h1:KchILPfB1ZE+ka7223mpU5zeFNkmb45jl7RHnlImUaI=
|
||||
k8s.io/kms v0.31.0/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
k8s.io/kms v0.32.0 h1:jwOfunHIrcdYl5FRcA+uUKKtg6qiqoPCwmS2T3XTYL4=
|
||||
k8s.io/kms v0.32.0/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
|
||||
k8s.io/metrics v0.32.0 h1:70qJ3ZS/9DrtH0UA0NVBI6gW2ip2GAn9e7NtoKERpns=
|
||||
k8s.io/metrics v0.32.0/go.mod h1:skdg9pDjVjCPIQqmc5rBzDL4noY64ORhKu9KCPv1+QI=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
|
||||
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
|
||||
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
|
||||
kubevirt.io/client-go v1.3.0/go.mod h1:qmcJZvUjbmggY1pp7irO3zesBJj7wwGIWAdnYEoh3yc=
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 h1:chmxuINvA7TPmIe8LpShCoKPxoegcKjkG9tYboFBs/U=
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
|
||||
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

@@ -19,7 +19,7 @@
go::verify_version() {
  GO_VERSION=($(go version))

  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.20|go1.21|go1.22') ]]; then
  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.21|go1.22|go1.23') ]]; then
    echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
    exit 1
  fi

@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
    ret=1
  fi

  if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
  if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then
    echo "Your vendored results are different:" >&2
    echo "${_out}" >&2
    echo "Vendor Verify failed." >&2

@@ -22,6 +22,9 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create", "update"]
@@ -29,6 +32,9 @@ rules:
  resources: ["leases"]
  resourceNames: ["descheduler"]
  verbs: ["get", "patch", "delete"]
- apiGroups: ["metrics.k8s.io"]
  resources: ["nodes", "pods"]
  verbs: ["get", "list"]
---
apiVersion: v1
kind: ServiceAccount

@@ -16,7 +16,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.31.0
          image: registry.k8s.io/descheduler/descheduler:v0.32.2
          volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume

@@ -19,7 +19,7 @@ spec:
      serviceAccountName: descheduler-sa
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.31.0
          image: registry.k8s.io/descheduler/descheduler:v0.32.2
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.31.0
          image: registry.k8s.io/descheduler/descheduler:v0.32.2
          volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume

@@ -41,6 +41,13 @@ type DeschedulerPolicy struct {

    // MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
    MaxNoOfPodsToEvictTotal *uint

    // EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
    // Default is false.
    EvictionFailureEventNotification *bool

    // MetricsCollector configures collection of metrics about actual resource utilization
    MetricsCollector MetricsCollector
}

// Namespaces carries a list of included/excluded namespaces
@@ -84,3 +91,10 @@ type PluginSet struct {
    Enabled  []string
    Disabled []string
}

// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
    // Enabled metrics collection from kubernetes metrics.
    // Later, the collection can be extended to other providers.
    Enabled bool
}

@@ -40,6 +40,13 @@ type DeschedulerPolicy struct {

    // MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
    MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`

    // EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
    // Default is false.
    EvictionFailureEventNotification *bool

    // MetricsCollector configures collection of metrics for actual resource utilization
    MetricsCollector MetricsCollector `json:"metricsCollector,omitempty"`
}

type DeschedulerProfile struct {
@@ -66,3 +73,10 @@ type PluginSet struct {
    Enabled  []string `json:"enabled"`
    Disabled []string `json:"disabled"`
}

// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
    // Enabled metrics collection from kubernetes metrics.
    // Later, the collection can be extended to other providers.
    Enabled bool `json:"enabled,omitempty"`
}
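
The hunks above add three policy-level knobs to the v1alpha2 API: maxNoOfPodsToEvictTotal, evictionFailureEventNotification, and metricsCollector. For orientation only, a policy file using them might look like the sketch below; it is a hedged guess based solely on the JSON tags visible in this diff (the serialized name of EvictionFailureEventNotification is not shown here, so that field is omitted), not a complete or authoritative policy example.

    # Illustrative sketch only; field names follow the json tags shown above,
    # all values and the empty profiles list are assumptions for the example.
    apiVersion: "descheduler/v1alpha2"
    kind: "DeschedulerPolicy"
    maxNoOfPodsToEvictTotal: 20        # cap on the total number of evictions
    metricsCollector:
      enabled: true                    # collect actual utilization from kubernetes metrics
    profiles: []                       # a real policy would define profiles with plugins here

Enabling metricsCollector presumably pairs with the metrics.k8s.io RBAC rule added earlier in this changeset, since the descheduler then needs read access to node and pod metrics.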

40 pkg/api/v1alpha2/zz_generated.conversion.go (generated)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -46,6 +46,16 @@ func RegisterConversions(s *runtime.Scheme) error {
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*MetricsCollector)(nil), (*api.MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(a.(*MetricsCollector), b.(*api.MetricsCollector), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*api.MetricsCollector)(nil), (*MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(a.(*api.MetricsCollector), b.(*MetricsCollector), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
    }); err != nil {
@@ -105,6 +115,10 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
    out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
    out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
    out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
    out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
    if err := Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
        return err
    }
    return nil
}

@@ -124,6 +138,10 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
    out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
    out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
    out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
    out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
    if err := Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
        return err
    }
    return nil
}

@@ -175,6 +193,26 @@ func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.Desch
    return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
}

func autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
    out.Enabled = in.Enabled
    return nil
}

// Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector is an autogenerated conversion function.
func Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
    return autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in, out, s)
}

func autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
    out.Enabled = in.Enabled
    return nil
}

// Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector is an autogenerated conversion function.
func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
    return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
}

func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
    out.Name = in.Name
    if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {

24  pkg/api/v1alpha2/zz_generated.deepcopy.go  generated

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -56,6 +56,12 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
		*out = new(uint)
		**out = **in
	}
	if in.EvictionFailureEventNotification != nil {
		in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
		*out = new(bool)
		**out = **in
	}
	out.MetricsCollector = in.MetricsCollector
	return
}

@@ -101,6 +107,22 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
	if in == nil {
		return nil
	}
	out := new(MetricsCollector)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
	*out = *in
2  pkg/api/v1alpha2/zz_generated.defaults.go  generated

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
24  pkg/api/zz_generated.deepcopy.go  generated

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -56,6 +56,12 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
		*out = new(uint)
		**out = **in
	}
	if in.EvictionFailureEventNotification != nil {
		in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
		*out = new(bool)
		**out = **in
	}
	out.MetricsCollector = in.MetricsCollector
	return
}

@@ -101,6 +107,22 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
	if in == nil {
		return nil
	}
	out := new(MetricsCollector)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
	*out = *in
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,16 +19,16 @@ package client
import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	componentbaseconfig "k8s.io/component-base/config"

	// Ensure to load all auth plugins.
	clientset "k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	componentbaseconfig "k8s.io/component-base/config"
	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
	var cfg *rest.Config
	if len(clientConnection.Kubeconfig) != 0 {
		master, err := GetMasterFromKubeconfig(clientConnection.Kubeconfig)
@@ -56,9 +56,28 @@ func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfigura
		cfg = rest.AddUserAgent(cfg, userAgt)
	}

	return cfg, nil
}

func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
	cfg, err := createConfig(clientConnection, userAgt)
	if err != nil {
		return nil, fmt.Errorf("unable to create config: %v", err)
	}

	return clientset.NewForConfig(cfg)
}

func CreateMetricsClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (metricsclient.Interface, error) {
	cfg, err := createConfig(clientConnection, userAgt)
	if err != nil {
		return nil, fmt.Errorf("unable to create config: %v", err)
	}

	// Create the metrics clientset to access the metrics.k8s.io API
	return metricsclient.NewForConfig(cfg)
}

func GetMasterFromKubeconfig(filename string) (string, error) {
	config, err := clientcmd.LoadFromFile(filename)
	if err != nil {
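A hedged usage sketch of the refactored client helpers above: both clients are built from the same createConfig result, so they share kubeconfig handling and the user agent. The kubeconfig path, helper name, and error wrapping are placeholders, not taken from the repository.

package example

import (
	"fmt"

	componentbaseconfig "k8s.io/component-base/config"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
)

// buildClients creates the regular clientset and the metrics.k8s.io clientset
// from one ClientConnectionConfiguration.
func buildClients() error {
	conn := componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: "/etc/kubernetes/kubeconfig"} // placeholder path

	kubeClient, err := client.CreateClient(conn, "descheduler")
	if err != nil {
		return fmt.Errorf("kube client: %v", err)
	}

	metricsClient, err := client.CreateMetricsClient(conn, "descheduler")
	if err != nil {
		return fmt.Errorf("metrics client: %v", err)
	}

	_, _ = kubeClient, metricsClient
	return nil
}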
@@ -23,43 +23,43 @@ import (
	"strconv"
	"time"

	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	policyv1 "k8s.io/api/policy/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/events"
	componentbaseconfig "k8s.io/component-base/config"
	"k8s.io/klog/v2"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	"sigs.k8s.io/descheduler/pkg/tracing"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/pkg/version"

	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
	"sigs.k8s.io/descheduler/metrics"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/client"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
	frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
	"sigs.k8s.io/descheduler/pkg/tracing"
	"sigs.k8s.io/descheduler/pkg/utils"
	"sigs.k8s.io/descheduler/pkg/version"
)
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
@@ -78,6 +78,7 @@ type descheduler struct {
	eventRecorder          events.EventRecorder
	podEvictor             *evictions.PodEvictor
	podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
	metricsCollector       *metricscollector.MetricsCollector
}

type informerResources struct {
@@ -124,7 +125,8 @@ func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFact
	return nil
}

func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory,
) (*descheduler, error) {
	podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

	ir := newInformerResources(sharedInformerFactory)
@@ -133,24 +135,47 @@ func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.Desche
		// Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
		// consistent with the real runs without having to keep the list here in sync.
		v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
		schedulingv1.SchemeGroupVersion.WithResource("priorityclasses")) // Used by the defaultevictor plugin
		schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
		policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
	) // Used by the defaultevictor plugin

	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
	if err != nil {
		return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
	}

	podEvictor := evictions.NewPodEvictor(
		nil,
	podEvictor, err := evictions.NewPodEvictor(
		ctx,
		rs.Client,
		eventRecorder,
		podInformer,
		rs.DefaultFeatureGates,
		evictions.NewOptions().
			WithPolicyGroupVersion(evictionPolicyGroupVersion).
			WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
			WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
			WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
			WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
			WithDryRun(rs.DryRun).
			WithMetricsEnabled(!rs.DisableMetrics),
	)
	if err != nil {
		return nil, err
	}

	var metricsCollector *metricscollector.MetricsCollector
	if deschedulerPolicy.MetricsCollector.Enabled {
		nodeSelector := labels.Everything()
		if deschedulerPolicy.NodeSelector != nil {
			sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
			if err != nil {
				return nil, err
			}
			nodeSelector = sel
		}
		metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
	}

	return &descheduler{
		rs: rs,
@@ -161,6 +186,7 @@ func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.Desche
		eventRecorder:          eventRecorder,
		podEvictor:             podEvictor,
		podEvictionReactionFnc: podEvictionReactionFnc,
		metricsCollector:       metricsCollector,
	}, nil
}
@@ -219,7 +245,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)

	d.runProfiles(ctx, client, nodes)

	klog.V(1).InfoS("Number of evicted pods", "totalEvicted", d.podEvictor.TotalEvicted())
	klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())

	return nil
}
@@ -240,6 +266,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
			frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
			frameworkprofile.WithPodEvictor(d.podEvictor),
			frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
			frameworkprofile.WithMetricsCollector(d.metricsCollector),
		)
		if err != nil {
			klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
@@ -304,6 +331,14 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
		return err
	}

	if deschedulerPolicy.MetricsCollector.Enabled {
		metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
		if err != nil {
			return err
		}
		rs.MetricsClient = metricsClient
	}

	runFn := func() error {
		return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
	}
@@ -400,7 +435,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
	defer eventBroadcaster.Shutdown()

	descheduler, err := newDescheduler(rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
	descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
	if err != nil {
		span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
		return err
@@ -410,6 +445,21 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer

	sharedInformerFactory.Start(ctx.Done())
	sharedInformerFactory.WaitForCacheSync(ctx.Done())
	descheduler.podEvictor.WaitForEventHandlersSync(ctx)

	if deschedulerPolicy.MetricsCollector.Enabled {
		go func() {
			klog.V(2).Infof("Starting metrics collector")
			descheduler.metricsCollector.Run(ctx)
			klog.V(2).Infof("Stopped metrics collector")
		}()
		klog.V(2).Infof("Waiting for metrics collector to sync")
		if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
			return descheduler.metricsCollector.HasSynced(), nil
		}); err != nil {
			return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
		}
	}

	wait.NonSlidingUntil(func() {
		// A next context is created here intentionally to avoid nesting the spans via context.
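For reference, the collector startup sequence above condensed into one hypothetical helper. It reuses only calls that appear in the diff (Run, HasSynced, wait.PollWithContext); the helper name and packaging are assumptions.

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
)

// startAndWait runs the collector in the background and blocks until its first
// successful collection, mirroring the pattern in RunDeschedulerStrategies.
func startAndWait(ctx context.Context, collector *metricscollector.MetricsCollector) error {
	go collector.Run(ctx)
	return wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (bool, error) {
		return collector.HasSynced(), nil
	})
}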
@@ -2,25 +2,38 @@ package descheduler

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"net/http"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	apiversion "k8s.io/apimachinery/pkg/version"
	fakediscovery "k8s.io/client-go/discovery/fake"
	"k8s.io/client-go/informers"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/component-base/featuregate"
	"k8s.io/klog/v2"
	"k8s.io/metrics/pkg/apis/metrics/v1beta1"
	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
	fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
	utilptr "k8s.io/utils/ptr"

	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/features"
	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
	"sigs.k8s.io/descheduler/pkg/utils"
@@ -28,11 +41,34 @@ import (
	"sigs.k8s.io/descheduler/test"
)

var (
	podEvictionError     = errors.New("PodEvictionError")
	tooManyRequestsError = &apierrors.StatusError{
		ErrStatus: metav1.Status{
			Status:  metav1.StatusFailure,
			Code:    http.StatusTooManyRequests,
			Reason:  metav1.StatusReasonTooManyRequests,
			Message: "admission webhook \"virt-launcher-eviction-interceptor.kubevirt.io\" denied the request: Eviction triggered evacuation of VMI",
		},
	}
	nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
	podsgvr  = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
)

func initFeatureGates() featuregate.FeatureGate {
	featureGates := featuregate.NewFeatureGate()
	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
	})
	return featureGates
}

func initPluginRegistry() {
	pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
	pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
	pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
	pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
	pluginregistry.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilization{}, &nodeutilization.LowNodeUtilizationArgs{}, nodeutilization.ValidateLowNodeUtilizationArgs, nodeutilization.SetDefaults_LowNodeUtilizationArgs, pluginregistry.PluginRegistry)
}
func removePodsViolatingNodeTaintsPolicy() *api.DeschedulerPolicy {
@@ -99,7 +135,45 @@ func removeDuplicatesPolicy() *api.DeschedulerPolicy {
	}
}

func initDescheduler(t *testing.T, ctx context.Context, internalDeschedulerPolicy *api.DeschedulerPolicy, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
	return &api.DeschedulerPolicy{
		Profiles: []api.DeschedulerProfile{
			{
				Name: "Profile",
				PluginConfigs: []api.PluginConfig{
					{
						Name: nodeutilization.LowNodeUtilizationPluginName,
						Args: &nodeutilization.LowNodeUtilizationArgs{
							Thresholds:       thresholds,
							TargetThresholds: targetThresholds,
							MetricsUtilization: nodeutilization.MetricsUtilization{
								MetricsServer: metricsEnabled,
							},
						},
					},
					{
						Name: defaultevictor.PluginName,
						Args: &defaultevictor.DefaultEvictorArgs{},
					},
				},
				Plugins: api.Plugins{
					Filter: api.PluginSet{
						Enabled: []string{
							defaultevictor.PluginName,
						},
					},
					Balance: api.PluginSet{
						Enabled: []string{
							nodeutilization.LowNodeUtilizationPluginName,
						},
					},
				},
			},
		},
	}
}

func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
	client := fakeclientset.NewSimpleClientset(objects...)
	eventClient := fakeclientset.NewSimpleClientset(objects...)

@@ -109,11 +183,13 @@ func initDescheduler(t *testing.T, ctx context.Context, internalDeschedulerPolic
	}
	rs.Client = client
	rs.EventClient = eventClient
	rs.DefaultFeatureGates = featureGates
	rs.MetricsClient = metricsClient

	sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
	eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)

	descheduler, err := newDescheduler(rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
	descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
	if err != nil {
		eventBroadcaster.Shutdown()
		t.Fatalf("Unable to create a descheduler instance: %v", err)
@@ -144,6 +220,7 @@ func TestTaintsUpdated(t *testing.T) {
	}
	rs.Client = client
	rs.EventClient = eventClient
	rs.DefaultFeatureGates = initFeatureGates()

	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
@@ -167,7 +244,7 @@
	}

	var evictedPods []string
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

	if err := RunDeschedulerStrategies(ctx, rs, removePodsViolatingNodeTaintsPolicy(), "v1"); err != nil {
		t.Fatalf("Unable to run descheduler strategies: %v", err)
@@ -206,6 +283,7 @@ func TestDuplicate(t *testing.T) {
	}
	rs.Client = client
	rs.EventClient = eventClient
	rs.DefaultFeatureGates = initFeatureGates()

	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
@@ -217,7 +295,7 @@
	}

	var evictedPods []string
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

	if err := RunDeschedulerStrategies(ctx, rs, removeDuplicatesPolicy(), "v1"); err != nil {
		t.Fatalf("Unable to run descheduler strategies: %v", err)
@@ -245,6 +323,7 @@ func TestRootCancel(t *testing.T) {
	rs.Client = client
	rs.EventClient = eventClient
	rs.DeschedulingInterval = 100 * time.Millisecond
	rs.DefaultFeatureGates = initFeatureGates()
	errChan := make(chan error, 1)
	defer close(errChan)

@@ -280,6 +359,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
	rs.Client = client
	rs.EventClient = eventClient
	rs.DeschedulingInterval = 0
	rs.DefaultFeatureGates = initFeatureGates()
	errChan := make(chan error, 1)
	defer close(errChan)

@@ -358,7 +438,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
	}
}
func podEvictionReactionTestingFnc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackground func(podName string) bool, evictionErr error) func(action core.Action) (bool, runtime.Object, error) {
	return func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() == "eviction" {
			createAct, matched := action.(core.CreateActionImpl)
@@ -366,7 +446,14 @@ func podEvictionReactionTestingFnc(evictedPods *[]string) func(action core.Actio
				return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
			}
			if eviction, matched := createAct.Object.(*policy.Eviction); matched {
				if isEvictionsInBackground != nil && isEvictionsInBackground(eviction.GetName()) {
					return true, nil, tooManyRequestsError
				}
				if evictionErr != nil {
					return true, nil, evictionErr
				}
				*evictedPods = append(*evictedPods, eviction.GetName())
				return true, nil, nil
			}
		}
		return false, nil, nil // fallback to the default reactor
@@ -402,15 +489,15 @@ func TestPodEvictorReset(t *testing.T) {

	internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
	ctxCancel, cancel := context.WithCancel(ctx)
	rs, descheduler, client := initDescheduler(t, ctxCancel, internalDeschedulerPolicy, node1, node2, p1, p2)
	rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
	defer cancel()

	var evictedPods []string
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

	var fakeEvictedPods []string
	descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
		return podEvictionReactionTestingFnc(&fakeEvictedPods)
		return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
	}

	// a single pod eviction expected
@@ -453,6 +540,138 @@ func TestPodEvictorReset(t *testing.T) {
	}
}
func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
	if total := descheduler.podEvictor.TotalEvictionRequests(); total != totalEvictionRequests {
		t.Fatalf("Expected %v total eviction requests, got %v instead", totalEvictionRequests, total)
	}
	if total := descheduler.podEvictor.TotalEvicted(); total != totalEvicted {
		t.Fatalf("Expected %v total evictions, got %v instead", totalEvicted, total)
	}
	t.Logf("Total evictions: %v, total eviction requests: %v, total evictions and eviction requests: %v", totalEvicted, totalEvictionRequests, totalEvicted+totalEvictionRequests)
}

func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
	err := descheduler.runDeschedulerLoop(ctx, nodes)
	if err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	checkTotals(t, ctx, descheduler, totalEvictionRequests, totalEvicted)
}

func TestEvictionRequestsCache(t *testing.T) {
	initPluginRegistry()

	ctx := context.Background()
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
	nodes := []*v1.Node{node1, node2}

	ownerRef1 := test.GetReplicaSetOwnerRefList()
	updatePod := func(pod *v1.Pod) {
		pod.Namespace = "dev"
		pod.ObjectMeta.OwnerReferences = ownerRef1
		pod.Status.Phase = v1.PodRunning
	}
	updatePodWithEvictionInBackground := func(pod *v1.Pod) {
		updatePod(pod)
		pod.Annotations = map[string]string{
			evictions.EvictionRequestAnnotationKey: "",
		}
	}

	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
	p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
	p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
	p5 := test.BuildTestPod("p5", 100, 0, node1.Name, updatePod)

	internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
	ctxCancel, cancel := context.WithCancel(ctx)
	featureGates := featuregate.NewFeatureGate()
	featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
	})
	_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
	defer cancel()

	var fakeEvictedPods []string
	descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
		return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
	}

	var evictedPods []string
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
// No evicted pod is actually deleted on purpose so the test can run the descheduling cycle repeatedly
|
||||
// without recreating the pods.
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background got initiated")
|
||||
p2.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Another eviction in background got initiated")
|
||||
p1.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
|
||||
if _, err := client.CoreV1().Pods(p1.Namespace).Update(context.TODO(), p1, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background completed")
|
||||
if err := client.CoreV1().Pods(p1.Namespace).Delete(context.TODO(), p1.Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 2)
|
||||
|
||||
klog.Infof("Scenario: A new pod without eviction in background added")
|
||||
if _, err := client.CoreV1().Pods(p5.Namespace).Create(context.TODO(), p5, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("unable to create a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions increased after running a descheduling cycle")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background canceled => eviction in progress annotation removed")
|
||||
delete(p2.Annotations, evictions.EvictionInProgressAnnotationKey)
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
checkTotals(t, ctx, descheduler, 0, 3)
|
||||
|
||||
klog.Infof("Scenario: Re-run the descheduling cycle to re-request eviction in background")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background completed with a pod in completed state")
|
||||
p2.Status.Phase = v1.PodSucceeded
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 0, 3)
|
||||
}
|
||||
|
||||
func TestDeschedulingLimits(t *testing.T) {
	initPluginRegistry()

@@ -496,6 +715,13 @@ func TestDeschedulingLimits(t *testing.T) {
		pod.ObjectMeta.OwnerReferences = ownerRef1
	}

	updatePodWithEvictionInBackground := func(pod *v1.Pod) {
		updatePod(pod)
		pod.Annotations = map[string]string{
			evictions.EvictionRequestAnnotationKey: "",
		}
	}

	for _, tc := range tests {
		t.Run(tc.description, func(t *testing.T) {
			ctx := context.Background()
@@ -503,17 +729,33 @@ func TestDeschedulingLimits(t *testing.T) {
			node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
			nodes := []*v1.Node{node1, node2}
			ctxCancel, cancel := context.WithCancel(ctx)
			_, descheduler, client := initDescheduler(t, ctxCancel, tc.policy, node1, node2)
			featureGates := featuregate.NewFeatureGate()
			featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
				features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
			})
			_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
			defer cancel()

			var fakeEvictedPods []string
			descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
				return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
			}

			var evictedPods []string
			client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))

			rand.Seed(time.Now().UnixNano())
			pods := []*v1.Pod{
				test.BuildTestPod("p1", 100, 0, node1.Name, updatePod),
				test.BuildTestPod("p2", 100, 0, node1.Name, updatePod),
				test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground),
				test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground),
				test.BuildTestPod("p3", 100, 0, node1.Name, updatePod),
				test.BuildTestPod("p4", 100, 0, node1.Name, updatePod),
				test.BuildTestPod("p5", 100, 0, node1.Name, updatePod),
			}

			for i := 0; i < 10; i++ {
				rand.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
				func() {
					for j := 0; j < 5; j++ {
						idx := j
						if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
@@ -527,15 +769,97 @@ func TestDeschedulingLimits(t *testing.T) {
					}
					time.Sleep(100 * time.Millisecond)

					klog.Infof("2 evictions in background expected, 2 normal evictions")
					err := descheduler.runDeschedulerLoop(ctx, nodes)
					if err != nil {
						t.Fatalf("Unable to run a descheduling loop: %v", err)
					}
					totalERs := descheduler.podEvictor.TotalEvictionRequests()
					totalEs := descheduler.podEvictor.TotalEvicted()
					if totalERs+totalEs > tc.limit {
						t.Fatalf("Expected %v evictions and eviction requests in total, got %v instead", tc.limit, totalERs+totalEs)
					}
					t.Logf("Total evictions and eviction requests: %v (er=%v, e=%v)", totalERs+totalEs, totalERs, totalEs)
				}()
			}
		})
	}
}
func TestLoadAwareDescheduling(t *testing.T) {
	initPluginRegistry()

	ownerRef1 := test.GetReplicaSetOwnerRefList()
	updatePod := func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = ownerRef1
	}

	ctx := context.Background()
	node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
	nodes := []*v1.Node{node1, node2}

	p1 := test.BuildTestPod("p1", 300, 0, node1.Name, updatePod)
	p2 := test.BuildTestPod("p2", 300, 0, node1.Name, updatePod)
	p3 := test.BuildTestPod("p3", 300, 0, node1.Name, updatePod)
	p4 := test.BuildTestPod("p4", 300, 0, node1.Name, updatePod)
	p5 := test.BuildTestPod("p5", 300, 0, node1.Name, updatePod)

	nodemetricses := []*v1beta1.NodeMetrics{
		test.BuildNodeMetrics("n1", 2400, 3000),
		test.BuildNodeMetrics("n2", 400, 0),
	}

	podmetricses := []*v1beta1.PodMetrics{
		test.BuildPodMetrics("p1", 400, 0),
		test.BuildPodMetrics("p2", 400, 0),
		test.BuildPodMetrics("p3", 400, 0),
		test.BuildPodMetrics("p4", 400, 0),
		test.BuildPodMetrics("p5", 400, 0),
	}

	metricsClientset := fakemetricsclient.NewSimpleClientset()
	for _, nodemetrics := range nodemetricses {
		metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
	}
	for _, podmetrics := range podmetricses {
		metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
	}

	policy := lowNodeUtilizationPolicy(
		api.ResourceThresholds{
			v1.ResourceCPU:  30,
			v1.ResourcePods: 30,
		},
		api.ResourceThresholds{
			v1.ResourceCPU:  50,
			v1.ResourcePods: 50,
		},
		true, // enabled metrics utilization
	)
	policy.MetricsCollector.Enabled = true

	ctxCancel, cancel := context.WithCancel(ctx)
	_, descheduler, _ := initDescheduler(
		t,
		ctxCancel,
		initFeatureGates(),
		policy,
		metricsClientset,
		node1, node2, p1, p2, p3, p4, p5)
	defer cancel()

	// This needs to be run since the metrics collector is started
	// after newDescheduler in RunDeschedulerStrategies.
	descheduler.metricsCollector.Collect(ctx)

	err := descheduler.runDeschedulerLoop(ctx, nodes)
	if err != nil {
		t.Fatalf("Unable to run a descheduling loop: %v", err)
	}
	totalEs := descheduler.podEvictor.TotalEvicted()
	if totalEs > tc.limit {
		t.Fatalf("Expected %v evictions in total, got %v instead", tc.limit, totalEs)
	if totalEs != 2 {
		t.Fatalf("Expected %v evictions in total, got %v instead", 2, totalEs)
	}
	t.Logf("Total evictions: %v", totalEs)
	})
}
}
@@ -19,7 +19,9 @@ package evictions
import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
@@ -27,15 +29,176 @@ import (
	policy "k8s.io/api/policy/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/events"
	"k8s.io/component-base/featuregate"
	"k8s.io/klog/v2"
	"sigs.k8s.io/descheduler/metrics"

	"sigs.k8s.io/descheduler/metrics"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	"sigs.k8s.io/descheduler/pkg/features"
	"sigs.k8s.io/descheduler/pkg/tracing"
)

var (
	assumedEvictionRequestTimeoutSeconds uint          = 10 * 60 // 10 minutes
	evictionRequestsCacheResyncPeriod    time.Duration = 10 * time.Minute
	// syncedPollPeriod controls how often you look at the status of your sync funcs
	syncedPollPeriod = 100 * time.Millisecond
)
type evictionRequestItem struct {
	podName, podNamespace, podNodeName string
	evictionAssumed                    bool
	assumedTimestamp                   metav1.Time
}

type evictionRequestsCache struct {
	mu                           sync.RWMutex
	requests                     map[string]evictionRequestItem
	requestsPerNode              map[string]uint
	requestsPerNamespace         map[string]uint
	requestsTotal                uint
	assumedRequestTimeoutSeconds uint
}

func newEvictionRequestsCache(assumedRequestTimeoutSeconds uint) *evictionRequestsCache {
	return &evictionRequestsCache{
		requests:                     make(map[string]evictionRequestItem),
		requestsPerNode:              make(map[string]uint),
		requestsPerNamespace:         make(map[string]uint),
		assumedRequestTimeoutSeconds: assumedRequestTimeoutSeconds,
	}
}

func (erc *evictionRequestsCache) run(ctx context.Context) {
	wait.UntilWithContext(ctx, erc.cleanCache, evictionRequestsCacheResyncPeriod)
}

// cleanCache removes all assumed entries that have not been confirmed
// for more than a specified timeout
func (erc *evictionRequestsCache) cleanCache(ctx context.Context) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	klog.V(4).Infof("Cleaning cache of assumed eviction requests in background")
	for uid, item := range erc.requests {
		if item.evictionAssumed {
			requestAgeSeconds := uint(metav1.Now().Sub(item.assumedTimestamp.Local()).Seconds())
			if requestAgeSeconds > erc.assumedRequestTimeoutSeconds {
				klog.V(4).InfoS("Assumed eviction request in background timed out, deleting", "timeout", erc.assumedRequestTimeoutSeconds, "podNamespace", item.podNamespace, "podName", item.podName)
				erc.deleteItem(uid)
			}
		}
	}
}
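A small sketch of how this cache is exercised. Since evictionRequestsCache is unexported, the example pretends to live inside the evictions package; the pod name, namespace, UID, and node are invented for illustration, and only methods that appear in this diff are used.

package evictions

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// exampleCacheUsage is illustrative only and is not part of the change set.
func exampleCacheUsage() {
	erc := newEvictionRequestsCache(600) // assumed requests expire after 600 seconds

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "p1",
			Namespace: "dev",
			UID:       types.UID("11111111-1111-1111-1111-111111111111"), // getPodKey panics on an empty UID
		},
		Spec: v1.PodSpec{NodeName: "n1"},
	}

	erc.assumePod(pod) // eviction requested by the descheduler, not yet confirmed by the controller
	fmt.Println(erc.TotalEvictionRequests(), erc.evictionRequestsPerNode("n1"))

	erc.deletePod(pod) // the eviction finished or the request annotation went away
	fmt.Println(erc.TotalEvictionRequests())
}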
func (erc *evictionRequestsCache) evictionRequestsPerNode(nodeName string) uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return erc.requestsPerNode[nodeName]
}

func (erc *evictionRequestsCache) evictionRequestsPerNamespace(ns string) uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return erc.requestsPerNamespace[ns]
}

func (erc *evictionRequestsCache) evictionRequestsTotal() uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return erc.requestsTotal
}

func (erc *evictionRequestsCache) TotalEvictionRequests() uint {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	return uint(len(erc.requests))
}

// getPodKey returns the string key of a pod.
func getPodKey(pod *v1.Pod) string {
	uid := string(pod.UID)
	// Every pod is expected to have the UID set.
	// When the descheduling framework is used for simulation
	// user created workload may forget to set the UID.
	if len(uid) == 0 {
		panic(fmt.Errorf("cannot get cache key for %v/%v pod with empty UID", pod.Namespace, pod.Name))
	}
	return uid
}
func (erc *evictionRequestsCache) addPod(pod *v1.Pod) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	uid := getPodKey(pod)
	if _, exists := erc.requests[uid]; exists {
		return
	}
	erc.requests[uid] = evictionRequestItem{podNamespace: pod.Namespace, podName: pod.Name, podNodeName: pod.Spec.NodeName}
	erc.requestsPerNode[pod.Spec.NodeName]++
	erc.requestsPerNamespace[pod.Namespace]++
	erc.requestsTotal++
}

func (erc *evictionRequestsCache) assumePod(pod *v1.Pod) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	uid := getPodKey(pod)
	if _, exists := erc.requests[uid]; exists {
		return
	}
	erc.requests[uid] = evictionRequestItem{
		podNamespace:     pod.Namespace,
		podName:          pod.Name,
		podNodeName:      pod.Spec.NodeName,
		evictionAssumed:  true,
		assumedTimestamp: metav1.NewTime(time.Now()),
	}
	erc.requestsPerNode[pod.Spec.NodeName]++
	erc.requestsPerNamespace[pod.Namespace]++
	erc.requestsTotal++
}

// no locking, expected to be invoked from protected methods only
func (erc *evictionRequestsCache) deleteItem(uid string) {
	erc.requestsPerNode[erc.requests[uid].podNodeName]--
	if erc.requestsPerNode[erc.requests[uid].podNodeName] == 0 {
		delete(erc.requestsPerNode, erc.requests[uid].podNodeName)
	}
	erc.requestsPerNamespace[erc.requests[uid].podNamespace]--
	if erc.requestsPerNamespace[erc.requests[uid].podNamespace] == 0 {
		delete(erc.requestsPerNamespace, erc.requests[uid].podNamespace)
	}
	erc.requestsTotal--
	delete(erc.requests, uid)
}

func (erc *evictionRequestsCache) deletePod(pod *v1.Pod) {
	erc.mu.Lock()
	defer erc.mu.Unlock()
	uid := getPodKey(pod)
	if _, exists := erc.requests[uid]; exists {
		erc.deleteItem(uid)
	}
}

func (erc *evictionRequestsCache) hasPod(pod *v1.Pod) bool {
	erc.mu.RLock()
	defer erc.mu.RUnlock()
	uid := getPodKey(pod)
	_, exists := erc.requests[uid]
	return exists
}

var (
	EvictionRequestAnnotationKey    = "descheduler.alpha.kubernetes.io/request-evict-only"
	EvictionInProgressAnnotationKey = "descheduler.alpha.kubernetes.io/eviction-in-progress"
	EvictionInBackgroundErrorText   = "Eviction triggered evacuation"
)
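For context, a hypothetical pod that opts into this flow. The annotation key is the literal value defined just above; the pod name and namespace are invented.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// evictOnlyPod builds a pod that asks the descheduler to only request eviction
// in background; an external controller then reports progress back through the
// eviction-in-progress annotation.
func evictOnlyPod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "vm-launcher-0", // illustrative name
			Namespace: "dev",
			Annotations: map[string]string{
				"descheduler.alpha.kubernetes.io/request-evict-only": "",
			},
		},
	}
}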
// nodePodEvictedCount keeps count of pods evicted on node
type (
	nodePodEvictedCount map[string]uint
@@ -43,10 +206,11 @@
)

type PodEvictor struct {
	mu                               sync.Mutex
	mu                               sync.RWMutex
	client                           clientset.Interface
	policyGroupVersion               string
	dryRun                           bool
	evictionFailureEventNotification bool
	maxPodsToEvictPerNode            *uint
	maxPodsToEvictPerNamespace       *uint
	maxPodsToEvictTotal              *uint
@@ -55,42 +219,171 @@ type PodEvictor struct {
	totalPodCount  uint
	metricsEnabled bool
	eventRecorder  events.EventRecorder
	erCache        *evictionRequestsCache
	featureGates   featuregate.FeatureGate

	// registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start.
	registeredHandlers []cache.ResourceEventHandlerRegistration
}
func NewPodEvictor(
	ctx context.Context,
	client clientset.Interface,
	eventRecorder events.EventRecorder,
	podInformer cache.SharedIndexInformer,
	featureGates featuregate.FeatureGate,
	options *Options,
) *PodEvictor {
) (*PodEvictor, error) {
	if options == nil {
		options = NewOptions()
	}

	return &PodEvictor{
	podEvictor := &PodEvictor{
		client:                           client,
		eventRecorder:                    eventRecorder,
		policyGroupVersion:               options.policyGroupVersion,
		dryRun:                           options.dryRun,
		evictionFailureEventNotification: options.evictionFailureEventNotification,
		maxPodsToEvictPerNode:            options.maxPodsToEvictPerNode,
		maxPodsToEvictPerNamespace:       options.maxPodsToEvictPerNamespace,
		maxPodsToEvictTotal:              options.maxPodsToEvictTotal,
		metricsEnabled:                   options.metricsEnabled,
		nodePodCount:                     make(nodePodEvictedCount),
		namespacePodCount:                make(namespacePodEvictCount),
		featureGates:                     featureGates,
	}

	if featureGates.Enabled(features.EvictionsInBackground) {
		erCache := newEvictionRequestsCache(assumedEvictionRequestTimeoutSeconds)

		handlerRegistration, err := podInformer.AddEventHandler(
			cache.ResourceEventHandlerFuncs{
				AddFunc: func(obj interface{}) {
					pod, ok := obj.(*v1.Pod)
					if !ok {
						klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj)
						return
					}
					if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
						if _, exists := pod.Annotations[EvictionInProgressAnnotationKey]; exists {
							// Ignore completed/succeeded or failed pods
							if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
								klog.V(3).InfoS("Eviction in background detected. Adding pod to the cache.", "pod", klog.KObj(pod))
								erCache.addPod(pod)
							}
						}
					}
				},
				UpdateFunc: func(oldObj, newObj interface{}) {
					oldPod, ok := oldObj.(*v1.Pod)
					if !ok {
						klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
						return
					}
					newPod, ok := newObj.(*v1.Pod)
					if !ok {
						klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
						return
					}
					// Ignore pods that are not subject to an eviction in background
					if _, exists := newPod.Annotations[EvictionRequestAnnotationKey]; !exists {
						if erCache.hasPod(newPod) {
							klog.V(3).InfoS("Pod with eviction in background lost annotation. Removing pod from the cache.", "pod", klog.KObj(newPod))
						}
						erCache.deletePod(newPod)
						return
					}
					// Remove completed/succeeded or failed pods from the cache
					if newPod.Status.Phase == v1.PodSucceeded || newPod.Status.Phase == v1.PodFailed {
						klog.V(3).InfoS("Pod with eviction in background completed. Removing pod from the cache.", "pod", klog.KObj(newPod))
						erCache.deletePod(newPod)
						return
					}
					// Ignore any pod that does not have eviction in progress
					if _, exists := newPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
						// In case EvictionInProgressAnnotationKey annotation is not present/removed
						// it's unclear whether the eviction was restarted or terminated.
						// If the eviction gets restarted the pod needs to be removed from the cache
						// to allow re-triggering the eviction.
						if _, exists := oldPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
							return
						}
						// the annotation was removed -> remove the pod from the cache to allow to
						// request for eviction again. In case the eviction got restarted requesting
						// the eviction again is expected to be a no-op. In case the eviction
						// got terminated with no-retry, requesting a new eviction is a normal
						// operation.
						klog.V(3).InfoS("Eviction in background canceled (annotation removed). Removing pod from the cache.", "annotation", EvictionInProgressAnnotationKey, "pod", klog.KObj(newPod))
						erCache.deletePod(newPod)
						return
					}
					// Pick up the eviction in progress
					if !erCache.hasPod(newPod) {
						klog.V(3).InfoS("Eviction in background detected. Updating the cache.", "pod", klog.KObj(newPod))
					}
					erCache.addPod(newPod)
				},
				DeleteFunc: func(obj interface{}) {
					var pod *v1.Pod
					switch t := obj.(type) {
					case *v1.Pod:
						pod = t
					case cache.DeletedFinalStateUnknown:
						var ok bool
						pod, ok = t.Obj.(*v1.Pod)
						if !ok {
							klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
							return
						}
					default:
						klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t)
						return
					}
					if erCache.hasPod(pod) {
						klog.V(3).InfoS("Pod with eviction in background deleted/evicted. Removing pod from the cache.", "pod", klog.KObj(pod))
					}
					erCache.deletePod(pod)
				},
			},
		)
		if err != nil {
			return nil, fmt.Errorf("unable to register event handler for pod evictor: %v", err)
		}

		podEvictor.registeredHandlers = append(podEvictor.registeredHandlers, handlerRegistration)

		go erCache.run(ctx)

		podEvictor.erCache = erCache
	}

	return podEvictor, nil
}
// WaitForEventHandlersSync waits for EventHandlers to sync.
// It returns true if it was successful, false if the controller should shut down
func (pe *PodEvictor) WaitForEventHandlersSync(ctx context.Context) error {
	return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) {
		for _, handler := range pe.registeredHandlers {
			if !handler.HasSynced() {
				return false, nil
			}
		}
		return true, nil
	})
}
// NodeEvicted gives a number of pods evicted for node
|
||||
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.nodePodCount[node.Name]
|
||||
}
|
||||
|
||||
// TotalEvicted gives a number of pods evicted through all nodes
|
||||
func (pe *PodEvictor) TotalEvicted() uint {
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.totalPodCount
|
||||
}
|
||||
|
||||
@@ -108,6 +401,46 @@ func (pe *PodEvictor) SetClient(client clientset.Interface) {
|
||||
pe.client = client
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsTotal() uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsTotal()
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsPerNode(node string) uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsPerNode(node)
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsPerNamespace(ns string) uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsPerNamespace(ns)
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) EvictionRequests(node *v1.Node) uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.evictionRequestsTotal()
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) TotalEvictionRequests() uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.TotalEvictionRequests()
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// EvictOptions provides a handle for passing additional info to EvictPod
|
||||
type EvictOptions struct {
|
||||
// Reason allows for passing details about the specific eviction for logging.
|
||||
@@ -121,45 +454,70 @@ type EvictOptions struct {
|
||||
// EvictPod evicts a pod while exercising eviction limits.
|
||||
// Returns true when the pod is evicted on the server side.
|
||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
|
||||
if len(pod.UID) == 0 {
|
||||
klog.InfoS("Ignoring pod eviction due to missing UID", "pod", pod)
|
||||
return fmt.Errorf("Pod %v is missing UID", klog.KObj(pod))
|
||||
}
|
||||
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
// eviction in background requested
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
if pe.erCache.hasPod(pod) {
|
||||
klog.V(3).InfoS("Eviction in background already requested (ignoring)", "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
|
||||
defer span.End()
|
||||
|
||||
if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+1 > *pe.maxPodsToEvictTotal {
|
||||
if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+pe.evictionRequestsTotal()+1 > *pe.maxPodsToEvictTotal {
|
||||
err := NewEvictionTotalLimitError()
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictTotal)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
|
||||
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+pe.evictionRequestsPerNode(pod.Spec.NodeName)+1 > *pe.maxPodsToEvictPerNode {
|
||||
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNode)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
|
||||
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+pe.evictionRequestsPerNamespace(pod.Namespace)+1 > *pe.maxPodsToEvictPerNamespace {
|
||||
err := NewEvictionNamespaceLimitError(pod.Namespace)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNamespace)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
|
||||
ignore, err := pe.evictPod(ctx, pod)
|
||||
if err != nil {
|
||||
// err is used only for logging purposes
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
@@ -167,9 +525,16 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if ignore {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
pe.nodePodCount[pod.Spec.NodeName]++
|
||||
}
|
||||
@@ -191,17 +556,18 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
reason = "NotSet"
|
||||
}
|
||||
}
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
|
||||
// return (ignore, err)
|
||||
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
|
||||
deleteOptions := &metav1.DeleteOptions{}
|
||||
// GracePeriodSeconds ?
|
||||
eviction := &policy.Eviction{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: policyGroupVersion,
|
||||
APIVersion: pe.policyGroupVersion,
|
||||
Kind: eutils.EvictionKind,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -210,13 +576,36 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
|
||||
},
|
||||
DeleteOptions: deleteOptions,
|
||||
}
|
||||
err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||
err := pe.client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
// eviction in background requested
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
// Simulating https://github.com/kubevirt/kubevirt/pull/11532/files#diff-059cc1fc09e8b469143348cc3aa80b40de987670e008fa18a6fe010061f973c9R77
|
||||
if apierrors.IsTooManyRequests(err) && strings.Contains(err.Error(), EvictionInBackgroundErrorText) {
|
||||
// Ignore eviction of any pod that has failed or completed.
// An eviction in background can end up with the pod stuck in the completed state.
// Normally, any eviction request is expected to end with the pod being deleted.
// However, some custom eviction policies may leave completed pods around,
// which would otherwise keep being counted as unfinished evictions in background.
|
||||
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
||||
klog.V(3).InfoS("Ignoring eviction of a completed/failed pod", "pod", klog.KObj(pod))
|
||||
return true, nil
|
||||
}
|
||||
klog.V(3).InfoS("Eviction in background assumed", "pod", klog.KObj(pod))
|
||||
pe.erCache.assumePod(pod)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if apierrors.IsTooManyRequests(err) {
|
||||
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
}
|
||||
if apierrors.IsNotFound(err) {
|
||||
return fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
return false, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
}
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
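Before the test file, a hedged sketch of the contract the code above relies on: a custom eviction policy signals an "eviction in background" by rejecting the eviction subresource call with a 429 whose message contains EvictionInBackgroundErrorText. Inside a fake clientset reactor (as TestEvictionRequestsCacheCleanup below does), that response can be simulated like this:

// Illustrative only: make evictPod treat the request as an eviction in background.
return true, nil, &apierrors.StatusError{
	ErrStatus: metav1.Status{
		Reason:  metav1.StatusReasonTooManyRequests,
		Message: "Eviction triggered evacuation", // must contain EvictionInBackgroundErrorText
	},
}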
@@ -18,57 +18,107 @@ package evictions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
const (
|
||||
notFoundText = "pod not found when evicting \"%s\": pods \"%s\" not found"
|
||||
tooManyRequests = "error when evicting pod (ignoring) \"%s\": Too many requests: too many requests"
|
||||
)
|
||||
|
||||
func initFeatureGates() featuregate.FeatureGate {
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
return featureGates
|
||||
}
|
||||
|
||||
func TestEvictPod(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||
pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
|
||||
tests := []struct {
|
||||
description string
|
||||
node *v1.Node
|
||||
pod *v1.Pod
|
||||
pods []v1.Pod
|
||||
want error
|
||||
evictedPod *v1.Pod
|
||||
pods []runtime.Object
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
description: "test pod eviction - pod present",
|
||||
node: node1,
|
||||
pod: pod1,
|
||||
pods: []v1.Pod{*pod1},
|
||||
want: nil,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{pod1},
|
||||
},
|
||||
{
|
||||
description: "test pod eviction - pod absent",
|
||||
description: "test pod eviction - pod absent (not found error)",
|
||||
node: node1,
|
||||
pod: pod1,
|
||||
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
want: nil,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
wantErr: fmt.Errorf(notFoundText, pod1.Name, pod1.Name),
|
||||
},
|
||||
{
|
||||
description: "test pod eviction - pod absent (too many requests error)",
|
||||
node: node1,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
wantErr: fmt.Errorf(tooManyRequests, pod1.Name),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: test.pods}, nil
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeClient := fake.NewClientset(test.pods...)
|
||||
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, test.wantErr
|
||||
})
|
||||
got := evictPod(ctx, fakeClient, test.pod, "v1")
|
||||
if got != test.want {
|
||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
fakeClient,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
NewOptions(),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
_, got := podEvictor.evictPod(ctx, test.evictedPod)
|
||||
if got != test.wantErr {
|
||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,50 +168,317 @@ func TestPodTypes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewPodEvictor(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)
|
||||
|
||||
type podEvictorTest struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
dryRun bool
|
||||
evictionFailureEventNotification *bool
|
||||
maxPodsToEvictTotal *uint
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
expectedNodeEvictions uint
|
||||
expectedTotalEvictions uint
|
||||
expectedError error
|
||||
// expectedEvent is a slice of strings representing expected events.
|
||||
// Each string in the slice should follow the format: "EventType Reason Message".
|
||||
// - "Warning Failed processing failed"
|
||||
events []string
|
||||
}
|
||||
tests := []podEvictorTest{
|
||||
{
|
||||
description: "one eviction expected with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on total with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionTotalLimitError(),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNodeLimitError("node"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNamespaceLimitError("default"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction error with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: fmt.Errorf("eviction error"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: eviction error"},
|
||||
},
|
||||
{
|
||||
description: "eviction with dryRun with eviction failure event notification",
|
||||
pod: pod1,
|
||||
dryRun: true,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
description: "one eviction expected without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on total without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionTotalLimitError(),
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNodeLimitError("node"),
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNamespaceLimitError("default"),
|
||||
},
|
||||
{
|
||||
description: "eviction error without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: fmt.Errorf("eviction error"),
|
||||
},
|
||||
{
|
||||
description: "eviction without dryRun with eviction failure event notification",
|
||||
pod: pod1,
|
||||
dryRun: true,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
fakeClient := fake.NewSimpleClientset(pod1)
|
||||
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, test.expectedError
|
||||
})
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
podEvictor := NewPodEvictor(
|
||||
eventRecorder := events.NewFakeRecorder(100)
|
||||
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
fakeClient,
|
||||
eventRecorder,
|
||||
NewOptions().WithMaxPodsToEvictPerNode(utilptr.To[uint](1)),
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
NewOptions().
|
||||
WithDryRun(test.dryRun).
|
||||
WithMaxPodsToEvictTotal(test.maxPodsToEvictTotal).
|
||||
WithMaxPodsToEvictPerNode(test.maxPodsToEvictPerNode).
|
||||
WithEvictionFailureEventNotification(test.evictionFailureEventNotification).
|
||||
WithMaxPodsToEvictPerNamespace(test.maxPodsToEvictPerNamespace),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
|
||||
|
||||
// 0 evictions expected
|
||||
if evictions := podEvictor.NodeEvicted(stubNode); evictions != 0 {
|
||||
t.Errorf("Expected 0 node evictions, got %q instead", evictions)
|
||||
}
|
||||
// 0 evictions expected
|
||||
if evictions := podEvictor.TotalEvicted(); evictions != 0 {
|
||||
t.Errorf("Expected 0 total evictions, got %q instead", evictions)
|
||||
if actualErr := podEvictor.EvictPod(ctx, test.pod, EvictOptions{}); actualErr != nil && actualErr.Error() != test.expectedError.Error() {
|
||||
t.Errorf("Expected error: %v, got: %v", test.expectedError, actualErr)
|
||||
}
|
||||
|
||||
if err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{}); err != nil {
|
||||
t.Errorf("Expected a pod eviction, got an eviction error instead: %v", err)
|
||||
if evictions := podEvictor.NodeEvicted(stubNode); evictions != test.expectedNodeEvictions {
|
||||
t.Errorf("Expected %d node evictions, got %d instead", test.expectedNodeEvictions, evictions)
|
||||
}
|
||||
|
||||
// 1 node eviction expected
|
||||
if evictions := podEvictor.NodeEvicted(stubNode); evictions != 1 {
|
||||
t.Errorf("Expected 1 node eviction, got %q instead", evictions)
|
||||
}
|
||||
// 1 total eviction expected
|
||||
if evictions := podEvictor.TotalEvicted(); evictions != 1 {
|
||||
t.Errorf("Expected 1 total evictions, got %q instead", evictions)
|
||||
if evictions := podEvictor.TotalEvicted(); evictions != test.expectedTotalEvictions {
|
||||
t.Errorf("Expected %d total evictions, got %d instead", test.expectedTotalEvictions, evictions)
|
||||
}
|
||||
|
||||
err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{})
|
||||
if err == nil {
|
||||
t.Errorf("Expected a pod eviction error, got nil instead")
|
||||
}
|
||||
switch err.(type) {
|
||||
case *EvictionNodeLimitError:
|
||||
// all good
|
||||
default:
|
||||
t.Errorf("Expected a pod eviction EvictionNodeLimitError error, got a different error instead: %v", err)
|
||||
// Assert that the events are correct.
|
||||
assertEqualEvents(t, test.events, eventRecorder.Events)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvictionRequestsCacheCleanup(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
|
||||
updatePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
EvictionRequestAnnotationKey: "",
|
||||
}
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(node1, p1, p2, p3, p4)
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
_, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
|
||||
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
client,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
createAct, matched := action.(core.CreateActionImpl)
|
||||
if !matched {
|
||||
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
|
||||
}
|
||||
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||
podName := eviction.GetName()
|
||||
if podName == "p1" || podName == "p2" {
|
||||
return true, nil, &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Reason: metav1.StatusReasonTooManyRequests,
|
||||
Message: "Eviction triggered evacuation",
|
||||
},
|
||||
}
|
||||
}
|
||||
return true, nil, nil
|
||||
}
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
podEvictor.EvictPod(ctx, p1, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p2, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p3, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p4, EvictOptions{})
|
||||
|
||||
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
if total := podEvictor.TotalEvictionRequests(); total != 2 {
|
||||
t.Fatalf("Expected %v total eviction requests, got %v instead", 2, total)
|
||||
}
|
||||
if total := podEvictor.TotalEvicted(); total != 2 {
|
||||
t.Fatalf("Expected %v total evictions, got %v instead", 2, total)
|
||||
}
|
||||
|
||||
klog.Infof("2 evictions in background assumed. Wait for few seconds and check the assumed requests timed out")
|
||||
time.Sleep(2 * time.Second)
|
||||
klog.Infof("Checking the assumed requests timed out and were deleted")
|
||||
// Set the timeout to 1s so the cleaning can be tested
|
||||
podEvictor.erCache.assumedRequestTimeoutSeconds = 1
|
||||
podEvictor.erCache.cleanCache(ctx)
|
||||
if totalERs := podEvictor.TotalEvictionRequests(); totalERs > 0 {
|
||||
t.Fatalf("Expected 0 eviction requests, got %v instead", totalERs)
|
||||
}
|
||||
}
|
||||
|
||||
func assertEqualEvents(t *testing.T, expected []string, actual <-chan string) {
|
||||
t.Logf("Assert for events: %v", expected)
|
||||
c := time.After(wait.ForeverTestTimeout)
|
||||
for _, e := range expected {
|
||||
select {
|
||||
case a := <-actual:
|
||||
if !reflect.DeepEqual(a, e) {
|
||||
t.Errorf("Expected event %q, got %q instead", e, a)
|
||||
}
|
||||
case <-c:
|
||||
t.Errorf("Expected event %q, got nothing", e)
|
||||
// continue iterating to print all expected events
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case a := <-actual:
|
||||
t.Errorf("Unexpected event: %q", a)
|
||||
default:
|
||||
return // No more events, as expected.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ type Options struct {
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
evictionFailureEventNotification bool
|
||||
metricsEnabled bool
|
||||
}
|
||||
|
||||
@@ -49,3 +50,10 @@ func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
|
||||
o.metricsEnabled = metricsEnabled
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithEvictionFailureEventNotification(evictionFailureEventNotification *bool) *Options {
|
||||
if evictionFailureEventNotification != nil {
|
||||
o.evictionFailureEventNotification = *evictionFailureEventNotification
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
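For reference, the Options builder is meant to be chained, as the pod evictor tests above do; a usage sketch with arbitrary limit values (utilptr is k8s.io/utils/ptr):

// Illustrative only: build eviction limits and pass the result to NewPodEvictor.
opts := NewOptions().
	WithMaxPodsToEvictTotal(utilptr.To[uint](10)).
	WithMaxPodsToEvictPerNode(utilptr.To[uint](5)).
	WithMaxPodsToEvictPerNamespace(utilptr.To[uint](3)).
	WithEvictionFailureEventNotification(utilptr.To[bool](true)).
	WithMetricsEnabled(true)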
151
pkg/descheduler/metricscollector/metricscollector.go
Normal file
@@ -0,0 +1,151 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metricscollector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
listercorev1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
const (
|
||||
beta float64 = 0.9
|
||||
)
|
||||
|
||||
type MetricsCollector struct {
|
||||
nodeLister listercorev1.NodeLister
|
||||
metricsClientset metricsclient.Interface
|
||||
nodeSelector labels.Selector
|
||||
|
||||
nodes map[string]map[v1.ResourceName]*resource.Quantity
|
||||
|
||||
mu sync.RWMutex
|
||||
// hasSynced signals at least one sync succeeded
|
||||
hasSynced bool
|
||||
}
|
||||
|
||||
func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset metricsclient.Interface, nodeSelector labels.Selector) *MetricsCollector {
|
||||
return &MetricsCollector{
|
||||
nodeLister: nodeLister,
|
||||
metricsClientset: metricsClientset,
|
||||
nodeSelector: nodeSelector,
|
||||
nodes: make(map[string]map[v1.ResourceName]*resource.Quantity),
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) Run(ctx context.Context) {
|
||||
wait.NonSlidingUntil(func() {
|
||||
mc.Collect(ctx)
|
||||
}, 5*time.Second, ctx.Done())
|
||||
}
|
||||
|
||||
// During experiments, rounding to an integer caused weightedAverage to never
// quite reach the target value, even when weightedAverage was applied many times in a row;
// the difference between the target and the computed average settled within 5 units.
// Nevertheless, the measured value is expected to keep changing over time, so the
// weighted average never gets a chance to fully converge anyway, which makes the
// computed error negligible.
// The speed of convergence depends on how often the metrics collector
// syncs with the current value. Currently, the interval is set to 5s.
func weightedAverage(prevValue, value int64) int64 {
return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
}
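For intuition, a worked example of the smoothing above with beta = 0.9, reusing the CPU readings asserted in metricscollector_test.go; the helper is illustrative only and not part of this file:

// Illustrative only: the smoothed value follows new readings gradually.
// Starting from 1400 millicores and observing 500 and then 900 yields
// 1310 and 1269, the values the unit tests expect.
func exampleWeightedAverage() {
	usage := int64(1400)
	for _, sample := range []int64{500, 900} {
		usage = weightedAverage(usage, sample)
		fmt.Println(usage) // prints 1310, then 1269
	}
}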
|
||||
|
||||
func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
for nodeName := range mc.nodes {
|
||||
allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
|
||||
}
|
||||
}
|
||||
|
||||
return allNodesUsage, nil
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
if _, exists := mc.nodes[node.Name]; !exists {
|
||||
klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
|
||||
return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
return map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) HasSynced() bool {
|
||||
return mc.hasSynced
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) MetricsClient() metricsclient.Interface {
|
||||
return mc.metricsClientset
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) Collect(ctx context.Context) error {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
nodes, err := mc.nodeLister.List(mc.nodeSelector)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list nodes: %v", err)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
metrics, err := mc.metricsClientset.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error fetching metrics", "node", node.Name)
|
||||
// No entry -> duplicate the previous value -> do nothing as beta*PV + (1-beta)*PV = PV
|
||||
continue
|
||||
}
|
||||
|
||||
if _, exists := mc.nodes[node.Name]; !exists {
|
||||
mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
|
||||
}
|
||||
} else {
|
||||
// get MilliValue to reduce loss of precision
|
||||
mc.nodes[node.Name][v1.ResourceCPU].SetMilli(
|
||||
weightedAverage(mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), metrics.Usage.Cpu().MilliValue()),
|
||||
)
|
||||
mc.nodes[node.Name][v1.ResourceMemory].Set(
|
||||
weightedAverage(mc.nodes[node.Name][v1.ResourceMemory].Value(), metrics.Usage.Memory().Value()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
mc.hasSynced = true
|
||||
return nil
|
||||
}
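A minimal wiring sketch, assumed rather than taken from this diff: build the collector from a node lister and a metrics clientset, then run it in the background so the 5s refresh loop keeps the usage map current:

// Illustrative only: typical MetricsCollector setup.
collector := NewMetricsCollector(
	sharedInformerFactory.Core().V1().Nodes().Lister(),
	metricsClientset,
	labels.Everything(),
)
go collector.Run(ctx)
// Later, e.g. from a balance plugin:
usage, err := collector.NodeUsage(node)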
|
||||
141
pkg/descheduler/metricscollector/metricscollector_test.go
Normal file
@@ -0,0 +1,141 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metricscollector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
|
||||
t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
|
||||
if usage[v1.ResourceCPU].MilliValue() != millicpu {
|
||||
t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricsCollector(t *testing.T) {
|
||||
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(gvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
t.Logf("Set initial node cpu usage to 1400")
|
||||
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ := collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1400)
|
||||
allnodesUsage, _ := collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
|
||||
|
||||
t.Logf("Set current node cpu usage to 500")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1310)
|
||||
allnodesUsage, _ = collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1310)
|
||||
|
||||
t.Logf("Set current node cpu usage to 900")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1269)
|
||||
allnodesUsage, _ = collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1269)
|
||||
}
|
||||
|
||||
func TestMetricsCollectorConvergence(t *testing.T) {
|
||||
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(gvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
t.Logf("Set initial node cpu usage to 1400")
|
||||
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ := collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1400)
|
||||
allnodesUsage, _ := collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
|
||||
|
||||
t.Logf("Set current node cpu/memory usage to 900/1614978816 and wait until it converges to it")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
|
||||
n2metrics.Usage[v1.ResourceMemory] = *resource.NewQuantity(1614978816, resource.BinarySI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
converged := false
|
||||
for i := 0; i < 300; i++ {
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
if math.Abs(float64(900-nodesUsage[v1.ResourceCPU].MilliValue())) < 6 && math.Abs(float64(1614978816-nodesUsage[v1.ResourceMemory].Value())) < 6 {
|
||||
t.Logf("Node cpu/memory usage converged to 900+-5/1614978816+-5")
|
||||
converged = true
|
||||
break
|
||||
}
|
||||
t.Logf("The current node usage: cpu=%v, memory=%v", nodesUsage[v1.ResourceCPU].MilliValue(), nodesUsage[v1.ResourceMemory].Value())
|
||||
}
|
||||
if !converged {
|
||||
t.Fatalf("The node usage did not converged to 900+-1")
|
||||
}
|
||||
}
|
||||
@@ -218,7 +218,12 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
|
||||
resourceNames = append(resourceNames, name)
|
||||
}
|
||||
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames,
|
||||
func(pod *v1.Pod) (v1.ResourceList, error) {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
return req, nil
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -239,12 +244,15 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
|
||||
}
|
||||
|
||||
// nodeAvailableResources returns resources mapped to the quantity available on the node.
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
|
||||
nodeUtilization, err := NodeUtilization(podsOnNode, resourceNames, podUtilization)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remainingResources := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
|
||||
@@ -265,31 +273,34 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
|
||||
}
|
||||
|
||||
// NodeUtilization returns the utilization of the given pods as reported by the supplied podUtilization function. Only resources supplied in the resourceNames parameter are calculated.
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
|
||||
totalReqs := map[v1.ResourceName]*resource.Quantity{
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
totalUtilization := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
if !IsBasicResource(name) {
|
||||
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
podUtil, err := podUtilization(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
quantity, ok := req[name]
|
||||
quantity, ok := podUtil[name]
|
||||
if ok && name != v1.ResourcePods {
|
||||
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
|
||||
// the format of the quantity will be updated to the format of y.
|
||||
totalReqs[name].Add(quantity)
|
||||
totalUtilization[name].Add(quantity)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return totalReqs
|
||||
return totalUtilization, nil
|
||||
}
|
||||
|
||||
// IsBasicResource checks if resource is basic native.
|
||||
|
||||
@@ -39,6 +39,9 @@ type FilterFunc func(*v1.Pod) bool
// as input and returns the pods that are assigned to the node.
type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)

// PodUtilizationFnc is a function for getting a pod's utilization, e.g. its requested resources or its utilization from metrics.
type PodUtilizationFnc func(pod *v1.Pod) (v1.ResourceList, error)
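As a sketch of one possible implementation: the requests-based closure passed to nodeAvailableResources earlier in this diff can be written as a named PodUtilizationFnc (the sigs.k8s.io/descheduler/pkg/utils import is assumed here; a metrics-backed variant would instead return the pod's measured usage):

// Illustrative only: mirrors the closure used by fitsRequest above.
var requestedUtilization PodUtilizationFnc = func(pod *v1.Pod) (v1.ResourceList, error) {
	req, _ := utils.PodRequestsAndLimits(pod)
	return req, nil
}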
|
||||
|
||||
// WrapFilterFuncs wraps a set of FilterFunc in one.
|
||||
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
|
||||
return func(pod *v1.Pod) bool {
|
||||
|
||||
@@ -105,6 +105,7 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
IgnorePodsWithoutPDB: false,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
49
pkg/features/features.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package features

import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/component-base/featuregate"
)

const (
// Every feature gate should add method here following this template:
//
// // owner: @username
// // kep: kep link
// // alpha: v1.X
// MyFeature featuregate.Feature = "MyFeature"
//
// Feature gates should be listed in alphabetical, case-sensitive
// (upper before any lower case character) order. This reduces the risk
// of code conflicts because changes are more likely to be scattered
// across the file.

// owner: @ingvagabund
// kep: https://github.com/kubernetes-sigs/descheduler/issues/1397
// alpha: v1.31
//
// Enable evictions in background so users can create their own eviction policies
// as an alternative to immediate evictions.
EvictionsInBackground featuregate.Feature = "EvictionsInBackground"
)

func init() {
runtime.Must(DefaultMutableFeatureGate.Add(defaultDeschedulerFeatureGates))
}

// defaultDeschedulerFeatureGates consists of all known descheduler-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout descheduler binary.
//
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
// when adding or removing one entry.
var defaultDeschedulerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
}

// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
// Tests that need to modify feature gates for the duration of their test should use:
//
// defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)()
var DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
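A minimal sketch, not part of this diff, of flipping the alpha gate on at startup through the mutable gate above (SetFromMap is part of the MutableFeatureGate interface in k8s.io/component-base/featuregate):

// Illustrative only: enable EvictionsInBackground before the descheduler runs.
if err := DefaultMutableFeatureGate.SetFromMap(map[string]bool{
	string(EvictionsInBackground): true,
}); err != nil {
	panic(err) // in real code, surface the error instead of panicking
}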
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
@@ -18,6 +19,7 @@ type HandleImpl struct {
|
||||
SharedInformerFactoryImpl informers.SharedInformerFactory
|
||||
EvictorFilterImpl frameworktypes.EvictorPlugin
|
||||
PodEvictorImpl *evictions.PodEvictor
|
||||
MetricsCollectorImpl *metricscollector.MetricsCollector
|
||||
}
|
||||
|
||||
var _ frameworktypes.Handle = &HandleImpl{}
|
||||
@@ -26,6 +28,10 @@ func (hi *HandleImpl) ClientSet() clientset.Interface {
|
||||
return hi.ClientsetImpl
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector {
|
||||
return hi.MetricsCollectorImpl
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
|
||||
return hi.GetPodsAssignedToNodeFuncImpl
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -195,6 +195,20 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.IgnorePodsWithoutPDB {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
|
||||
}
|
||||
if !hasPdb {
|
||||
return fmt.Errorf("no PodDisruptionBudget found for pod")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
@@ -39,6 +40,7 @@ type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
pdbs []*policyv1.PodDisruptionBudget
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
@@ -47,6 +49,7 @@ type testCase struct {
|
||||
minReplicas uint
|
||||
minPodAge *metav1.Duration
|
||||
result bool
|
||||
ignorePodsWithoutPDB bool
|
||||
}
|
||||
|
||||
func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
@@ -739,6 +742,33 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}),
|
||||
},
|
||||
result: true,
|
||||
}, {
|
||||
description: "ignorePodsWithoutPDB, pod with no PDBs, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Labels = map[string]string{
|
||||
"app": "foo",
|
||||
}
|
||||
}),
|
||||
},
|
||||
ignorePodsWithoutPDB: true,
|
||||
result: false,
|
||||
}, {
|
||||
description: "ignorePodsWithoutPDB, pod with PDBs, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Labels = map[string]string{
|
||||
"app": "foo",
|
||||
}
|
||||
}),
|
||||
},
|
||||
pdbs: []*policyv1.PodDisruptionBudget{
|
||||
test.BuildTestPDB("pdb1", "foo"),
|
||||
},
|
||||
ignorePodsWithoutPDB: true,
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -811,11 +841,15 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
|
||||
for _, pod := range test.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
for _, pdb := range test.pdbs {
|
||||
objs = append(objs, pdb)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
_ = sharedInformerFactory.Policy().V1().PodDisruptionBudgets().Lister()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
@@ -836,6 +870,7 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
|
||||
NodeFit: test.nodeFit,
|
||||
MinReplicas: test.minReplicas,
|
||||
MinPodAge: test.minPodAge,
|
||||
IgnorePodsWithoutPDB: test.ignorePodsWithoutPDB,
|
||||
}
|
||||
|
||||
evictorPlugin, err := New(
|
||||
|
||||
@@ -42,6 +42,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
LabelSelector: nil,
|
||||
PriorityThreshold: nil,
|
||||
NodeFit: false,
|
||||
IgnorePodsWithoutPDB: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -58,6 +59,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
Value: utilptr.To[int32](800),
|
||||
},
|
||||
NodeFit: true,
|
||||
IgnorePodsWithoutPDB: true,
|
||||
},
|
||||
want: &DefaultEvictorArgs{
|
||||
NodeSelector: "NodeSelector",
|
||||
@@ -71,6 +73,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
Value: utilptr.To[int32](800),
|
||||
},
|
||||
NodeFit: true,
|
||||
IgnorePodsWithoutPDB: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -36,4 +36,5 @@ type DefaultEvictorArgs struct {
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
|
||||
}
|
||||
|
||||
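For reference, enabling the new knob from Go mirrors the defaulting test above. All field names come from the DefaultEvictorArgs hunks in this diff; the concrete values and the wrapper function are only illustrative.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
)

func exampleEvictorArgs() *defaultevictor.DefaultEvictorArgs {
	return &defaultevictor.DefaultEvictorArgs{
		NodeFit:     true,
		MinReplicas: 2,
		MinPodAge:   &metav1.Duration{Duration: 5 * time.Minute},
		// Only consider pods that are selected by at least one PodDisruptionBudget.
		IgnorePodsWithoutPDB: true,
	}
}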
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -44,6 +44,7 @@ type HighNodeUtilization struct {
|
||||
underutilizationCriteria []interface{}
|
||||
resourceNames []v1.ResourceName
|
||||
targetThresholds api.ResourceThresholds
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
|
||||
@@ -84,6 +85,7 @@ func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (
|
||||
targetThresholds: targetThresholds,
|
||||
underutilizationCriteria: underutilizationCriteria,
|
||||
podFilter: podFilter,
|
||||
usageClient: newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -94,9 +96,15 @@ func (h *HighNodeUtilization) Name() string {
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
if err := h.usageClient.sync(nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
sourceNodes, highNodes := classifyNodes(
|
||||
getNodeUsage(nodes, h.resourceNames, h.handle.GetPodsAssignedToNodeFunc()),
|
||||
getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, h.handle.GetPodsAssignedToNodeFunc(), false),
|
||||
getNodeUsage(nodes, h.usageClient),
|
||||
getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, false, h.usageClient),
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
},
|
||||
@@ -152,7 +160,9 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
|
||||
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
|
||||
h.podFilter,
|
||||
h.resourceNames,
|
||||
continueEvictionCond)
|
||||
continueEvictionCond,
|
||||
h.usageClient,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
@@ -43,6 +44,7 @@ type LowNodeUtilization struct {
|
||||
underutilizationCriteria []interface{}
|
||||
overutilizationCriteria []interface{}
|
||||
resourceNames []v1.ResourceName
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
|
||||
@@ -85,13 +87,26 @@ func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (f
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
resourceNames := getResourceNames(lowNodeUtilizationArgsArgs.Thresholds)
|
||||
|
||||
var usageClient usageClient
|
||||
if lowNodeUtilizationArgsArgs.MetricsUtilization.MetricsServer {
|
||||
if handle.MetricsCollector() == nil {
|
||||
return nil, fmt.Errorf("metrics client not initialized")
|
||||
}
|
||||
usageClient = newActualUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
|
||||
} else {
|
||||
usageClient = newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc())
|
||||
}
|
||||
|
||||
return &LowNodeUtilization{
|
||||
handle: handle,
|
||||
args: lowNodeUtilizationArgsArgs,
|
||||
underutilizationCriteria: underutilizationCriteria,
|
||||
overutilizationCriteria: overutilizationCriteria,
|
||||
resourceNames: getResourceNames(lowNodeUtilizationArgsArgs.Thresholds),
|
||||
resourceNames: resourceNames,
|
||||
podFilter: podFilter,
|
||||
usageClient: usageClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -102,9 +117,15 @@ func (l *LowNodeUtilization) Name() string {
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
if err := l.usageClient.sync(nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
lowNodes, sourceNodes := classifyNodes(
|
||||
getNodeUsage(nodes, l.resourceNames, l.handle.GetPodsAssignedToNodeFunc()),
|
||||
getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.handle.GetPodsAssignedToNodeFunc(), l.args.UseDeviationThresholds),
|
||||
getNodeUsage(nodes, l.usageClient),
|
||||
getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.args.UseDeviationThresholds, l.usageClient),
|
||||
// The node has to be schedulable (to be able to move workload there)
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
@@ -172,7 +193,9 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
|
||||
l.podFilter,
|
||||
l.resourceNames,
|
||||
continueEvictionCond)
|
||||
continueEvictionCond,
|
||||
l.usageClient,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -21,19 +21,23 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -53,7 +57,10 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
thresholds, targetThresholds api.ResourceThresholds
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
nodemetricses []*v1beta1.NodeMetrics
|
||||
podmetricses []*v1beta1.PodMetrics
|
||||
expectedPodsEvicted uint
|
||||
expectedPodsWithMetricsEvicted uint
|
||||
evictedPods []string
|
||||
evictableNamespaces *api.Namespaces
|
||||
}{
|
||||
@@ -103,7 +110,20 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 2401, 1714978816),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 1714978816),
|
||||
test.BuildNodeMetrics(n3NodeName, 10, 1714978816),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities",
|
||||
@@ -153,7 +173,20 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
expectedPodsWithMetricsEvicted: 4,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but excluding namespaces",
|
||||
@@ -218,12 +251,25 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
evictableNamespaces: &api.Namespaces{
|
||||
Exclude: []string{
|
||||
"namespace1",
|
||||
},
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but include only default namespace",
|
||||
@@ -283,12 +329,25 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
evictableNamespaces: &api.Namespaces{
|
||||
Include: []string{
|
||||
"default",
|
||||
},
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
},
|
||||
{
|
||||
name: "without priorities stop when cpu capacity is depleted",
|
||||
@@ -306,14 +365,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 300, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 300, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 300, n1NodeName, func(pod *v1.Pod) {
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
@@ -330,17 +389,29 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 2100, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before cpu is depleted
|
||||
expectedPodsEvicted: 3,
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 0, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
expectedPodsWithMetricsEvicted: 4,
|
||||
},
|
||||
{
|
||||
name: "with priorities",
|
||||
@@ -410,7 +481,20 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
expectedPodsWithMetricsEvicted: 4,
|
||||
},
|
||||
{
|
||||
name: "without priorities evicting best-effort pods only",
|
||||
@@ -478,7 +562,20 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
expectedPodsWithMetricsEvicted: 4,
|
||||
evictedPods: []string{"p1", "p2", "p4", "p5"},
|
||||
},
|
||||
{
|
||||
@@ -558,8 +655,21 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
// 4 pods available for eviction based on v1.ResourcePods, only 3 pods can be evicted before extended resource is depleted
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "with extended resource in some of nodes",
|
||||
@@ -586,8 +696,21 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
// 0 pods available for eviction because there's not enough extended resource in node2
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node is unschedulable",
|
||||
@@ -636,7 +759,19 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node doesn't match pod node selector for p4 and p5",
|
||||
@@ -701,7 +836,17 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node doesn't match pod node affinity for p4 and p5",
|
||||
@@ -795,7 +940,17 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "deviation thresholds",
|
||||
@@ -847,13 +1002,131 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
evictedPods: []string{},
|
||||
},
|
||||
{
|
||||
name: "without priorities different evictions for requested and actual resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeSelectorKey: notMatchingNodeSelectorValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with affinity to run in the "west" datacenter upon scheduling
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeSelectorKey,
|
||||
Operator: "In",
|
||||
Values: []string{nodeSelectorValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with affinity to run in the "west" datacenter upon scheduling
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeSelectorKey,
|
||||
Operator: "In",
|
||||
Values: []string{nodeSelectorValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 801, 0),
|
||||
test.BuildPodMetrics("p2", 801, 0),
|
||||
test.BuildPodMetrics("p3", 801, 0),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
testFnc := func(metricsEnabled bool, expectedPodsEvicted uint) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@@ -864,8 +1137,31 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
for _, pod := range tc.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
var collector *metricscollector.MetricsCollector
|
||||
if metricsEnabled {
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
for _, nodemetrics := range tc.nodemetricses {
|
||||
metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
|
||||
}
|
||||
for _, podmetrics := range tc.podmetricses {
|
||||
metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
|
||||
}
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
collector = metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
err := collector.Collect(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to collect metrics: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
podsForEviction := make(map[string]struct{})
|
||||
for _, pod := range tc.evictedPods {
|
||||
podsForEviction[pod] = struct{}{}
|
||||
@@ -891,12 +1187,16 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
handle.MetricsCollectorImpl = collector
|
||||
|
||||
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
|
||||
Thresholds: tc.thresholds,
|
||||
TargetThresholds: tc.targetThresholds,
|
||||
UseDeviationThresholds: tc.useDeviationThresholds,
|
||||
EvictableNamespaces: tc.evictableNamespaces,
|
||||
MetricsUtilization: MetricsUtilization{
|
||||
MetricsServer: metricsEnabled,
|
||||
},
|
||||
},
|
||||
handle)
|
||||
if err != nil {
|
||||
@@ -905,13 +1205,16 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if tc.expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %v pods to be evicted but %v got evicted", tc.expectedPodsEvicted, podsEvicted)
|
||||
if expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %v pods to be evicted but %v got evicted", expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
if evictionFailed {
|
||||
t.Errorf("Pod evictions failed unexpectedly")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
t.Run(tc.name, testFnc(false, tc.expectedPodsEvicted))
|
||||
t.Run(tc.name+" with metrics enabled", testFnc(true, tc.expectedPodsWithMetricsEvicted))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -78,14 +77,14 @@ func getNodeThresholds(
|
||||
nodes []*v1.Node,
|
||||
lowThreshold, highThreshold api.ResourceThresholds,
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
useDeviationThresholds bool,
|
||||
usageClient usageClient,
|
||||
) map[string]NodeThresholds {
|
||||
nodeThresholdsMap := map[string]NodeThresholds{}
|
||||
|
||||
averageResourceUsagePercent := api.ResourceThresholds{}
|
||||
if useDeviationThresholds {
|
||||
averageResourceUsagePercent = averageNodeBasicresources(nodes, getPodsAssignedToNode, resourceNames)
|
||||
averageResourceUsagePercent = averageNodeBasicresources(nodes, usageClient)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
@@ -121,22 +120,15 @@ func getNodeThresholds(
|
||||
|
||||
func getNodeUsage(
|
||||
nodes []*v1.Node,
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
usageClient usageClient,
|
||||
) []NodeUsage {
|
||||
var nodeUsageList []NodeUsage
|
||||
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
nodeUsageList = append(nodeUsageList, NodeUsage{
|
||||
node: node,
|
||||
usage: nodeutil.NodeUtilization(pods, resourceNames),
|
||||
allPods: pods,
|
||||
usage: usageClient.nodeUtilization(node.Name),
|
||||
allPods: usageClient.pods(node.Name),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -214,6 +206,26 @@ func classifyNodes(
|
||||
return lowNodes, highNodes
|
||||
}
|
||||
|
||||
func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
|
||||
// log message in one line
|
||||
keysAndValues := []interface{}{}
|
||||
if quantity, exists := usage[v1.ResourceCPU]; exists {
|
||||
keysAndValues = append(keysAndValues, "CPU", quantity.MilliValue())
|
||||
}
|
||||
if quantity, exists := usage[v1.ResourceMemory]; exists {
|
||||
keysAndValues = append(keysAndValues, "Mem", quantity.Value())
|
||||
}
|
||||
if quantity, exists := usage[v1.ResourcePods]; exists {
|
||||
keysAndValues = append(keysAndValues, "Pods", quantity.Value())
|
||||
}
|
||||
for name := range usage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), usage[name].Value())
|
||||
}
|
||||
}
|
||||
return keysAndValues
|
||||
}
|
||||
|
||||
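The usageToKeysAndValues helper above replaces the two hand-rolled key/value slices that previously lived in evictPodsFromSourceNodes and evictPods. A sketch of a call site, assuming it sits in the same package as the helper (the quantities are made up):

func logTotalCapacitySketch() {
	total := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU:    resource.NewMilliQuantity(1200, resource.DecimalSI),
		v1.ResourceMemory: resource.NewQuantity(2*1024*1024*1024, resource.BinarySI),
		v1.ResourcePods:   resource.NewQuantity(7, resource.DecimalSI),
	}
	// Emits one structured line, e.g.:
	//   "Total capacity to be moved" CPU=1200 Mem=2147483648 Pods=7
	klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(total)...)
}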
// evictPodsFromSourceNodes evicts pods based on priority, if all the pods on the node have priority, if not
|
||||
// evicts them based on QoS as fallback option.
|
||||
// TODO: @ravig Break this function into smaller functions.
|
||||
@@ -226,12 +238,12 @@ func evictPodsFromSourceNodes(
|
||||
podFilter func(pod *v1.Pod) bool,
|
||||
resourceNames []v1.ResourceName,
|
||||
continueEviction continueEvictionCond,
|
||||
usageClient usageClient,
|
||||
) {
|
||||
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
|
||||
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourcePods: {},
|
||||
v1.ResourceCPU: {},
|
||||
v1.ResourceMemory: {},
|
||||
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
|
||||
for _, resourceName := range resourceNames {
|
||||
totalAvailableUsage[resourceName] = &resource.Quantity{}
|
||||
}
|
||||
|
||||
taintsOfDestinationNodes := make(map[string][]v1.Taint, len(destinationNodes))
|
||||
@@ -239,6 +251,10 @@ func evictPodsFromSourceNodes(
|
||||
taintsOfDestinationNodes[node.node.Name] = node.node.Spec.Taints
|
||||
|
||||
for _, name := range resourceNames {
|
||||
if _, exists := node.usage[name]; !exists {
|
||||
klog.Errorf("unable to find %q resource in node's %q usage, terminating eviction", name, node.node.Name)
|
||||
return
|
||||
}
|
||||
if _, ok := totalAvailableUsage[name]; !ok {
|
||||
totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
@@ -248,17 +264,7 @@ func evictPodsFromSourceNodes(
|
||||
}
|
||||
|
||||
// log message in one line
|
||||
keysAndValues := []interface{}{
|
||||
"CPU", totalAvailableUsage[v1.ResourceCPU].MilliValue(),
|
||||
"Mem", totalAvailableUsage[v1.ResourceMemory].Value(),
|
||||
"Pods", totalAvailableUsage[v1.ResourcePods].Value(),
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
if !node.IsBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
|
||||
}
|
||||
}
|
||||
klog.V(1).InfoS("Total capacity to be moved", keysAndValues...)
|
||||
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(totalAvailableUsage)...)
|
||||
|
||||
for _, node := range sourceNodes {
|
||||
klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
|
||||
@@ -274,7 +280,7 @@ func evictPodsFromSourceNodes(
|
||||
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
|
||||
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
|
||||
err := evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction)
|
||||
err := evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction, usageClient)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
@@ -295,6 +301,7 @@ func evictPods(
|
||||
podEvictor frameworktypes.Evictor,
|
||||
evictOptions evictions.EvictOptions,
|
||||
continueEviction continueEvictionCond,
|
||||
usageClient usageClient,
|
||||
) error {
|
||||
var excludedNamespaces sets.Set[string]
|
||||
if evictableNamespaces != nil {
|
||||
@@ -320,6 +327,11 @@ func evictPods(
|
||||
if !preEvictionFilterWithOptions(pod) {
|
||||
continue
|
||||
}
|
||||
podUsage, err := usageClient.podUsage(pod)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to get pod usage for %v/%v: %v", pod.Namespace, pod.Name, err)
|
||||
continue
|
||||
}
|
||||
err = podEvictor.Evict(ctx, pod, evictOptions)
|
||||
if err == nil {
|
||||
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
|
||||
@@ -329,24 +341,15 @@ func evictPods(
|
||||
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
} else {
|
||||
quantity := utils.GetResourceRequestQuantity(pod, name)
|
||||
nodeInfo.usage[name].Sub(quantity)
|
||||
totalAvailableUsage[name].Sub(quantity)
|
||||
nodeInfo.usage[name].Sub(*podUsage[name])
|
||||
totalAvailableUsage[name].Sub(*podUsage[name])
|
||||
}
|
||||
}
|
||||
|
||||
keysAndValues := []interface{}{
|
||||
"node", nodeInfo.node.Name,
|
||||
"CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
|
||||
"Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
|
||||
"Pods", nodeInfo.usage[v1.ResourcePods].Value(),
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
|
||||
}
|
||||
}
|
||||
|
||||
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
|
||||
klog.V(3).InfoS("Updated node usage", keysAndValues...)
|
||||
// check if pods can be still evicted
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
@@ -368,14 +371,20 @@ func evictPods(
|
||||
// sortNodesByUsage sorts nodes based on usage according to the given plugin.
|
||||
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
|
||||
tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()
|
||||
|
||||
// extended resources
|
||||
for name := range nodes[i].usage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
ti = ti + nodes[i].usage[name].Value()
|
||||
tj = tj + nodes[j].usage[name].Value()
|
||||
ti := resource.NewQuantity(0, resource.DecimalSI).Value()
|
||||
tj := resource.NewQuantity(0, resource.DecimalSI).Value()
|
||||
for resourceName := range nodes[i].usage {
|
||||
if resourceName == v1.ResourceCPU {
|
||||
ti += nodes[i].usage[resourceName].MilliValue()
|
||||
} else {
|
||||
ti += nodes[i].usage[resourceName].Value()
|
||||
}
|
||||
}
|
||||
for resourceName := range nodes[j].usage {
|
||||
if resourceName == v1.ResourceCPU {
|
||||
tj += nodes[j].usage[resourceName].MilliValue()
|
||||
} else {
|
||||
tj += nodes[j].usage[resourceName].Value()
|
||||
}
|
||||
}
|
||||
|
||||
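The rewritten comparator no longer hard-codes CPU, memory and pods; it sums milli-CPU plus the plain value of every other resource present in the usage map, so extended resources participate automatically. Pulled out as a helper, the per-node score each side of the comparison computes is equivalent to this sketch (the plugin keeps the logic inline):

// nodeUsageScore is the quantity each side of the sort.Slice comparison
// effectively computes: milli-CPU plus the raw value of every other resource.
func nodeUsageScore(usage map[v1.ResourceName]*resource.Quantity) int64 {
	var total int64
	for name, quantity := range usage {
		if name == v1.ResourceCPU {
			total += quantity.MilliValue()
		} else {
			total += quantity.Value()
		}
	}
	return total
}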
@@ -437,17 +446,12 @@ func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*
|
||||
return nonRemovablePods, removablePods
|
||||
}
|
||||
|
||||
func averageNodeBasicresources(nodes []*v1.Node, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, resourceNames []v1.ResourceName) api.ResourceThresholds {
|
||||
func averageNodeBasicresources(nodes []*v1.Node, usageClient usageClient) api.ResourceThresholds {
|
||||
total := api.ResourceThresholds{}
|
||||
average := api.ResourceThresholds{}
|
||||
numberOfNodes := len(nodes)
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
numberOfNodes--
|
||||
continue
|
||||
}
|
||||
usage := nodeutil.NodeUtilization(pods, resourceNames)
|
||||
usage := usageClient.nodeUtilization(node.Name)
|
||||
nodeCapacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
|
||||
@@ -25,82 +25,34 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
|
||||
nodeInfo := &NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name},
|
||||
},
|
||||
},
|
||||
}
|
||||
apply(nodeInfo)
|
||||
return nodeInfo
|
||||
}
|
||||
|
||||
var (
|
||||
lowPriority = int32(0)
|
||||
highPriority = int32(10000)
|
||||
extendedResource = v1.ResourceName("example.com/foo")
|
||||
testNode1 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
testNode2 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
testNode3 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node3"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func TestResourceUsagePercentages(t *testing.T) {
|
||||
@@ -141,26 +93,81 @@ func TestResourceUsagePercentages(t *testing.T) {
|
||||
t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
|
||||
}
|
||||
|
||||
func TestSortNodesByUsageDescendingOrder(t *testing.T) {
|
||||
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
|
||||
expectedNodeList := []NodeInfo{testNode3, testNode1, testNode2} // testNode3 has the highest usage
|
||||
sortNodesByUsage(nodeList, false) // ascending=false, sort nodes in descending order
|
||||
|
||||
for i := 0; i < len(expectedNodeList); i++ {
|
||||
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
|
||||
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortNodesByUsageAscendingOrder(t *testing.T) {
|
||||
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
|
||||
expectedNodeList := []NodeInfo{testNode2, testNode1, testNode3}
|
||||
sortNodesByUsage(nodeList, true) // ascending=true, sort nodes in ascending order
|
||||
|
||||
for i := 0; i < len(expectedNodeList); i++ {
|
||||
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
|
||||
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
|
||||
func TestSortNodesByUsage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
nodeInfoList []NodeInfo
|
||||
expectedNodeInfoNames []string
|
||||
}{
|
||||
{
|
||||
name: "cpu memory pods",
|
||||
nodeInfoList: []NodeInfo{
|
||||
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
|
||||
},
|
||||
{
|
||||
name: "memory",
|
||||
nodeInfoList: []NodeInfo{
|
||||
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name+" descending", func(t *testing.T) {
|
||||
sortNodesByUsage(tc.nodeInfoList, false) // ascending=false, sort nodes in descending order
|
||||
|
||||
for i := 0; i < len(tc.nodeInfoList); i++ {
|
||||
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[i] {
|
||||
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[i], tc.nodeInfoList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run(tc.name+" ascending", func(t *testing.T) {
|
||||
sortNodesByUsage(tc.nodeInfoList, true) // ascending=true, sort nodes in ascending order
|
||||
|
||||
size := len(tc.nodeInfoList)
|
||||
for i := 0; i < size; i++ {
|
||||
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[size-i-1] {
|
||||
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[size-i-1], tc.nodeInfoList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ type LowNodeUtilizationArgs struct {
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
TargetThresholds api.ResourceThresholds `json:"targetThresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
@@ -43,8 +44,17 @@ type HighNodeUtilizationArgs struct {
|
||||
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
// but then filtered out before eviction
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
|
||||
}
|
||||
|
||||
// MetricsUtilization allow to consume actual resource utilization from metrics
|
||||
type MetricsUtilization struct {
|
||||
// metricsServer enables metrics from a kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
MetricsServer bool `json:"metricsServer,omitempty"`
|
||||
}
|
||||
|
||||
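Wiring this together from outside the package looks roughly like the updated unit test: set MetricsUtilization on the args and make sure the framework handle carries a metrics collector, otherwise NewLowNodeUtilization fails with "metrics client not initialized". The thresholds below are placeholder values; only the field and type names are taken from this diff.

package example

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
)

func exampleLowNodeUtilizationArgs() *nodeutilization.LowNodeUtilizationArgs {
	return &nodeutilization.LowNodeUtilizationArgs{
		Thresholds: api.ResourceThresholds{
			v1.ResourceCPU:  30,
			v1.ResourcePods: 30,
		},
		TargetThresholds: api.ResourceThresholds{
			v1.ResourceCPU:  50,
			v1.ResourcePods: 50,
		},
		// Consume live usage from metrics-server instead of summed pod requests.
		MetricsUtilization: nodeutilization.MetricsUtilization{MetricsServer: true},
	}
}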
201
pkg/framework/plugins/nodeutilization/usageclients.go
Normal file
@@ -0,0 +1,201 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
type usageClient interface {
|
||||
// Both low/high node utilization plugins are expected to invoke sync right
|
||||
// after Balance method is invoked. There's no cache invalidation so each
|
||||
// Balance is expected to get the latest data by invoking sync.
|
||||
sync(nodes []*v1.Node) error
|
||||
nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
|
||||
pods(node string) []*v1.Pod
|
||||
podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
|
||||
}
|
||||
|
||||
type requestedUsageClient struct {
|
||||
resourceNames []v1.ResourceName
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
}
|
||||
|
||||
var _ usageClient = &requestedUsageClient{}
|
||||
|
||||
func newRequestedUsageClient(
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
) *requestedUsageClient {
|
||||
return &requestedUsageClient{
|
||||
resourceNames: resourceNames,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
return s._nodeUtilization[node]
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) pods(node string) []*v1.Pod {
|
||||
return s._pods[node]
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
usage := make(map[v1.ResourceName]*resource.Quantity)
|
||||
for _, resourceName := range s.resourceNames {
|
||||
usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
|
||||
s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
s._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, s.getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
nodeUsage, err := nodeutil.NodeUtilization(pods, s.resourceNames, func(pod *v1.Pod) (v1.ResourceList, error) {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
return req, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
s._pods[node.Name] = pods
|
||||
s._nodeUtilization[node.Name] = nodeUsage
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type actualUsageClient struct {
|
||||
resourceNames []v1.ResourceName
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
}
|
||||
|
||||
var _ usageClient = &actualUsageClient{}
|
||||
|
||||
func newActualUsageClient(
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
metricsCollector *metricscollector.MetricsCollector,
|
||||
) *actualUsageClient {
|
||||
return &actualUsageClient{
|
||||
resourceNames: resourceNames,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
metricsCollector: metricsCollector,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
return client._nodeUtilization[node]
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) pods(node string) []*v1.Pod {
|
||||
return client._pods[node]
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
// It's not efficient to keep track of all pods in a cluster when only a fraction of them is evicted.
|
||||
// Thus, take the current pod metrics without computing any softening (like e.g. EWMA).
|
||||
podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
totalUsage := make(map[v1.ResourceName]*resource.Quantity)
|
||||
for _, container := range podMetrics.Containers {
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if resourceName == v1.ResourcePods {
|
||||
continue
|
||||
}
|
||||
if _, exists := container.Usage[resourceName]; !exists {
|
||||
return nil, fmt.Errorf("pod %v/%v: container %q is missing %q resource", pod.Namespace, pod.Name, container.Name, resourceName)
|
||||
}
|
||||
if totalUsage[resourceName] == nil {
|
||||
totalUsage[resourceName] = utilptr.To[resource.Quantity](container.Usage[resourceName].DeepCopy())
|
||||
} else {
|
||||
totalUsage[resourceName].Add(container.Usage[resourceName])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return totalUsage, nil
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) sync(nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
client._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
nodesUsage, err := client.metricsCollector.AllNodesUsage()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
nodeUsage, ok := nodesUsage[node.Name]
|
||||
if !ok {
|
||||
return fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
nodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if _, exists := nodeUsage[resourceName]; !exists {
|
||||
return fmt.Errorf("unable to find %q resource for collected %q node metric", resourceName, node.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
client._pods[node.Name] = pods
|
||||
client._nodeUtilization[node.Name] = nodeUsage
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
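Both balance plugins drive the usageClient interface the same way: one sync at the top of Balance, then reads of nodeUtilization, pods and podUsage against that snapshot. Below is a condensed sketch of that flow in the same package, with error handling trimmed; only the interface method names are taken from this diff, the surrounding function is illustrative.

func balanceSketch(ctx context.Context, nodes []*v1.Node, client usageClient) error {
	// One sync per Balance invocation; the clients keep no cache between runs.
	if err := client.sync(nodes); err != nil {
		return fmt.Errorf("error getting node usage: %v", err)
	}
	for _, node := range nodes {
		usage := client.nodeUtilization(node.Name) // requests- or metrics-based, depending on the client
		pods := client.pods(node.Name)             // pod snapshot taken during sync
		klog.V(3).InfoS("Node usage", "node", node.Name, "usage", usage, "pods", len(pods))
		for _, pod := range pods {
			if podUsage, err := client.podUsage(pod); err == nil {
				// This is what evictPods subtracts from node and cluster totals
				// after each successful eviction.
				_ = podUsage
			}
		}
	}
	return nil
}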
139
pkg/framework/plugins/nodeutilization/usageclients_test.go
Normal file
@@ -0,0 +1,139 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
var (
|
||||
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
|
||||
)
|
||||
|
||||
func updateMetricsAndCheckNodeUtilization(
|
||||
t *testing.T,
|
||||
ctx context.Context,
|
||||
newValue, expectedValue int64,
|
||||
metricsClientset *fakemetricsclient.Clientset,
|
||||
collector *metricscollector.MetricsCollector,
|
||||
usageClient usageClient,
|
||||
nodes []*v1.Node,
|
||||
nodeName string,
|
||||
nodemetrics *v1beta1.NodeMetrics,
|
||||
) {
|
||||
t.Logf("Set current node cpu usage to %v", newValue)
|
||||
nodemetrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(newValue, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(nodesgvr, nodemetrics, "")
|
||||
err := collector.Collect(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to capture metrics: %v", err)
|
||||
}
|
||||
err = usageClient.sync(nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to capture a snapshot: %v", err)
|
||||
}
|
||||
nodeUtilization := usageClient.nodeUtilization(nodeName)
|
||||
t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue())
|
||||
if nodeUtilization[v1.ResourceCPU].MilliValue() != expectedValue {
|
||||
t.Fatalf("cpu node usage expected to be %v, got %v instead", expectedValue, nodeUtilization[v1.ResourceCPU].MilliValue())
|
||||
}
|
||||
pods := usageClient.pods(nodeName)
|
||||
fmt.Printf("pods: %#v\n", pods)
|
||||
if len(pods) != 2 {
|
||||
t.Fatalf("expected 2 pods for node %v, got %v instead", nodeName, len(pods))
|
||||
}
|
||||
}
|
||||
|
||||
func TestActualUsageClient(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
|
||||
p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
|
||||
p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
|
||||
|
||||
nodes := []*v1.Node{n1, n2, n3}
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(nodesgvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(nodesgvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(nodesgvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
resourceNames := []v1.ResourceName{
|
||||
v1.ResourceCPU,
|
||||
v1.ResourceMemory,
|
||||
}
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Fatalf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
collector := metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
|
||||
usageClient := newActualUsageClient(
|
||||
resourceNames,
|
||||
podsAssignedToNode,
|
||||
collector,
|
||||
)
|
||||
|
||||
updateMetricsAndCheckNodeUtilization(t, ctx,
|
||||
1400, 1400,
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
|
||||
updateMetricsAndCheckNodeUtilization(t, ctx,
|
||||
500, 1310,
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
|
||||
updateMetricsAndCheckNodeUtilization(t, ctx,
|
||||
900, 1269,
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
}
|
||||
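The expected values in the test above (1400, then 1310, then 1269 millicores) are consistent with the metrics collector blending each new node sample into a weighted average that keeps roughly 90% of the previous value. A back-of-the-envelope check; the 0.9 weight is inferred from these numbers rather than quoted from the collector's code:

package main

import "fmt"

func main() {
	smoothed := 1400.0
	for _, sample := range []float64{500, 900} {
		smoothed = smoothed*0.9 + sample*0.1
		fmt.Println(smoothed) // prints 1310, then 1269
	}
}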
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -37,6 +37,7 @@ func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
		(*out)[key] = val
	}
}
out.MetricsUtilization = in.MetricsUtilization
if in.EvictableNamespaces != nil {
	in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
	*out = new(api.Namespaces)
@@ -81,6 +82,7 @@ func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
		(*out)[key] = val
	}
}
out.MetricsUtilization = in.MetricsUtilization
if in.EvictableNamespaces != nil {
	in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
	*out = new(api.Namespaces)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2024 The Kubernetes Authors.
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,21 +20,22 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
)
|
||||
|
||||
// evictorImpl implements the Evictor interface so plugins
|
||||
@@ -67,6 +68,7 @@ func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.Ev
|
||||
// handleImpl implements the framework handle which gets passed to plugins
|
||||
type handleImpl struct {
|
||||
clientSet clientset.Interface
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
evictor *evictorImpl
|
||||
@@ -79,6 +81,10 @@ func (hi *handleImpl) ClientSet() clientset.Interface {
|
||||
return hi.clientSet
|
||||
}
|
||||
|
||||
func (hi *handleImpl) MetricsCollector() *metricscollector.MetricsCollector {
|
||||
return hi.metricsCollector
|
||||
}
|
||||
|
||||
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
|
||||
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
|
||||
return hi.getPodsAssignedToNodeFunc
|
||||
@@ -128,6 +134,7 @@ type handleImplOpts struct {
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
|
||||
podEvictor *evictions.PodEvictor
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
}
|
||||
|
||||
// WithClientSet sets clientSet for the scheduling frameworkImpl.
|
||||
@@ -155,6 +162,12 @@ func WithGetPodsAssignedToNodeFnc(getPodsAssignedToNodeFunc podutil.GetPodsAssig
|
||||
}
|
||||
}
|
||||
|
||||
func WithMetricsCollector(metricsCollector *metricscollector.MetricsCollector) Option {
|
||||
return func(o *handleImplOpts) {
|
||||
o.metricsCollector = metricsCollector
|
||||
}
|
||||
}
|
||||
|
||||
func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.PluginConfig, int) {
|
||||
for idx, pluginConfig := range pluginConfigs {
|
||||
if pluginConfig.Name == pluginName {
|
||||
@@ -253,6 +266,7 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
|
||||
profileName: config.Name,
|
||||
podEvictor: hOpts.podEvictor,
|
||||
},
|
||||
metricsCollector: hOpts.metricsCollector,
|
||||
}
|
||||
|
||||
pluginNames := append(config.Plugins.Deschedule.Enabled, config.Plugins.Balance.Enabled...)
|
||||
@@ -305,7 +319,8 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.DescheduleOperation)))
|
||||
defer span.End()
|
||||
evicted := d.podEvictor.TotalEvicted()
|
||||
evictedBeforeDeschedule := d.podEvictor.TotalEvicted()
|
||||
evictionRequestsBeforeDeschedule := d.podEvictor.TotalEvictionRequests()
|
||||
strategyStart := time.Now()
|
||||
status := pl.Deschedule(ctx, nodes)
|
||||
metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())
|
||||
@@ -314,7 +329,7 @@ func (d profileImpl) RunDeschedulePlugins(ctx context.Context, nodes []*v1.Node)
|
||||
span.AddEvent("Plugin Execution Failed", trace.WithAttributes(attribute.String("err", status.Err.Error())))
|
||||
errs = append(errs, fmt.Errorf("plugin %q finished with error: %v", pl.Name(), status.Err))
|
||||
}
|
||||
klog.V(1).InfoS("Total number of pods evicted", "extension point", "Deschedule", "evictedPods", d.podEvictor.TotalEvicted()-evicted)
|
||||
klog.V(1).InfoS("Total number of evictions/requests", "extension point", "Deschedule", "evictedPods", d.podEvictor.TotalEvicted()-evictedBeforeDeschedule, "evictionRequests", d.podEvictor.TotalEvictionRequests()-evictionRequestsBeforeDeschedule)
|
||||
}
|
||||
|
||||
aggrErr := errors.NewAggregate(errs)
|
||||
@@ -333,7 +348,8 @@ func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *f
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, pl.Name(), trace.WithAttributes(attribute.String("plugin", pl.Name()), attribute.String("profile", d.profileName), attribute.String("operation", tracing.BalanceOperation)))
|
||||
defer span.End()
|
||||
evicted := d.podEvictor.TotalEvicted()
|
||||
evictedBeforeBalance := d.podEvictor.TotalEvicted()
|
||||
evictionRequestsBeforeBalance := d.podEvictor.TotalEvictionRequests()
|
||||
strategyStart := time.Now()
|
||||
status := pl.Balance(ctx, nodes)
|
||||
metrics.DeschedulerStrategyDuration.With(map[string]string{"strategy": pl.Name(), "profile": d.profileName}).Observe(time.Since(strategyStart).Seconds())
|
||||
@@ -342,7 +358,7 @@ func (d profileImpl) RunBalancePlugins(ctx context.Context, nodes []*v1.Node) *f
|
||||
span.AddEvent("Plugin Execution Failed", trace.WithAttributes(attribute.String("err", status.Err.Error())))
|
||||
errs = append(errs, fmt.Errorf("plugin %q finished with error: %v", pl.Name(), status.Err))
|
||||
}
|
||||
klog.V(1).InfoS("Total number of pods evicted", "extension point", "Balance", "evictedPods", d.podEvictor.TotalEvicted()-evicted)
|
||||
klog.V(1).InfoS("Total number of evictions/requests", "extension point", "Balance", "evictedPods", d.podEvictor.TotalEvicted()-evictedBeforeBalance, "evictionRequests", d.podEvictor.TotalEvictionRequests()-evictionRequestsBeforeBalance)
|
||||
}
|
||||
|
||||
aggrErr := errors.NewAggregate(errs)
|
||||
|
||||
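With the new WithMetricsCollector option in place alongside the existing ones, wiring a profile that can hand a metrics collector to its plugins might look roughly like the sketch below; the package alias, variable names, and the exact set of options are assumptions rather than a verbatim excerpt:

// frameworkprofile is an assumed alias for the package that defines NewProfile;
// pluginregistry.PluginRegistry is assumed to be the populated plugin registry.
prof, err := frameworkprofile.NewProfile(
	profileConfig,
	pluginregistry.PluginRegistry,
	frameworkprofile.WithClientSet(client),
	frameworkprofile.WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
	frameworkprofile.WithMetricsCollector(collector),
)
if err != nil {
	return err
}
_ = prof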
@@ -7,10 +7,12 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/component-base/featuregate"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -44,7 +46,14 @@ func InitFrameworkHandle(
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
podEvictor := evictions.NewPodEvictor(client, eventRecorder, evictionOptions)
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
podEvictor, err := evictions.NewPodEvictor(ctx, client, eventRecorder, podInformer, featureGates, evictionOptions)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Unable to initialize pod evictor: %v", err)
|
||||
}
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
&defaultEvictorArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
)
|
||||
|
||||
@@ -36,6 +37,7 @@ type Handle interface {
|
||||
Evictor() Evictor
|
||||
GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc
|
||||
SharedInformerFactory() informers.SharedInformerFactory
|
||||
MetricsCollector() *metricscollector.MetricsCollector
|
||||
}
|
||||
|
||||
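Since MetricsCollector is now part of the Handle interface, a plugin can reach the collector through the handle it receives at construction time. A hedged sketch; the plugin type below is a placeholder, not one of the actual descheduler plugins:

// hypotheticalPlugin only demonstrates how the handle would be consumed.
type hypotheticalPlugin struct {
	handle frameworktypes.Handle
}

func (p *hypotheticalPlugin) nodeUsage() error {
	// AllNodesUsage is the same call the actualUsageClient above relies on.
	usage, err := p.handle.MetricsCollector().AllNodesUsage()
	if err != nil {
		return err
	}
	_ = usage
	return nil
}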
// Evictor defines an interface for filtering and evicting pods
|
||||
|
||||
@@ -3,6 +3,11 @@ package utils
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
policyv1 "k8s.io/client-go/listers/policy/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -115,6 +120,38 @@ func IsPodWithPVC(pod *v1.Pod) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsPodCoveredByPDB returns true if the pod is covered by at least one PodDisruptionBudget.
func IsPodCoveredByPDB(pod *v1.Pod, lister policyv1.PodDisruptionBudgetLister) (bool, error) {
	// We can't use the GetPodPodDisruptionBudgets expansion method here because it treats no pdb as an error,
	// but we want to return false.

	list, err := lister.PodDisruptionBudgets(pod.Namespace).List(labels.Everything())
	if err != nil {
		return false, err
	}

	if len(list) == 0 {
		return false, nil
	}

	podLabels := labels.Set(pod.Labels)
	var pdbList []*policy.PodDisruptionBudget
	for _, pdb := range list {
		selector, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
		if err != nil {
			// This object has an invalid selector, it will never match the pod
			continue
		}

		if !selector.Matches(podLabels) {
			continue
		}
		pdbList = append(pdbList, pdb)
	}

	return len(pdbList) > 0, nil
}
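One way to obtain the lister argument is from a client-go shared informer factory; a short usage sketch, assuming the factory has already been started and its caches synced:

// The policy/v1 PodDisruptionBudget lister comes from the shared informer factory.
pdbLister := informerFactory.Policy().V1().PodDisruptionBudgets().Lister()
covered, err := utils.IsPodCoveredByPDB(pod, pdbLister)
if err != nil {
	return err
}
if covered {
	klog.V(4).InfoS("Pod is covered by at least one PDB", "pod", klog.KObj(pod))
}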
|
||||
|
||||
// GetPodSource returns the source of the pod based on the annotation.
|
||||
func GetPodSource(pod *v1.Pod) (string, error) {
|
||||
if pod.Annotations != nil {
|
||||
|
||||
542 test/e2e/e2e_evictioninbackground_test.go Normal file
@@ -0,0 +1,542 @@
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
kvcorev1 "kubevirt.io/api/core/v1"
|
||||
generatedclient "kubevirt.io/client-go/generated/kubevirt/clientset/versioned"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
apiv1alpha2 "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
|
||||
)
|
||||
|
||||
const (
|
||||
vmiCount = 3
|
||||
)
|
||||
|
||||
func virtualMachineInstance(idx int) *kvcorev1.VirtualMachineInstance {
|
||||
return &kvcorev1.VirtualMachineInstance{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("kubevirtvmi-%v", idx),
|
||||
Annotations: map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/request-evict-only": "",
|
||||
},
|
||||
},
|
||||
Spec: kvcorev1.VirtualMachineInstanceSpec{
|
||||
EvictionStrategy: utilptr.To[kvcorev1.EvictionStrategy](kvcorev1.EvictionStrategyLiveMigrate),
|
||||
Domain: kvcorev1.DomainSpec{
|
||||
Devices: kvcorev1.Devices{
|
||||
AutoattachPodInterface: utilptr.To[bool](false),
|
||||
Disks: []kvcorev1.Disk{
|
||||
{
|
||||
Name: "containerdisk",
|
||||
DiskDevice: kvcorev1.DiskDevice{
|
||||
Disk: &kvcorev1.DiskTarget{
|
||||
Bus: kvcorev1.DiskBusVirtio,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "cloudinitdisk",
|
||||
DiskDevice: kvcorev1.DiskDevice{
|
||||
Disk: &kvcorev1.DiskTarget{
|
||||
Bus: kvcorev1.DiskBusVirtio,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Rng: &kvcorev1.Rng{},
|
||||
},
|
||||
Resources: kvcorev1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceMemory: resource.MustParse("1024M"),
|
||||
},
|
||||
},
|
||||
},
|
||||
TerminationGracePeriodSeconds: utilptr.To[int64](0),
|
||||
Volumes: []kvcorev1.Volume{
|
||||
{
|
||||
Name: "containerdisk",
|
||||
VolumeSource: kvcorev1.VolumeSource{
|
||||
ContainerDisk: &kvcorev1.ContainerDiskSource{
|
||||
Image: "quay.io/kubevirt/fedora-with-test-tooling-container-disk:20240710_1265d1090",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "cloudinitdisk",
|
||||
VolumeSource: kvcorev1.VolumeSource{
|
||||
CloudInitNoCloud: &kvcorev1.CloudInitNoCloudSource{
|
||||
UserData: `#cloud-config
|
||||
password: fedora
|
||||
chpasswd: { expire: False }
|
||||
packages:
|
||||
- nginx
|
||||
runcmd:
|
||||
- [ "systemctl", "enable", "--now", "nginx" ]`,
|
||||
NetworkData: `version: 2
|
||||
ethernets:
|
||||
eth0:
|
||||
addresses: [ fd10:0:2::2/120 ]
|
||||
dhcp4: true
|
||||
gateway6: fd10:0:2::1`,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func waitForKubevirtReady(t *testing.T, ctx context.Context, kvClient generatedclient.Interface) {
|
||||
obj, err := kvClient.KubevirtV1().KubeVirts("kubevirt").Get(ctx, "kubevirt", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to get kubevirt/kubevirt: %v", err)
|
||||
}
|
||||
available := false
|
||||
for _, condition := range obj.Status.Conditions {
|
||||
if condition.Type == kvcorev1.KubeVirtConditionAvailable {
|
||||
if condition.Status == corev1.ConditionTrue {
|
||||
available = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !available {
|
||||
t.Fatalf("Kubevirt is not available")
|
||||
}
|
||||
klog.Infof("Kubevirt is available")
|
||||
}
|
||||
|
||||
func allVMIsHaveRunningPods(t *testing.T, ctx context.Context, kubeClient clientset.Interface, kvClient generatedclient.Interface) (bool, error) {
|
||||
klog.Infof("Checking all vmi active pods are running")
|
||||
uidMap := make(map[types.UID]*corev1.Pod)
|
||||
podList, err := kubeClient.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "client rate limiter") {
|
||||
klog.Infof("Unable to list pods: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("Unable to list pods: %v", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, item := range podList.Items {
|
||||
pod := item
|
||||
klog.Infof("item: %#v\n", item.UID)
|
||||
uidMap[item.UID] = &pod
|
||||
}
|
||||
|
||||
vmiList, err := kvClient.KubevirtV1().VirtualMachineInstances("default").List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
klog.Infof("Unable to list VMIs: %v", err)
|
||||
return false, err
|
||||
}
|
||||
if len(vmiList.Items) != vmiCount {
|
||||
klog.Infof("Expected %v VMIs, got %v instead", vmiCount, len(vmiList.Items))
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, item := range vmiList.Items {
|
||||
atLeastOneVmiIsRunning := false
|
||||
for activePod := range item.Status.ActivePods {
|
||||
if _, exists := uidMap[activePod]; !exists {
|
||||
klog.Infof("Active pod %v not found", activePod)
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("Checking whether active pod %v (uid=%v) is running", uidMap[activePod].Name, activePod)
|
||||
// ignore completed/failed pods
|
||||
if uidMap[activePod].Status.Phase == corev1.PodFailed || uidMap[activePod].Status.Phase == corev1.PodSucceeded {
|
||||
klog.Infof("Ignoring active pod %v, phase=%v", uidMap[activePod].Name, uidMap[activePod].Status.Phase)
|
||||
continue
|
||||
}
|
||||
if uidMap[activePod].Status.Phase != corev1.PodRunning {
|
||||
klog.Infof("activePod %v is not running: %v\n", uidMap[activePod].Name, uidMap[activePod].Status.Phase)
|
||||
return false, nil
|
||||
}
|
||||
atLeastOneVmiIsRunning = true
|
||||
}
|
||||
if !atLeastOneVmiIsRunning {
|
||||
klog.Infof("vmi %v does not have any activePod running\n", item.Name)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func podLifeTimePolicy() *apiv1alpha2.DeschedulerPolicy {
|
||||
return &apiv1alpha2.DeschedulerPolicy{
|
||||
Profiles: []apiv1alpha2.DeschedulerProfile{
|
||||
{
|
||||
Name: "KubeVirtPodLifetimeProfile",
|
||||
PluginConfigs: []apiv1alpha2.PluginConfig{
|
||||
{
|
||||
Name: podlifetime.PluginName,
|
||||
Args: runtime.RawExtension{
|
||||
Object: &podlifetime.PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: utilptr.To[uint](1), // set it to immediate eviction
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"default"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: runtime.RawExtension{
|
||||
Object: &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: apiv1alpha2.Plugins{
|
||||
Filter: apiv1alpha2.PluginSet{
|
||||
Enabled: []string{
|
||||
defaultevictor.PluginName,
|
||||
},
|
||||
},
|
||||
Deschedule: apiv1alpha2.PluginSet{
|
||||
Enabled: []string{
|
||||
podlifetime.PluginName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func kVirtRunningPodNames(t *testing.T, ctx context.Context, kubeClient clientset.Interface) []string {
|
||||
names := []string{}
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
podList, err := kubeClient.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
if isClientRateLimiterError(err) {
|
||||
t.Log(err)
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("Unable to list pods: %v", err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, item := range podList.Items {
|
||||
if !strings.HasPrefix(item.Name, "virt-launcher-kubevirtvmi-") {
|
||||
t.Fatalf("Only pod names with 'virt-launcher-kubevirtvmi-' prefix are expected, got %q instead", item.Name)
|
||||
}
|
||||
if item.Status.Phase == corev1.PodRunning {
|
||||
names = append(names, item.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Unable to list running kvirt pod names: %v", err)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func observeLiveMigration(t *testing.T, ctx context.Context, kubeClient clientset.Interface, usedRunningPodNames map[string]struct{}) {
|
||||
prevTotal := uint(0)
|
||||
jumps := 0
|
||||
// keep running the descheduling cycle until the migration is triggered and completed a few times, or until the loop times out
|
||||
for i := 0; i < 240; i++ {
|
||||
// monitor how many pods get evicted
|
||||
names := kVirtRunningPodNames(t, ctx, kubeClient)
|
||||
klog.Infof("vmi pods: %#v\n", names)
|
||||
// The number of pods needs to stay between vmiCount and vmiCount+1.
// At most two pods are expected to share the same virt-launcher-kubevirtvmi-X name prefix.
|
||||
prefixes := make(map[string]uint)
|
||||
for _, name := range names {
|
||||
// "virt-launcher-kubevirtvmi-"
|
||||
str := strings.Split(name, "-")[4]
|
||||
prefixes[str]++
|
||||
usedRunningPodNames[name] = struct{}{}
|
||||
}
|
||||
|
||||
hasDouble := false
|
||||
total := uint(0)
|
||||
for idx, count := range prefixes {
|
||||
total += count
|
||||
if count > 2 {
|
||||
t.Fatalf("A vmi kubevirtvmi-%v has more than 2 running active pods (%v), not expected", idx, count)
|
||||
}
|
||||
if count == 2 {
|
||||
if !hasDouble {
|
||||
hasDouble = true
|
||||
continue
|
||||
}
|
||||
t.Fatalf("Another vmi with 2 running active pods, not expected")
|
||||
}
|
||||
}
|
||||
// The total sum cannot be higher than vmiCount+1
|
||||
if total > vmiCount+1 {
|
||||
t.Fatalf("Total running pods (%v) are higher than expected vmiCount+1 (%v)", total, vmiCount+1)
|
||||
}
|
||||
|
||||
if prevTotal != 0 && prevTotal != total {
|
||||
jumps++
|
||||
}
|
||||
// Each finished live migration changes the running pod total twice (up to vmiCount+1 and back to vmiCount), so jumps >= 6 corresponds to at least 3 finished live migrations (two would likely be enough as well)
|
||||
if jumps >= 6 {
|
||||
break
|
||||
}
|
||||
prevTotal = total
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
if jumps < 6 {
|
||||
podList, err := kubeClient.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
klog.Infof("Unable to list pods: %v", err)
|
||||
} else {
|
||||
for _, item := range podList.Items {
|
||||
klog.Infof("pod(%v): %#v", item.Name, item)
|
||||
}
|
||||
}
|
||||
|
||||
t.Fatalf("Expected at least 3 finished live migrations, got less: %v", jumps/2.0)
|
||||
}
|
||||
klog.Infof("The live migration finished 3 times")
|
||||
|
||||
// len(usedRunningPodNames) is expected to be vmiCount + jumps/2 + 1 (one more live migration could still be initiated)
|
||||
klog.Infof("len(usedRunningPodNames): %v, upper limit: %v\n", len(usedRunningPodNames), vmiCount+jumps/2+1)
|
||||
if len(usedRunningPodNames) > vmiCount+jumps/2+1 {
|
||||
t.Fatalf("Expected vmiCount + jumps/2 + 1 = %v running pods, got %v instead", vmiCount+jumps/2+1, len(usedRunningPodNames))
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
names := kVirtRunningPodNames(t, ctx, kubeClient)
|
||||
klog.Infof("vmi pods: %#v\n", names)
|
||||
lNames := len(names)
|
||||
if lNames != vmiCount {
|
||||
klog.Infof("Waiting for the number of running vmi pods to be %v, got %v instead", vmiCount, lNames)
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("The number of running vmi pods is %v as expected", vmiCount)
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error waiting for %v vmi active pods to be running: %v", vmiCount, err)
|
||||
}
|
||||
}
|
||||
|
||||
func createAndWaitForDeschedulerRunning(t *testing.T, ctx context.Context, kubeClient clientset.Interface, deschedulerDeploymentObj *appsv1.Deployment) string {
|
||||
klog.Infof("Creating descheduler deployment %v", deschedulerDeploymentObj.Name)
|
||||
_, err := kubeClient.AppsV1().Deployments(deschedulerDeploymentObj.Namespace).Create(ctx, deschedulerDeploymentObj, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating %q deployment: %v", deschedulerDeploymentObj.Name, err)
|
||||
}
|
||||
|
||||
klog.Infof("Waiting for the descheduler pod running")
|
||||
deschedulerPods := waitForPodsRunning(ctx, t, kubeClient, deschedulerDeploymentObj.Labels, 1, deschedulerDeploymentObj.Namespace)
|
||||
if len(deschedulerPods) == 0 {
|
||||
t.Fatalf("Error waiting for %q deployment: no running pod found", deschedulerDeploymentObj.Name)
|
||||
}
|
||||
return deschedulerPods[0].Name
|
||||
}
|
||||
|
||||
func updateDeschedulerPolicy(t *testing.T, ctx context.Context, kubeClient clientset.Interface, policy *apiv1alpha2.DeschedulerPolicy) {
|
||||
deschedulerPolicyConfigMapObj, err := deschedulerPolicyConfigMap(policy)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating %q CM with unlimited evictions: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
_, err = kubeClient.CoreV1().ConfigMaps(deschedulerPolicyConfigMapObj.Namespace).Update(ctx, deschedulerPolicyConfigMapObj, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error updating %q CM: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func createKubevirtClient() (generatedclient.Interface, error) {
|
||||
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
|
||||
overrides := &clientcmd.ConfigOverrides{}
|
||||
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
|
||||
|
||||
config, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.GroupVersion = &kvcorev1.StorageGroupVersion
|
||||
config.APIPath = "/apis"
|
||||
config.ContentType = runtime.ContentTypeJSON
|
||||
|
||||
return generatedclient.NewForConfig(config)
|
||||
}
|
||||
|
||||
func TestLiveMigrationInBackground(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
kubeClient, err := client.CreateClient(componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: os.Getenv("KUBECONFIG")}, "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error during kubernetes client creation with %v", err)
|
||||
}
|
||||
|
||||
kvClient, err := createKubevirtClient()
|
||||
if err != nil {
|
||||
t.Fatalf("Error during kvClient creation with %v", err)
|
||||
}
|
||||
|
||||
waitForKubevirtReady(t, ctx, kvClient)
|
||||
|
||||
// Delete all VMIs
|
||||
defer func() {
|
||||
for i := 1; i <= vmiCount; i++ {
|
||||
vmi := virtualMachineInstance(i)
|
||||
err := kvClient.KubevirtV1().VirtualMachineInstances("default").Delete(context.Background(), vmi.Name, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
klog.Infof("Unable to delete vmi %v: %v", vmi.Name, err)
|
||||
}
|
||||
}
|
||||
wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
podList, err := kubeClient.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
lPods := len(podList.Items)
|
||||
if lPods > 0 {
|
||||
klog.Infof("Waiting until all pods under default namespace are gone, %v remaining", lPods)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}()
|
||||
|
||||
// Create N vmis and wait for the corresponding vm pods to be ready and running
|
||||
for i := 1; i <= vmiCount; i++ {
|
||||
vmi := virtualMachineInstance(i)
|
||||
_, err = kvClient.KubevirtV1().VirtualMachineInstances("default").Create(context.Background(), vmi, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to create KubeVirt vmi: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait until all VMIs have running pods
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 300*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
return allVMIsHaveRunningPods(t, ctx, kubeClient, kvClient)
|
||||
}); err != nil {
|
||||
t.Fatalf("Error waiting for all vmi active pods to be running: %v", err)
|
||||
}
|
||||
|
||||
usedRunningPodNames := make(map[string]struct{})
|
||||
// vmiCount number of names is expected
|
||||
names := kVirtRunningPodNames(t, ctx, kubeClient)
|
||||
klog.Infof("vmi pods: %#v\n", names)
|
||||
if len(names) != vmiCount {
|
||||
t.Fatalf("Expected %v vmi pods, got %v instead", vmiCount, len(names))
|
||||
}
|
||||
for _, name := range names {
|
||||
usedRunningPodNames[name] = struct{}{}
|
||||
}
|
||||
|
||||
policy := podLifeTimePolicy()
|
||||
// Allow only a single eviction simultaneously
|
||||
policy.MaxNoOfPodsToEvictPerNamespace = utilptr.To[uint](1)
|
||||
// Deploy the descheduler with the configured policy
|
||||
deschedulerPolicyConfigMapObj, err := deschedulerPolicyConfigMap(policy)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating %q CM: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
klog.Infof("Creating %q policy CM with RemovePodsHavingTooManyRestarts configured...", deschedulerPolicyConfigMapObj.Name)
|
||||
_, err = kubeClient.CoreV1().ConfigMaps(deschedulerPolicyConfigMapObj.Namespace).Create(ctx, deschedulerPolicyConfigMapObj, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating %q CM: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
defer func() {
|
||||
klog.Infof("Deleting %q CM...", deschedulerPolicyConfigMapObj.Name)
|
||||
err = kubeClient.CoreV1().ConfigMaps(deschedulerPolicyConfigMapObj.Namespace).Delete(ctx, deschedulerPolicyConfigMapObj.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to delete %q CM: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
}()
|
||||
|
||||
deschedulerDeploymentObj := deschedulerDeployment("kube-system")
|
||||
// Set the descheduling interval to 10s
|
||||
deschedulerDeploymentObj.Spec.Template.Spec.Containers[0].Args = []string{"--policy-config-file", "/policy-dir/policy.yaml", "--descheduling-interval", "10s", "--v", "4", "--feature-gates", "EvictionsInBackground=true"}
|
||||
|
||||
deschedulerPodName := ""
|
||||
defer func() {
|
||||
if deschedulerPodName != "" {
|
||||
printPodLogs(ctx, t, kubeClient, deschedulerPodName)
|
||||
}
|
||||
|
||||
klog.Infof("Deleting %q deployment...", deschedulerDeploymentObj.Name)
|
||||
err = kubeClient.AppsV1().Deployments(deschedulerDeploymentObj.Namespace).Delete(ctx, deschedulerDeploymentObj.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return
|
||||
}
|
||||
t.Fatalf("Unable to delete %q deployment: %v", deschedulerDeploymentObj.Name, err)
|
||||
}
|
||||
waitForPodsToDisappear(ctx, t, kubeClient, deschedulerDeploymentObj.Labels, deschedulerDeploymentObj.Namespace)
|
||||
}()
|
||||
|
||||
deschedulerPodName = createAndWaitForDeschedulerRunning(t, ctx, kubeClient, deschedulerDeploymentObj)
|
||||
|
||||
observeLiveMigration(t, ctx, kubeClient, usedRunningPodNames)
|
||||
|
||||
printPodLogs(ctx, t, kubeClient, deschedulerPodName)
|
||||
|
||||
klog.Infof("Deleting the current descheduler pod")
|
||||
err = kubeClient.AppsV1().Deployments(deschedulerDeploymentObj.Namespace).Delete(ctx, deschedulerDeploymentObj.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error deleting %q deployment: %v", deschedulerDeploymentObj.Name, err)
|
||||
}
|
||||
|
||||
remainingPods := make(map[string]struct{})
|
||||
for _, name := range kVirtRunningPodNames(t, ctx, kubeClient) {
|
||||
remainingPods[name] = struct{}{}
|
||||
}
|
||||
|
||||
klog.Infof("Configuring the descheduler policy %v for PodLifetime with no limits", deschedulerPolicyConfigMapObj.Name)
|
||||
policy.MaxNoOfPodsToEvictPerNamespace = nil
|
||||
updateDeschedulerPolicy(t, ctx, kubeClient, policy)
|
||||
|
||||
deschedulerDeploymentObj = deschedulerDeployment("kube-system")
|
||||
deschedulerDeploymentObj.Spec.Template.Spec.Containers[0].Args = []string{"--policy-config-file", "/policy-dir/policy.yaml", "--descheduling-interval", "100m", "--v", "4", "--feature-gates", "EvictionsInBackground=true"}
|
||||
deschedulerPodName = createAndWaitForDeschedulerRunning(t, ctx, kubeClient, deschedulerDeploymentObj)
|
||||
|
||||
klog.Infof("Waiting until all pods are evicted (no limit set)")
|
||||
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 120*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
names := kVirtRunningPodNames(t, ctx, kubeClient)
|
||||
for _, name := range names {
|
||||
if _, exists := remainingPods[name]; exists {
|
||||
klog.Infof("Waiting for %v to disappear", name)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
lNames := len(names)
|
||||
if lNames != vmiCount {
|
||||
klog.Infof("Waiting for the number of newly running vmi pods to be %v, got %v instead", vmiCount, lNames)
|
||||
return false, nil
|
||||
}
|
||||
klog.Infof("The number of newly running vmi pods is %v as expected", vmiCount)
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error waiting for %v new vmi active pods to be running: %v", vmiCount, err)
|
||||
}
|
||||
}
|
||||
304 test/e2e/e2e_lownodeutilization_test.go Normal file
@@ -0,0 +1,304 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
apiv1alpha2 "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
|
||||
)
|
||||
|
||||
func lowNodeUtilizationPolicy(lowNodeUtilizationArgs *nodeutilization.LowNodeUtilizationArgs, evictorArgs *defaultevictor.DefaultEvictorArgs, metricsCollectorEnabled bool) *apiv1alpha2.DeschedulerPolicy {
|
||||
return &apiv1alpha2.DeschedulerPolicy{
|
||||
MetricsCollector: apiv1alpha2.MetricsCollector{
|
||||
Enabled: metricsCollectorEnabled,
|
||||
},
|
||||
Profiles: []apiv1alpha2.DeschedulerProfile{
|
||||
{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName + "Profile",
|
||||
PluginConfigs: []apiv1alpha2.PluginConfig{
|
||||
{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
Args: runtime.RawExtension{
|
||||
Object: lowNodeUtilizationArgs,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: runtime.RawExtension{
|
||||
Object: evictorArgs,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: apiv1alpha2.Plugins{
|
||||
Filter: apiv1alpha2.PluginSet{
|
||||
Enabled: []string{
|
||||
defaultevictor.PluginName,
|
||||
},
|
||||
},
|
||||
Balance: apiv1alpha2.PluginSet{
|
||||
Enabled: []string{
|
||||
nodeutilization.LowNodeUtilizationPluginName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestLowNodeUtilizationKubernetesMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
clientSet, err := client.CreateClient(componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: os.Getenv("KUBECONFIG")}, "")
|
||||
if err != nil {
|
||||
t.Errorf("Error during kubernetes client creation with %v", err)
|
||||
}
|
||||
|
||||
metricsClient, err := client.CreateMetricsClient(componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: os.Getenv("KUBECONFIG")}, "descheduler")
|
||||
if err != nil {
|
||||
t.Errorf("Error during kubernetes metrics client creation with %v", err)
|
||||
}
|
||||
|
||||
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Error listing node with %v", err)
|
||||
}
|
||||
|
||||
_, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
|
||||
|
||||
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
|
||||
t.Logf("Creating testing namespace %q", testNamespace.Name)
|
||||
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Unable to create ns %v: %v", testNamespace.Name, err)
|
||||
}
|
||||
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
|
||||
|
||||
t.Log("Creating duplicates pods")
|
||||
testLabel := map[string]string{"app": "test-lownodeutilization-kubernetes-metrics", "name": "test-lownodeutilization-kubernetes-metrics"}
|
||||
deploymentObj := buildTestDeployment("lownodeutilization-kubernetes-metrics-pod", testNamespace.Name, 0, testLabel, nil)
|
||||
deploymentObj.Spec.Template.Spec.Containers[0].Image = "narmidm/k8s-pod-cpu-stressor:latest"
|
||||
deploymentObj.Spec.Template.Spec.Containers[0].Args = []string{"-cpu=1.0", "-duration=10s", "-forever"}
|
||||
deploymentObj.Spec.Template.Spec.Containers[0].Resources = v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("3000m"),
|
||||
},
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("0m"),
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
replicasNum int
|
||||
beforeFunc func(deployment *appsv1.Deployment)
|
||||
expectedEvictedPodCount int
|
||||
lowNodeUtilizationArgs *nodeutilization.LowNodeUtilizationArgs
|
||||
evictorArgs *defaultevictor.DefaultEvictorArgs
|
||||
metricsCollectorEnabled bool
|
||||
}{
|
||||
{
|
||||
name: "metric server not enabled",
|
||||
replicasNum: 4,
|
||||
beforeFunc: func(deployment *appsv1.Deployment) {
|
||||
deployment.Spec.Replicas = utilptr.To[int32](4)
|
||||
deployment.Spec.Template.Spec.NodeName = workerNodes[0].Name
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
lowNodeUtilizationArgs: &nodeutilization.LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
MetricsUtilization: nodeutilization.MetricsUtilization{
|
||||
MetricsServer: true,
|
||||
},
|
||||
},
|
||||
evictorArgs: &defaultevictor.DefaultEvictorArgs{},
|
||||
metricsCollectorEnabled: false,
|
||||
},
|
||||
{
|
||||
name: "requested cpu resource zero, actual cpu utilization 3 per pod",
|
||||
replicasNum: 4,
|
||||
beforeFunc: func(deployment *appsv1.Deployment) {
|
||||
deployment.Spec.Replicas = utilptr.To[int32](4)
|
||||
deployment.Spec.Template.Spec.NodeName = workerNodes[0].Name
|
||||
},
|
||||
expectedEvictedPodCount: 2,
|
||||
lowNodeUtilizationArgs: &nodeutilization.LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
MetricsUtilization: nodeutilization.MetricsUtilization{
|
||||
MetricsServer: true,
|
||||
},
|
||||
},
|
||||
evictorArgs: &defaultevictor.DefaultEvictorArgs{},
|
||||
metricsCollectorEnabled: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Logf("Creating deployment %v in %v namespace", deploymentObj.Name, deploymentObj.Namespace)
|
||||
tc.beforeFunc(deploymentObj)
|
||||
|
||||
_, err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).Create(ctx, deploymentObj, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Logf("Error creating deployment: %v", err)
|
||||
if err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(deploymentObj.Labels).String(),
|
||||
}); err != nil {
|
||||
t.Fatalf("Unable to delete deployment: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
clientSet.AppsV1().Deployments(deploymentObj.Namespace).Delete(ctx, deploymentObj.Name, metav1.DeleteOptions{})
|
||||
waitForPodsToDisappear(ctx, t, clientSet, deploymentObj.Labels, deploymentObj.Namespace)
|
||||
}()
|
||||
waitForPodsRunning(ctx, t, clientSet, deploymentObj.Labels, tc.replicasNum, deploymentObj.Namespace)
|
||||
// wait until workerNodes[0].Name has the right actual cpu utilization and all the testing pods are running
|
||||
// and producing ~12 cores in total
|
||||
wait.PollUntilWithContext(ctx, 5*time.Second, func(context.Context) (done bool, err error) {
|
||||
item, err := metricsClient.MetricsV1beta1().NodeMetricses().Get(ctx, workerNodes[0].Name, metav1.GetOptions{})
|
||||
t.Logf("Waiting for %q nodemetrics cpu utilization to get over 12, currently %v", workerNodes[0].Name, item.Usage.Cpu().Value())
|
||||
if item.Usage.Cpu().Value() < 12 {
|
||||
return false, nil
|
||||
}
|
||||
totalCpu := resource.NewMilliQuantity(0, resource.DecimalSI)
|
||||
podItems, err := metricsClient.MetricsV1beta1().PodMetricses(deploymentObj.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Logf("unable to list podmetricses: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
for _, podMetrics := range podItems.Items {
|
||||
for _, container := range podMetrics.Containers {
|
||||
if _, exists := container.Usage[v1.ResourceCPU]; !exists {
|
||||
continue
|
||||
}
|
||||
totalCpu.Add(container.Usage[v1.ResourceCPU])
|
||||
}
|
||||
}
|
||||
// Value() will round up (e.g. 11.1 -> 12), which is still ok
|
||||
t.Logf("Waiting for totalCpu to get to 12 at least, got %v\n", totalCpu.Value())
|
||||
return totalCpu.Value() >= 12, nil
|
||||
})
|
||||
|
||||
preRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
|
||||
|
||||
// Deploy the descheduler with the configured policy
|
||||
deschedulerPolicyConfigMapObj, err := deschedulerPolicyConfigMap(lowNodeUtilizationPolicy(tc.lowNodeUtilizationArgs, tc.evictorArgs, tc.metricsCollectorEnabled))
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating %q CM: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
|
||||
t.Logf("Creating %q policy CM with LowNodeUtilization configured...", deschedulerPolicyConfigMapObj.Name)
|
||||
_, err = clientSet.CoreV1().ConfigMaps(deschedulerPolicyConfigMapObj.Namespace).Create(ctx, deschedulerPolicyConfigMapObj, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating %q CM: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
t.Logf("Deleting %q CM...", deschedulerPolicyConfigMapObj.Name)
|
||||
err = clientSet.CoreV1().ConfigMaps(deschedulerPolicyConfigMapObj.Namespace).Delete(ctx, deschedulerPolicyConfigMapObj.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to delete %q CM: %v", deschedulerPolicyConfigMapObj.Name, err)
|
||||
}
|
||||
}()
|
||||
|
||||
deschedulerDeploymentObj := deschedulerDeployment(testNamespace.Name)
|
||||
t.Logf("Creating descheduler deployment %v", deschedulerDeploymentObj.Name)
|
||||
_, err = clientSet.AppsV1().Deployments(deschedulerDeploymentObj.Namespace).Create(ctx, deschedulerDeploymentObj, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating %q deployment: %v", deschedulerDeploymentObj.Name, err)
|
||||
}
|
||||
|
||||
deschedulerPodName := ""
|
||||
defer func() {
|
||||
if deschedulerPodName != "" {
|
||||
printPodLogs(ctx, t, clientSet, deschedulerPodName)
|
||||
}
|
||||
|
||||
t.Logf("Deleting %q deployment...", deschedulerDeploymentObj.Name)
|
||||
err = clientSet.AppsV1().Deployments(deschedulerDeploymentObj.Namespace).Delete(ctx, deschedulerDeploymentObj.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to delete %q deployment: %v", deschedulerDeploymentObj.Name, err)
|
||||
}
|
||||
|
||||
waitForPodsToDisappear(ctx, t, clientSet, deschedulerDeploymentObj.Labels, deschedulerDeploymentObj.Namespace)
|
||||
}()
|
||||
|
||||
t.Logf("Waiting for the descheduler pod running")
|
||||
deschedulerPods := waitForPodsRunning(ctx, t, clientSet, deschedulerDeploymentObj.Labels, 1, deschedulerDeploymentObj.Namespace)
|
||||
if len(deschedulerPods) != 0 {
|
||||
deschedulerPodName = deschedulerPods[0].Name
|
||||
}
|
||||
|
||||
// Run LowNodeUtilization plugin
|
||||
var meetsExpectations bool
|
||||
var actualEvictedPodCount int
|
||||
if err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
|
||||
actualEvictedPod := preRunNames.Difference(currentRunNames)
|
||||
actualEvictedPodCount = actualEvictedPod.Len()
|
||||
t.Logf("preRunNames: %v, currentRunNames: %v, actualEvictedPodCount: %v\n", preRunNames.List(), currentRunNames.List(), actualEvictedPodCount)
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Logf("Expecting %v number of pods evicted, got %v instead", tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
return false, nil
|
||||
}
|
||||
meetsExpectations = true
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Errorf("Error waiting for descheduler running: %v", err)
|
||||
}
|
||||
|
||||
if !meetsExpectations {
|
||||
t.Errorf("Unexpected number of pods have been evicted, got %v, expected %v", actualEvictedPodCount, tc.expectedEvictedPodCount)
|
||||
} else {
|
||||
t.Logf("Total of %d Pods were evicted for %s", actualEvictedPodCount, tc.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
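For context, a minimal sketch of the set bookkeeping the test above relies on: pod names captured before the descheduler run minus the names still present afterwards gives the evicted pods. The literal pod names here are illustrative assumptions, not values from this diff.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Pods observed before the descheduler run vs. pods still present afterwards.
	preRun := sets.NewString("workload-a", "workload-b", "workload-c")
	current := sets.NewString("workload-b", "workload-c", "replacement-d")

	// Names that disappeared are counted as evictions, as in the poll loop above.
	evicted := preRun.Difference(current)
	fmt.Println(evicted.List(), evicted.Len()) // [workload-a] 1
}
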
@@ -40,6 +40,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
listersv1 "k8s.io/client-go/listers/core/v1"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/yaml"
@@ -54,6 +55,7 @@ import (
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
@@ -68,6 +70,14 @@ func isClientRateLimiterError(err error) bool {
return strings.Contains(err.Error(), "client rate limiter")
}

func initFeatureGates() featuregate.FeatureGate {
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
})
return featureGates
}

func deschedulerPolicyConfigMap(policy *deschedulerapiv1alpha2.DeschedulerPolicy) (*v1.ConfigMap, error) {
cm := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@@ -1344,6 +1354,7 @@ func TestDeschedulingInterval(t *testing.T) {
t.Fatalf("Unable to initialize server: %v", err)
}
s.Client = clientSet
s.DefaultFeatureGates = initFeatureGates()

deschedulerPolicy := &deschedulerapi.DeschedulerPolicy{}

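For context, a minimal standalone sketch of how a gate registered the way initFeatureGates() above does is typically consulted at runtime, using the same k8s.io/component-base/featuregate API. The main wrapper and the local feature constant are assumptions for illustration, not code from this diff.

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

// Illustrative stand-in for the descheduler's features.EvictionsInBackground constant.
const EvictionsInBackground featuregate.Feature = "EvictionsInBackground"

func main() {
	// Same registration pattern as initFeatureGates() in the diff above:
	// the gate defaults to off and is marked Alpha.
	gates := featuregate.NewFeatureGate()
	if err := gates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}

	// Callers then branch on Enabled() before using the alpha behaviour.
	fmt.Println(gates.Enabled(EvictionsInBackground)) // false unless explicitly enabled
}
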
@@ -148,6 +148,7 @@ func TestTooManyRestarts(t *testing.T) {
}
rs.Client = clientSet
rs.EventClient = clientSet
rs.DefaultFeatureGates = initFeatureGates()

preRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
// Deploy the descheduler with the configured policy

@@ -31,6 +31,7 @@ func TestClientConnectionConfiguration(t *testing.T) {
t.Fatalf("Unable to initialize server: %v", err)
}
s.Client = clientSet
s.DefaultFeatureGates = initFeatureGates()
evictionPolicyGroupVersion, err := eutils.SupportEviction(s.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Errorf("Error when checking support for eviction: %v", err)

@@ -21,6 +21,12 @@ set -o nounset
# Set to empty if unbound/empty
SKIP_INSTALL=${SKIP_INSTALL:-}
KIND_E2E=${KIND_E2E:-}
CONTAINER_ENGINE=${CONTAINER_ENGINE:-docker}
KIND_SUDO=${KIND_SUDO:-}
SKIP_KUBECTL_INSTALL=${SKIP_KUBECTL_INSTALL:-}
SKIP_KIND_INSTALL=${SKIP_KIND_INSTALL:-}
SKIP_KUBEVIRT_INSTALL=${SKIP_KUBEVIRT_INSTALL:-}
KUBEVIRT_VERSION=${KUBEVIRT_VERSION:-v1.3.0-rc.1}

# Build a descheduler image
IMAGE_TAG=v$(date +%Y%m%d)-$(git describe --tags)
@@ -32,20 +38,35 @@ echo "DESCHEDULER_IMAGE: ${DESCHEDULER_IMAGE}"

# This just runs e2e tests.
if [ -n "$KIND_E2E" ]; then
# If we did not set SKIP_INSTALL
if [ -z "$SKIP_INSTALL" ]; then
K8S_VERSION=${KUBERNETES_VERSION:-v1.31.0}
K8S_VERSION=${KUBERNETES_VERSION:-v1.32.0}
if [ -z "${SKIP_KUBECTL_INSTALL}" ]; then
curl -Lo kubectl https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.24.0/kind-linux-amd64
fi
if [ -z "${SKIP_KIND_INSTALL}" ]; then
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.26.0/kind-linux-amd64
chmod +x kind-linux-amd64
mv kind-linux-amd64 kind
export PATH=$PATH:$PWD
kind create cluster --image kindest/node:${K8S_VERSION} --config=./hack/kind_config.yaml
fi
${CONTAINER_ENGINE:-docker} pull registry.k8s.io/pause
kind load docker-image registry.k8s.io/pause
kind load docker-image ${DESCHEDULER_IMAGE}
kind get kubeconfig > /tmp/admin.conf

# If we did not set SKIP_INSTALL
if [ -z "$SKIP_INSTALL" ]; then
${KIND_SUDO} kind create cluster --image kindest/node:${K8S_VERSION} --config=./hack/kind_config.yaml
fi
${CONTAINER_ENGINE} pull registry.k8s.io/pause
if [ "${CONTAINER_ENGINE}" == "podman" ]; then
podman save registry.k8s.io/pause -o /tmp/pause.tar
${KIND_SUDO} kind load image-archive /tmp/pause.tar
rm /tmp/pause.tar
podman save ${DESCHEDULER_IMAGE} -o /tmp/descheduler.tar
${KIND_SUDO} kind load image-archive /tmp/descheduler.tar
rm /tmp/descheduler.tar
else
${KIND_SUDO} kind load docker-image registry.k8s.io/pause
${KIND_SUDO} kind load docker-image ${DESCHEDULER_IMAGE}
fi
${KIND_SUDO} kind get kubeconfig > /tmp/admin.conf

export KUBECONFIG="/tmp/admin.conf"
mkdir -p ~/gopath/src/sigs.k8s.io/
fi
@@ -53,5 +74,35 @@ fi
# Deploy rbac, sa and binding for a descheduler running through a deployment
kubectl apply -f kubernetes/base/rbac.yaml

collect_logs() {
echo "Collecting pods and logs"
kubectl get pods -n default
kubectl get pods -n kubevirt

for pod in $(kubectl get pods -n default -o name); do
echo "Logs for ${pod}"
kubectl logs -n default ${pod}
done

for pod in $(kubectl get pods -n kubevirt -o name); do
echo "Logs for ${pod}"
kubectl logs -n kubevirt ${pod}
done
}

trap "collect_logs" ERR

if [ -z "${SKIP_KUBEVIRT_INSTALL}" ]; then
kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml
kubectl create -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml
kubectl wait --timeout=180s --for=condition=Available -n kubevirt kv/kubevirt
kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
fi

METRICS_SERVER_VERSION="v0.5.0"
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/download/${METRICS_SERVER_VERSION}/components.yaml
kubectl patch -n kube-system deployment metrics-server --type=json \
-p '[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]'

PRJ_PREFIX="sigs.k8s.io/descheduler"
go test ${PRJ_PREFIX}/test/e2e/ -v -timeout 0

@@ -25,12 +25,15 @@ import (

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
utilptr "k8s.io/utils/ptr"
)

@@ -67,6 +70,45 @@ func BuildTestPod(name string, cpu, memory int64, nodeName string, apply func(*v
return pod
}

func BuildTestPDB(name, appLabel string) *policyv1.PodDisruptionBudget {
maxUnavailable := intstr.FromInt32(1)
pdb := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: policyv1.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": appLabel,
},
},
MaxUnavailable: &maxUnavailable,
},
}
return pdb
}

// BuildPodMetrics creates a test podmetrics with given parameters.
func BuildPodMetrics(name string, millicpu, mem int64) *v1beta1.PodMetrics {
return &v1beta1.PodMetrics{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
Window: metav1.Duration{Duration: 20010000000},
Containers: []v1beta1.ContainerMetrics{
{
Name: "container-1",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(mem, resource.BinarySI),
},
},
},
}
}

// GetMirrorPodAnnotation returns the annotation needed for mirror pod.
func GetMirrorPodAnnotation() map[string]string {
return map[string]string{
@@ -135,6 +177,19 @@ func BuildTestNode(name string, millicpu, mem, pods int64, apply func(*v1.Node))
return node
}

func BuildNodeMetrics(name string, millicpu, mem int64) *v1beta1.NodeMetrics {
return &v1beta1.NodeMetrics{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Window: metav1.Duration{Duration: 20010000000},
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(mem, resource.BinarySI),
},
}
}

// MakeBestEffortPod makes the given pod a BestEffort pod
func MakeBestEffortPod(pod *v1.Pod) {
pod.Spec.Containers[0].Resources.Requests = nil

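As a rough usage sketch (not part of this diff), the helpers added above could be exercised as follows; the sigs.k8s.io/descheduler/test import path and the concrete names and values are assumptions for illustration.

package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/test"
)

func main() {
	// A PDB guarding pods labelled app=test-app, allowing at most one unavailable pod.
	pdb := test.BuildTestPDB("test-pdb", "test-app")

	// Fake metrics: a node reporting 1500m CPU and 1GiB of memory in use.
	nodeMetrics := test.BuildNodeMetrics("worker-1", 1500, 1<<30)

	fmt.Println(pdb.Spec.MaxUnavailable.IntValue())   // 1
	fmt.Println(nodeMetrics.Usage.Cpu().MilliValue()) // 1500
}
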
2
vendor/cel.dev/expr/.bazelversion
vendored
Normal file
@@ -0,0 +1,2 @@
7.0.1
# Keep this pinned version in parity with cel-go
2
vendor/cel.dev/expr/.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
*.pb.go linguist-generated=true
*.pb.go -diff -merge
2
vendor/cel.dev/expr/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
bazel-*
MODULE.bazel.lock
34
vendor/cel.dev/expr/BUILD.bazel
vendored
Normal file
@@ -0,0 +1,34 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

package(default_visibility = ["//visibility:public"])

licenses(["notice"]) # Apache 2.0

go_library(
name = "expr",
srcs = [
"checked.pb.go",
"eval.pb.go",
"explain.pb.go",
"syntax.pb.go",
"value.pb.go",
],
importpath = "cel.dev/expr",
visibility = ["//visibility:public"],
deps = [
"@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect",
"@org_golang_google_protobuf//runtime/protoimpl",
"@org_golang_google_protobuf//types/known/anypb",
"@org_golang_google_protobuf//types/known/durationpb",
"@org_golang_google_protobuf//types/known/emptypb",
"@org_golang_google_protobuf//types/known/structpb",
"@org_golang_google_protobuf//types/known/timestamppb",
],
)

alias(
name = "go_default_library",
actual = ":expr",
visibility = ["//visibility:public"],
)
25
vendor/cel.dev/expr/CODE_OF_CONDUCT.md
vendored
Normal file
@@ -0,0 +1,25 @@
# Contributor Code of Conduct
## Version 0.1.1 (adapted from 0.3b-angular)

As contributors and maintainers of the Common Expression Language
(CEL) project, we pledge to respect everyone who contributes by
posting issues, updating documentation, submitting pull requests,
providing feedback in comments, and any other activities.

Communication through any of CEL's channels (GitHub, Gitter, IRC,
mailing lists, Google+, Twitter, etc.) must be constructive and never
resort to personal attacks, trolling, public or private harassment,
insults, or other unprofessional conduct.

We promise to extend courtesy and respect to everyone involved in this
project regardless of gender, gender identity, sexual orientation,
disability, age, race, ethnicity, religion, or level of experience. We
expect anyone contributing to the project to do the same.

If any member of the community violates this code of conduct, the
maintainers of the CEL project may take action, removing issues,
comments, and PRs or blocking accounts as deemed appropriate.

If you are subject to or witness unacceptable behavior, or have any
other concerns, please email us at
[cel-conduct@google.com](mailto:cel-conduct@google.com).
32
vendor/cel.dev/expr/CONTRIBUTING.md
vendored
Normal file
@@ -0,0 +1,32 @@
# How to Contribute

We'd love to accept your patches and contributions to this project. There are a
few guidelines you need to follow.

## Contributor License Agreement

Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.

You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.

## Code reviews

All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.

## What to expect from maintainers

Expect maintainers to respond to new issues or pull requests within a week.
For outstanding and ongoing issues and particularly for long-running
pull requests, expect the maintainers to review within a week of a
contributor asking for a new review. There is no commitment to resolution --
merging or closing a pull request, or fixing or closing an issue -- because some
issues will require more discussion than others.
43
vendor/cel.dev/expr/GOVERNANCE.md
vendored
Normal file
@@ -0,0 +1,43 @@
# Project Governance

This document defines the governance process for the CEL language. CEL is
Google-developed, but openly governed. Major contributors to the CEL
specification and its corresponding implementations constitute the CEL
Language Council. New members may be added by a unanimous vote of the
Council.

The MAINTAINERS.md file lists the members of the CEL Language Council, and
unofficially indicates the "areas of expertise" of each member with respect
to the publicly available CEL repos.

## Code Changes

Code changes must follow the standard pull request (PR) model documented in the
CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
maintainer. The maintainer reserves the right to request that any feature
request (FR) or PR be reviewed by the language council.

## Syntax and Semantic Changes

Syntactic and semantic changes must be reviewed by the CEL Language Council.
Maintainers may also request language council review at their discretion.

The review process is as follows:

- Create a Feature Request in the CEL-Spec repo. The feature description will
  serve as an abstract for the detailed design document.
- Co-develop a design document with the Language Council.
- Once the proposer gives the design document approval, the document will be
  linked to the FR in the CEL-Spec repo and opened for comments to members of
  the cel-lang-discuss@googlegroups.com.
- The Language Council will review the design doc at the next council meeting
  (once every three weeks) and the council decision included in the document.

If the proposal is approved, the spec will be updated by a maintainer (if
applicable) and a rationale will be included in the CEL-Spec wiki to ensure
future developers may follow CEL's growth and direction over time.

Approved proposals may be implemented by the proposer or by the maintainers as
the parties see fit. At the discretion of the maintainer, changes from the
approved design are permitted during implementation if they improve the user
experience and clarity of the feature.
202
vendor/cel.dev/expr/LICENSE
vendored
Normal file
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
13
vendor/cel.dev/expr/MAINTAINERS.md
vendored
Normal file
@@ -0,0 +1,13 @@
# CEL Language Council

| Name            | Company      | Area of Expertise |
|-----------------|--------------|-------------------|
| Alfred Fuller   | Facebook     | cel-cpp, cel-spec |
| Jim Larson      | Google       | cel-go, cel-spec  |
| Matthais Blume  | Google       | cel-spec          |
| Tristan Swadell | Google       | cel-go, cel-spec  |

## Emeritus

* Sanjay Ghemawat (Google)
* Wolfgang Grieskamp (Facebook)
Some files were not shown because too many files have changed in this diff