
fix: pod resource calculation to consider native sidecars

Previously, the descheduler carried a copied, outdated version of PodRequestsAndLimits that did not account for native sidecars.
It now relies on the resourcehelper library, which will continue to receive upstream updates.

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
Amir Alavi
2025-11-01 10:47:53 -04:00
parent 582641c2e9
commit 06cab8e2aa
6 changed files with 535 additions and 89 deletions
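
For context, a minimal sketch (not part of the commit) of the behavior this fixes, using the resourcehelper package vendored below: a native sidecar is an init container with restartPolicy: Always, so it keeps running alongside the app containers and its requests must be summed with them instead of folded into the old max-over-init-containers term:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
	"k8s.io/utils/ptr"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			}},
			InitContainers: []v1.Container{{
				Name:          "sidecar",
				RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			}},
		},
	}
	reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})
	// Sidecar-aware total: 100m (app) + 100m (restartable sidecar) = 200m.
	// The old max-over-init-containers formula reported only 100m.
	fmt.Println(reqs.Cpu().String()) // 200m
}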


@@ -241,7 +241,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
return false, fmt.Errorf("insufficient %v", resource)
}
}
-	// check pod num, at least one pod number is avaibalbe
+	// check pod num, at least one pod number is available
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
}


@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/utils/ptr"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
@@ -1020,6 +1021,64 @@ func TestNodeFit(t *testing.T) {
node: node,
podsOnNode: []*v1.Pod{},
},
+		{
+			description: "Pod with native sidecars with too much cpu does not fit on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
+					RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
+					Resources: v1.ResourceRequirements{
+						Requests: createResourceList(100000, 100*1000*1000, 0),
+					},
+				})
+			}),
+			node:       node,
+			podsOnNode: []*v1.Pod{},
+			err:        errors.New("insufficient cpu"),
+		},
+		{
+			description: "Pod with native sidecars with too much memory does not fit on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
+					RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
+					Resources: v1.ResourceRequirements{
+						Requests: createResourceList(100, 1000*1000*1000*1000, 0),
+					},
+				})
+			}),
+			node:       node,
+			podsOnNode: []*v1.Pod{},
+			err:        errors.New("insufficient memory"),
+		},
+		{
+			description: "Pod with small native sidecars fits on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
+					RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
+					Resources: v1.ResourceRequirements{
+						Requests: createResourceList(100, 100*1000*1000, 0),
+					},
+				})
+			}),
+			node:       node,
+			podsOnNode: []*v1.Pod{},
+		},
+		{
+			description: "Pod with large overhead does not fit on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.Overhead = createResourceList(100000, 100*1000*1000, 0)
+			}),
+			node:       node,
+			podsOnNode: []*v1.Pod{},
+			err:        errors.New("insufficient cpu"),
+		},
+		{
+			description: "Pod with small overhead fits on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.Overhead = createResourceList(1, 1*1000*1000, 0)
+			}),
+			node:       node,
+			podsOnNode: []*v1.Pod{},
+		},
}
for _, tc := range tests {
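
The overhead test cases above pass because resourcehelper.PodRequests also folds pod.Spec.Overhead into the total; a small sketch with an invented 10m RuntimeClass overhead:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			}},
			// Hypothetical RuntimeClass overhead, e.g. for a sandboxed runtime.
			Overhead: v1.ResourceList{v1.ResourceCPU: resource.MustParse("10m")},
		},
	}
	opts := resourcehelper.PodResourcesOptions{}
	fmt.Println(resourcehelper.PodRequests(pod, opts).Cpu()) // 110m: 100m + 10m overhead
	opts.ExcludeOverhead = true
	fmt.Println(resourcehelper.PodRequests(pod, opts).Cpu()) // 100m: overhead omitted
}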


@@ -11,24 +11,10 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
)
-// GetResourceRequest finds and returns the request value for a specific resource.
-func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
-	if resource == v1.ResourcePods {
-		return 1
-	}
-	requestQuantity := GetResourceRequestQuantity(pod, resource)
-	if resource == v1.ResourceCPU {
-		return requestQuantity.MilliValue()
-	}
-	return requestQuantity.Value()
-}
// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
requestQuantity := resource.Quantity{}
@@ -42,26 +28,8 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
}
-	for _, container := range pod.Spec.Containers {
-		if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
-			requestQuantity.Add(rQuantity)
-		}
-	}
-	for _, container := range pod.Spec.InitContainers {
-		if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
-			if requestQuantity.Cmp(rQuantity) < 0 {
-				requestQuantity = rQuantity.DeepCopy()
-			}
-		}
-	}
-	// We assume pod overhead feature gate is enabled.
-	// We can't import the scheduler settings so we will inherit the default.
-	if pod.Spec.Overhead != nil {
-		if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
-			requestQuantity.Add(podOverhead)
-		}
-	}
+	if rQuantity, ok := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})[resourceName]; ok {
+		requestQuantity.Add(rQuantity)
+	}
return requestQuantity
@@ -171,59 +139,9 @@ func GetPodSource(pod *v1.Pod) (string, error) {
// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
// total container resource requests and to the total container limits which have a
// non-zero quantity.
-func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
-	reqs, limits = v1.ResourceList{}, v1.ResourceList{}
-	for _, container := range pod.Spec.Containers {
-		addResourceList(reqs, container.Resources.Requests)
-		addResourceList(limits, container.Resources.Limits)
-	}
-	// init containers define the minimum of any resource
-	for _, container := range pod.Spec.InitContainers {
-		maxResourceList(reqs, container.Resources.Requests)
-		maxResourceList(limits, container.Resources.Limits)
-	}
-	// We assume pod overhead feature gate is enabled.
-	// We can't import the scheduler settings so we will inherit the default.
-	if pod.Spec.Overhead != nil {
-		addResourceList(reqs, pod.Spec.Overhead)
-		for name, quantity := range pod.Spec.Overhead {
-			if value, ok := limits[name]; ok && !value.IsZero() {
-				value.Add(quantity)
-				limits[name] = value
-			}
-		}
-	}
-	return
-}
-// addResourceList adds the resources in newList to list
-func addResourceList(list, newList v1.ResourceList) {
-	for name, quantity := range newList {
-		if value, ok := list[name]; !ok {
-			list[name] = quantity.DeepCopy()
-		} else {
-			value.Add(quantity)
-			list[name] = value
-		}
-	}
-}
-// maxResourceList sets list to the greater of list/newList for every resource
-// in either list
-func maxResourceList(list, new v1.ResourceList) {
-	for name, quantity := range new {
-		if value, ok := list[name]; !ok {
-			list[name] = quantity.DeepCopy()
-			continue
-		} else {
-			if quantity.Cmp(value) > 0 {
-				list[name] = quantity.DeepCopy()
-			}
-		}
-	}
-}
+func PodRequestsAndLimits(pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
+	opts := resourcehelper.PodResourcesOptions{}
+	return resourcehelper.PodRequests(pod, opts), resourcehelper.PodLimits(pod, opts)
+}
// PodToleratesTaints returns true if a pod tolerates one node's taints
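
The four-line replacement above keeps the PodRequestsAndLimits signature, so existing callers are untouched; a minimal self-contained sketch of the delegation (pod definition invented):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
)

// podRequestsAndLimits mirrors the new wrapper above: one options value,
// two delegated calls, no locally maintained aggregation logic.
func podRequestsAndLimits(pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
	opts := resourcehelper.PodResourcesOptions{}
	return resourcehelper.PodRequests(pod, opts), resourcehelper.PodLimits(pod, opts)
}

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("64Mi")},
					Limits:   v1.ResourceList{v1.ResourceMemory: resource.MustParse("128Mi")},
				},
			}},
		},
	}
	reqs, limits := podRequestsAndLimits(pod)
	fmt.Println(reqs.Memory(), limits.Memory()) // 64Mi 128Mi
}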

vendor/k8s.io/component-helpers/resource/OWNERS generated vendored Normal file (+13)

@@ -0,0 +1,13 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
approvers:
- api-approvers
reviewers:
- sig-node-reviewers
- sig-scheduling
labels:
- sig/node
- sig/scheduling
- kind/api-change

vendor/k8s.io/component-helpers/resource/helpers.go generated vendored Normal file (+455)

@@ -0,0 +1,455 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// ContainerType signifies container type
type ContainerType int
const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
)
// PodResourcesOptions controls the behavior of PodRequests and PodLimits.
type PodResourcesOptions struct {
// Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits
// functions. All existing values in Reuse will be lost.
Reuse v1.ResourceList
// UseStatusResources indicates whether resources reported by the PodStatus should be considered
// when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
// feature is not enabled.
UseStatusResources bool
// ExcludeOverhead controls if pod overhead is excluded from the calculation.
ExcludeOverhead bool
// ContainerFn is called with the effective resources required for each container within the pod.
ContainerFn func(res v1.ResourceList, containerType ContainerType)
// NonMissingContainerRequests if provided will replace any missing container level requests for the specified resources
// with the given values. If the requests for those resources are explicitly set, even if zero, they will not be modified.
NonMissingContainerRequests v1.ResourceList
// SkipPodLevelResources controls whether pod-level resources should be skipped
// from the calculation. If pod-level resources are not set in PodSpec,
// pod-level resources will always be skipped.
SkipPodLevelResources bool
// SkipContainerLevelResources controls whether container-level resources should be
// skipped from the calculation.
SkipContainerLevelResources bool
}
var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory)
func SupportedPodLevelResources() sets.Set[v1.ResourceName] {
return supportedPodLevelResources.Clone().Insert(v1.ResourceHugePagesPrefix)
}
// IsSupportedPodLevelResource checks if a given resource is supported by pod-level
// resource management through the PodLevelResources feature. Returns true if
// the resource is supported.
func IsSupportedPodLevelResource(name v1.ResourceName) bool {
return supportedPodLevelResources.Has(name) || strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
}
// IsPodLevelResourcesSet checks if PodLevelResources pod-level resources are set.
// It returns true if either the Requests or Limits maps are non-empty.
func IsPodLevelResourcesSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}
if (len(pod.Spec.Resources.Requests) + len(pod.Spec.Resources.Limits)) == 0 {
return false
}
for resourceName := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
for resourceName := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
return false
}
// IsPodLevelRequestsSet checks if pod-level requests are set. It returns true if
// Requests map is non-empty.
func IsPodLevelRequestsSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}
if len(pod.Spec.Resources.Requests) == 0 {
return false
}
for resourceName := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
return false
}
// IsPodLevelLimitsSet checks if pod-level limits are set. It returns true if
// Limits map is non-empty and contains at least one supported pod-level resource.
func IsPodLevelLimitsSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}
if len(pod.Spec.Resources.Limits) == 0 {
return false
}
for resourceName := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
return false
}
// PodRequests computes the total pod requests per the PodResourcesOptions supplied.
// If PodResourcesOptions is nil, then the requests are returned including pod overhead.
// If the PodLevelResources feature is enabled AND the pod-level resources are set,
// those pod-level values are used in calculating Pod Requests.
// The computation is part of the API and must be reviewed as an API change.
func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
reqs := v1.ResourceList{}
if !opts.SkipContainerLevelResources {
reqs = AggregateContainerRequests(pod, opts)
}
if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) {
for resourceName, quantity := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
reqs[resourceName] = quantity
}
}
}
// Add overhead for running a pod to the sum of requests if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
addResourceList(reqs, pod.Spec.Overhead)
}
return reqs
}
// AggregateContainerRequests computes the total resource requests of all the containers
// in a pod. This computation follows the formula defined in the KEP for sidecar
// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for more details.
func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
reqs := reuseOrClearResourceList(opts.Reuse)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
for i := range pod.Status.InitContainerStatuses {
containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
}
}
for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerReqs = determineContainerReqs(pod, &container, cs)
}
}
if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerReqs, Containers)
}
addResourceList(reqs, containerReqs)
}
restartableInitContainerReqs := v1.ResourceList{}
initContainerReqs := v1.ResourceList{}
// init containers define the minimum of any resource
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
for _, container := range pod.Spec.InitContainers {
containerReqs := container.Resources.Requests
if opts.UseStatusResources {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerReqs = determineContainerReqs(pod, &container, cs)
}
}
}
if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
}
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
// and add them to the resulting cumulative container requests
addResourceList(reqs, containerReqs)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerReqs, containerReqs)
containerReqs = restartableInitContainerReqs
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerReqs)
addResourceList(tmp, restartableInitContainerReqs)
containerReqs = tmp
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerReqs, InitContainers)
}
maxResourceList(initContainerReqs, containerReqs)
}
maxResourceList(reqs, initContainerReqs)
return reqs
}
// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
return max(cs.Resources.Requests, cs.AllocatedResources)
}
return max(container.Resources.Requests, cs.Resources.Requests, cs.AllocatedResources)
}
// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not.
func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
return cs.Resources.Limits.DeepCopy()
}
return max(container.Resources.Limits, cs.Resources.Limits)
}
// IsPodResizeInfeasible returns true if the pod condition PodResizePending is set to infeasible.
func IsPodResizeInfeasible(pod *v1.Pod) bool {
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodResizePending {
return condition.Reason == v1.PodReasonInfeasible
}
}
return false
}
// IsPodResizeDeferred returns true if the pod condition PodResizePending is set to deferred.
func IsPodResizeDeferred(pod *v1.Pod) bool {
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodResizePending {
return condition.Reason == v1.PodReasonDeferred
}
}
return false
}
// applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values
func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList {
cp := v1.ResourceList{}
for k, v := range reqs {
cp[k] = v.DeepCopy()
}
for k, v := range nonMissing {
if _, found := reqs[k]; !found {
rk := cp[k]
rk.Add(v)
cp[k] = rk
}
}
return cp
}
// PodLimits computes the pod limits per the PodResourcesOptions supplied. If PodResourcesOptions is nil, then
// the limits are returned including pod overhead for any non-zero limits. The computation is part of the API and must be reviewed
// as an API change.
func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := AggregateContainerLimits(pod, opts)
if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) {
for resourceName, quantity := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
limits[resourceName] = quantity
}
}
}
// Add overhead to non-zero limits if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
for name, quantity := range pod.Spec.Overhead {
if value, ok := limits[name]; ok && !value.IsZero() {
value.Add(quantity)
limits[name] = value
}
}
}
return limits
}
// AggregateContainerLimits computes the aggregated resource limits of all the containers
// in a pod. This computation follows the formula defined in the KEP for sidecar
// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for more details.
func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := reuseOrClearResourceList(opts.Reuse)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
for i := range pod.Status.InitContainerStatuses {
containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
}
}
for _, container := range pod.Spec.Containers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerLimits = determineContainerLimits(pod, &container, cs)
}
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, Containers)
}
addResourceList(limits, containerLimits)
}
restartableInitContainerLimits := v1.ResourceList{}
initContainerLimits := v1.ResourceList{}
// init containers define the minimum of any resource
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
for _, container := range pod.Spec.InitContainers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerLimits = determineContainerLimits(pod, &container, cs)
}
}
}
// Is the init container marked as a restartable init container?
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
addResourceList(limits, containerLimits)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerLimits, containerLimits)
containerLimits = restartableInitContainerLimits
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerLimits)
addResourceList(tmp, restartableInitContainerLimits)
containerLimits = tmp
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, InitContainers)
}
maxResourceList(initContainerLimits, containerLimits)
}
maxResourceList(limits, initContainerLimits)
return limits
}
// addResourceList adds the resources in newList to list.
func addResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok {
list[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
list[name] = value
}
}
}
// maxResourceList sets list to the greater of list/newList for every resource in newList
func maxResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 {
list[name] = quantity.DeepCopy()
}
}
}
// max returns the result of max(a, b...) for each named resource and is only used if we can't
// accumulate into an existing resource list
func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList {
var result v1.ResourceList
if a != nil {
result = a.DeepCopy()
} else {
result = v1.ResourceList{}
}
for _, other := range b {
maxResourceList(result, other)
}
return result
}
// reuseOrClearResourceList is a helper for avoiding excessive allocations of
// resource lists within the inner loop of resource calculations.
func reuseOrClearResourceList(reuse v1.ResourceList) v1.ResourceList {
if reuse == nil {
return make(v1.ResourceList, 4)
}
for k := range reuse {
delete(reuse, k)
}
return reuse
}
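
To make the KEP-753 aggregation in AggregateContainerRequests concrete, a worked sketch with invented numbers: the app container requests 300m CPU, a restartable sidecar 100m, and a one-shot init container 500m. Steady state needs 300m + 100m = 400m; while the one-shot init runs, it shares the node with the already-started sidecar, so InitContainerUse = 100m + 500m = 600m; the pod request is the max of the two, 600m:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
	"k8s.io/utils/ptr"
)

// cpu is a small helper for requests-only resource requirements.
func cpu(q string) v1.ResourceRequirements {
	return v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse(q)},
	}
}

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{
				// Restartable sidecar: keeps running, so it is summed into the total.
				{Name: "sidecar", RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), Resources: cpu("100m")},
				// One-shot init: runs alongside the sidecar started before it, so
				// InitContainerUse = 100m (sidecar) + 500m = 600m.
				{Name: "init", Resources: cpu("500m")},
			},
			Containers: []v1.Container{{Name: "app", Resources: cpu("300m")}},
		},
	}
	reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})
	// max(steady state 300m+100m = 400m, init peak 600m) = 600m
	fmt.Println(reqs.Cpu().String()) // 600m
}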

vendor/modules.txt vendored (+1)

@@ -1300,6 +1300,7 @@ k8s.io/component-base/zpages/httputil
k8s.io/component-base/zpages/statusz
# k8s.io/component-helpers v0.34.0
## explicit; go 1.24.0
+k8s.io/component-helpers/resource
k8s.io/component-helpers/scheduling/corev1
k8s.io/component-helpers/scheduling/corev1/nodeaffinity
# k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f