fix: pod resource calculation to consider native sidecars
Previously, the descheduler code had copied an old version of PodRequestsAndLimits that does not consider native sidecars. It now relies on the resourcehelper libs, which will continue to receive upstream updates.

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
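To make the behavioral change concrete, here is a minimal, hypothetical sketch (not part of the commit), assuming the k8s.io/component-helpers/resource and k8s.io/utils/ptr modules used by the new code are on the module path. A native sidecar is an init container with RestartPolicy: Always; resourcehelper.PodRequests adds its requests on top of the regular containers, whereas the old copied helper only took the max over init containers.

// Hypothetical illustration, not part of the commit.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
	"k8s.io/utils/ptr"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			}},
			InitContainers: []v1.Container{{
				Name: "sidecar",
				// RestartPolicy: Always marks this init container as a native
				// sidecar, so it keeps running alongside the app container.
				RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways),
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")},
				},
			}},
		},
	}

	// resourcehelper sums the sidecar's requests with the regular containers,
	// so the effective CPU request is 300m. The old copied helper treated every
	// init container as max-only and would have reported just 200m.
	reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // 300m
}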
@@ -241,7 +241,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
 			return false, fmt.Errorf("insufficient %v", resource)
 		}
 	}
-	// check pod num, at least one pod number is avaibalbe
+	// check pod num, at least one pod number is available
 	if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
 		return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
 	}
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/utils/ptr"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/test"
 )
@@ -1020,6 +1021,64 @@ func TestNodeFit(t *testing.T) {
 			node: node,
 			podsOnNode: []*v1.Pod{},
 		},
+		{
+			description: "Pod with native sidecars with too much cpu does not fit on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
+					RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
+					Resources: v1.ResourceRequirements{
+						Requests: createResourceList(100000, 100*1000*1000, 0),
+					},
+				})
+			}),
+			node: node,
+			podsOnNode: []*v1.Pod{},
+			err: errors.New("insufficient cpu"),
+		},
+		{
+			description: "Pod with native sidecars with too much memory does not fit on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
+					RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
+					Resources: v1.ResourceRequirements{
+						Requests: createResourceList(100, 1000*1000*1000*1000, 0),
+					},
+				})
+			}),
+			node: node,
+			podsOnNode: []*v1.Pod{},
+			err: errors.New("insufficient memory"),
+		},
+		{
+			description: "Pod with small native sidecars fits on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
+					RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
+					Resources: v1.ResourceRequirements{
+						Requests: createResourceList(100, 100*1000*1000, 0),
+					},
+				})
+			}),
+			node: node,
+			podsOnNode: []*v1.Pod{},
+		},
+		{
+			description: "Pod with large overhead does not fit on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.Overhead = createResourceList(100000, 100*1000*1000, 0)
+			}),
+			node: node,
+			podsOnNode: []*v1.Pod{},
+			err: errors.New("insufficient cpu"),
+		},
+		{
+			description: "Pod with small overhead fits on node",
+			pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
+				pod.Spec.Overhead = createResourceList(1, 1*1000*1000, 0)
+			}),
+			node: node,
+			podsOnNode: []*v1.Pod{},
+		},
 	}
 
 	for _, tc := range tests {
@@ -11,24 +11,10 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	resourcehelper "k8s.io/component-helpers/resource"
 	"k8s.io/klog/v2"
 )
 
-// GetResourceRequest finds and returns the request value for a specific resource.
-func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
-	if resource == v1.ResourcePods {
-		return 1
-	}
-
-	requestQuantity := GetResourceRequestQuantity(pod, resource)
-
-	if resource == v1.ResourceCPU {
-		return requestQuantity.MilliValue()
-	}
-
-	return requestQuantity.Value()
-}
-
 // GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
 func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
 	requestQuantity := resource.Quantity{}
@@ -42,26 +28,8 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
 		requestQuantity = resource.Quantity{Format: resource.DecimalSI}
 	}
 
-	for _, container := range pod.Spec.Containers {
-		if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
-			requestQuantity.Add(rQuantity)
-		}
-	}
-
-	for _, container := range pod.Spec.InitContainers {
-		if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
-			if requestQuantity.Cmp(rQuantity) < 0 {
-				requestQuantity = rQuantity.DeepCopy()
-			}
-		}
-	}
-
-	// We assume pod overhead feature gate is enabled.
-	// We can't import the scheduler settings so we will inherit the default.
-	if pod.Spec.Overhead != nil {
-		if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
-			requestQuantity.Add(podOverhead)
-		}
+	if rQuantity, ok := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})[resourceName]; ok {
+		requestQuantity.Add(rQuantity)
 	}
 
 	return requestQuantity
@@ -171,59 +139,9 @@ func GetPodSource(pod *v1.Pod) (string, error) {
 // containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
 // total container resource requests and to the total container limits which have a
 // non-zero quantity.
-func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
-	reqs, limits = v1.ResourceList{}, v1.ResourceList{}
-	for _, container := range pod.Spec.Containers {
-		addResourceList(reqs, container.Resources.Requests)
-		addResourceList(limits, container.Resources.Limits)
-	}
-	// init containers define the minimum of any resource
-	for _, container := range pod.Spec.InitContainers {
-		maxResourceList(reqs, container.Resources.Requests)
-		maxResourceList(limits, container.Resources.Limits)
-	}
-
-	// We assume pod overhead feature gate is enabled.
-	// We can't import the scheduler settings so we will inherit the default.
-	if pod.Spec.Overhead != nil {
-		addResourceList(reqs, pod.Spec.Overhead)
-
-		for name, quantity := range pod.Spec.Overhead {
-			if value, ok := limits[name]; ok && !value.IsZero() {
-				value.Add(quantity)
-				limits[name] = value
-			}
-		}
-	}
-
-	return
-}
-
-// addResourceList adds the resources in newList to list
-func addResourceList(list, newList v1.ResourceList) {
-	for name, quantity := range newList {
-		if value, ok := list[name]; !ok {
-			list[name] = quantity.DeepCopy()
-		} else {
-			value.Add(quantity)
-			list[name] = value
-		}
-	}
-}
-
-// maxResourceList sets list to the greater of list/newList for every resource
-// either list
-func maxResourceList(list, new v1.ResourceList) {
-	for name, quantity := range new {
-		if value, ok := list[name]; !ok {
-			list[name] = quantity.DeepCopy()
-			continue
-		} else {
-			if quantity.Cmp(value) > 0 {
-				list[name] = quantity.DeepCopy()
-			}
-		}
-	}
+func PodRequestsAndLimits(pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
+	opts := resourcehelper.PodResourcesOptions{}
+	return resourcehelper.PodRequests(pod, opts), resourcehelper.PodLimits(pod, opts)
 }
 
 // PodToleratesTaints returns true if a pod tolerates one node's taints
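The new overhead test cases above ("Pod with large overhead does not fit on node", "Pod with small overhead fits on node") only pass if pod overhead is still counted after the switch. Here is another hypothetical sketch (not part of the commit), under the assumption that the default resourcehelper.PodResourcesOptions keeps folding pod.Spec.Overhead into the reported requests.

// Hypothetical illustration, not part of the commit.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			}},
			// RuntimeClass overhead, charged on top of the container requests.
			Overhead: v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m")},
		},
	}

	// Assumption: with an empty PodResourcesOptions, PodRequests includes pod
	// overhead, so the reported CPU request is 150m (100m container + 50m overhead).
	reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // expected: 150m
}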