Skip to content
4 changes: 3 additions & 1 deletion ray-operator/controllers/ray/common/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -1158,7 +1158,9 @@ func updateRayStartParamsResources(ctx context.Context, rayStartParams map[strin
} else if normalizedName == string(corev1.ResourceMemory) {
rayStartParams["memory"] = strconv.FormatInt(q.Value(), 10)
} else if utils.IsGPUResourceKey(normalizedName) {
rayStartParams["num-gpus"] = strconv.FormatInt(q.Value(), 10)
// Support fractional GPU values (e.g., 0.4 GPU per replica for multi-model serving)
// Convert to float to preserve decimal values for Ray autoscaler
rayStartParams["num-gpus"] = strconv.FormatFloat(q.AsApproximateFloat64(), 'f', -1, 64)
} else {
rayResourcesJson[name] = q.AsApproximateFloat64()
}
Expand Down
41 changes: 41 additions & 0 deletions ray-operator/controllers/ray/common/pod_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2167,3 +2167,44 @@ func TestUpdateRayStartParamsResources(t *testing.T) {
})
}
}

// TestUpdateRayStartParamsResources_WithFractionalGPU verifies that GPU quantities
// taken from a group's resource map are rendered as floats, so fractional values
// (e.g., 0.4 GPU per replica for multi-model serving) survive the conversion into
// Ray start parameters instead of being truncated to integers.
// See: https://github.com/ray-project/kuberay/issues/4447
func TestUpdateRayStartParamsResources_WithFractionalGPU(t *testing.T) {
	ctx := context.Background()

	tests := map[string]struct {
		groupResources map[string]string
		// expectedNumGPUs is the expected value of rayStartParams["num-gpus"];
		// an empty string means the key must not be set at all.
		expectedNumGPUs string
	}{
		"Fractional GPU as millicores": {
			groupResources:  map[string]string{"nvidia.com/gpu": "400m"}, // 400 millicores = 0.4 GPU
			expectedNumGPUs: "0.4",
		},
		"Single GPU": {
			groupResources:  map[string]string{"nvidia.com/gpu": "1"},
			expectedNumGPUs: "1",
		},
		"Multiple GPUs": {
			groupResources:  map[string]string{"nvidia.com/gpu": "4"},
			expectedNumGPUs: "4",
		},
		// Negative case: no GPU resource requested, so "num-gpus" must stay unset.
		"No GPU resource": {
			groupResources:  map[string]string{"cpu": "2"},
			expectedNumGPUs: "",
		},
	}

	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			rayStartParams := make(map[string]string)
			updateRayStartParamsResources(ctx, rayStartParams, tc.groupResources)

			val, ok := rayStartParams["num-gpus"]
			if tc.expectedNumGPUs == "" {
				assert.False(t, ok, "num-gpus should not be set when no GPU resource is requested")
				return
			}
			assert.True(t, ok, "num-gpus should be set in rayStartParams")
			assert.Equal(t, tc.expectedNumGPUs, val, "GPU value should match expected fractional value")
		})
	}
}
106 changes: 106 additions & 0 deletions ray-operator/test/e2e/raycluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,15 @@ import (
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/ptr"

rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
"github.com/ray-project/kuberay/ray-operator/controllers/ray/utils"
rayv1ac "github.com/ray-project/kuberay/ray-operator/pkg/client/applyconfiguration/ray/v1"
corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
. "github.com/ray-project/kuberay/ray-operator/test/support"
)

Expand Down Expand Up @@ -264,3 +266,107 @@ func TestRayClusterUpgradeStrategy(t *testing.T) {
g.Expect(err).NotTo(HaveOccurred())
g.Expect(newWorkerPods).To(HaveLen(1))
}

// TestRayClusterWithFractionalGPU tests that RayCluster correctly converts pod GPU resources
// to fractional num-gpus Ray parameters.
// This test demonstrates support for issue #4447 where fractional GPU serving (e.g., 0.4 GPU per model)
// is needed for efficient resource utilization when serving multiple models on a single GPU.
// The fix is in pod.go which converts GPU resources from pod.Resources.Requests using float instead of int.
// Reference: https://github.com/ray-project/kuberay/issues/4447
//
// NOTE: This test validates that KubeRay correctly generates Ray start parameters with fractional GPU values.
// The test won't schedule actual GPUs (nvidia.com/gpu resource) but validates the configuration generation.
func TestRayClusterWithFractionalGPU(t *testing.T) {
	test := With(t)
	g := NewWithT(t)

	// Create a namespace
	namespace := test.NewTestNamespace()

	// Define a RayCluster with fractional GPU resource request (0.4 GPU)
	// This tests the conversion from pod resources to Ray start parameters
	rayClusterAC := rayv1ac.RayCluster("ray-fractional-gpu", namespace.Name).
		WithSpec(rayv1ac.RayClusterSpec().
			WithRayVersion(GetRayVersion()).
			WithHeadGroupSpec(rayv1ac.HeadGroupSpec().
				WithRayStartParams(map[string]string{"num-cpus": "2"}).
				WithTemplate(HeadPodTemplateApplyConfiguration())).
			WithWorkerGroupSpecs(rayv1ac.WorkerGroupSpec().
				WithGroupName("gpu-workers").
				WithReplicas(1).
				WithMinReplicas(0).
				WithMaxReplicas(2).
				WithRayStartParams(map[string]string{
					"num-cpus": "1",
				}).
				WithTemplate(func() *corev1ac.PodTemplateSpecApplyConfiguration {
					// Create a fractional GPU quantity (0.4 GPU)
					// Using milliGPU representation: 400m = 0.4 GPU
					// NOTE(review): Kubernetes validates extended resources such as
					// nvidia.com/gpu as integer quantities, so a "400m" request is
					// expected to be rejected by the API server when the worker pod
					// is created. Confirm this test can actually produce a running
					// worker pod; if not, the fractional value may need to be
					// supplied via rayStartParams["num-gpus"] rather than via the
					// container's resource requests/limits.
					fractionalGPU := resource.MustParse("400m")

					return corev1ac.PodTemplateSpec().
						WithSpec(corev1ac.PodSpec().
							WithContainers(corev1ac.Container().
								WithName("ray-worker").
								WithImage(GetRayImage()).
								WithResources(corev1ac.ResourceRequirements().
									WithRequests(corev1.ResourceList{
										corev1.ResourceCPU: ptr.Deref(resource.NewQuantity(1, resource.DecimalSI), resource.Quantity{}),
										corev1.ResourceMemory: ptr.Deref(resource.NewQuantity(1*1024*1024*1024, resource.BinarySI), resource.Quantity{}),
										// Request 0.4 GPU - this should be converted to "num-gpus: 0.4" in Ray start params
										"nvidia.com/gpu": fractionalGPU,
									}).
									WithLimits(corev1.ResourceList{
										corev1.ResourceCPU: ptr.Deref(resource.NewQuantity(1, resource.DecimalSI), resource.Quantity{}),
										corev1.ResourceMemory: ptr.Deref(resource.NewQuantity(1*1024*1024*1024, resource.BinarySI), resource.Quantity{}),
										"nvidia.com/gpu": fractionalGPU,
									}))))
				}())))

	// Create the RayCluster
	rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
	g.Expect(err).NotTo(HaveOccurred(), "Failed to create RayCluster")
	LogWithTimestamp(t, "Created RayCluster %s/%s with fractional GPU (0.4) specification", rayCluster.Namespace, rayCluster.Name)

	// Get the worker pods to verify the GPU resource specification
	// Poll until the operator has created at least one worker pod for this cluster;
	// list errors are treated as "no pods yet" and retried until TestTimeoutMedium.
	g.Eventually(func() int {
		pods, err := test.Client().Core().CoreV1().Pods(namespace.Name).List(test.Ctx(), metav1.ListOptions{
			LabelSelector: "ray.io/cluster=" + rayCluster.Name + ",ray.io/node-type=worker",
		})
		if err != nil {
			return 0
		}
		return len(pods.Items)
	}, TestTimeoutMedium).Should(BeNumerically(">=", 1), "Worker pod should be created")

	// Verify that worker pods have been created with correct GPU specification
	workerPods, err := GetWorkerPods(test, rayCluster)
	g.Expect(err).NotTo(HaveOccurred())
	g.Expect(workerPods).To(HaveLen(1), "Expected 1 worker pod")

	// Verify the worker pod has the fractional GPU resource request
	workerPod := workerPods[0]
	container := workerPod.Spec.Containers[0]
	// Indexing a ResourceList yields the zero Quantity when the key is absent;
	// the IsZero check below guards against that case.
	gpuQuantity := container.Resources.Requests["nvidia.com/gpu"]

	// The GPU quantity should be 400m (0.4 GPU)
	g.Expect(gpuQuantity.IsZero()).To(BeFalse(), "Worker pod should have GPU request")
	g.Expect(gpuQuantity.String()).To(Equal("400m"), "Worker pod should request 0.4 GPU (400m)")

	// Verify the Ray start command includes the fractional num-gpus parameter
	// The operator should have added "num-gpus": "0.4" to the environment variable
	envVars := container.Env
	var rayStartCmd string
	for _, env := range envVars {
		if env.Name == "KUBERAY_GEN_RAY_START_CMD" {
			rayStartCmd = env.Value
			break
		}
	}
	g.Expect(rayStartCmd).NotTo(BeEmpty(), "Ray start command should be generated")
	g.Expect(rayStartCmd).To(ContainSubstring("--num-gpus=0.4"), "Ray start command should contain fractional GPU parameter '--num-gpus=0.4'")

	LogWithTimestamp(t, "✓ Worker pod GPU request: %s", gpuQuantity.String())
	LogWithTimestamp(t, "✓ Ray start command contains: --num-gpus=0.4")
	LogWithTimestamp(t, "✓ Test passed: Fractional GPU (0.4) correctly converted to Ray start parameter")
}