
Commit f6d3923

use ptr instead of pointer

Signed-off-by: kuizhiqing <[email protected]>

1 parent: a6c2da8

6 files changed (+57, -59 lines)
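The change swaps the older k8s.io/utils/pointer helpers (deprecated in favor of k8s.io/utils/ptr) for the generic ptr package: per-type constructors such as pointer.Bool and pointer.Int32 become ptr.To, and per-type deref helpers such as pointer.BoolDeref become ptr.Deref. A minimal standalone sketch of the before/after call shapes, assuming the k8s.io/utils module is in go.mod (not code from this repository):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To replaces the per-type constructors (pointer.Bool, pointer.Int32, ...);
	// the type parameter is usually inferred from the argument.
	suspend := ptr.To(true)    // *bool
	slots := ptr.To[int32](10) // *int32; an untyped 10 alone would infer int

	// ptr.Deref replaces the per-type deref helpers (pointer.BoolDeref, ...):
	// it returns the pointed-to value, or the given default when the pointer is nil.
	fmt.Println(ptr.Deref(suspend, false)) // true
	fmt.Println(ptr.Deref[int32](nil, 1))  // 1: a nil pointer falls back to the default
	fmt.Println(*slots)                    // 10
}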

pkg/controller/mpi_job_controller.go (+4, -5)

@@ -53,7 +53,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog"
 	"k8s.io/utils/clock"
-	"k8s.io/utils/pointer"
 	"k8s.io/utils/ptr"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
 	volcanoclient "volcano.sh/apis/pkg/client/clientset/versioned"
@@ -673,7 +672,7 @@ func (c *MPIJobController) syncHandler(key string) error {
 	if launcher != nil {
 		if isMPIJobSuspended(mpiJob) != isJobSuspended(launcher) {
 			// align the suspension state of launcher with the MPIJob
-			launcher.Spec.Suspend = pointer.Bool(isMPIJobSuspended(mpiJob))
+			launcher.Spec.Suspend = ptr.To(isMPIJobSuspended(mpiJob))
 			if _, err := c.kubeClient.BatchV1().Jobs(namespace).Update(context.TODO(), launcher, metav1.UpdateOptions{}); err != nil {
 				return err
 			}
@@ -998,11 +997,11 @@ func (c *MPIJobController) getOrCreateWorker(mpiJob *kubeflow.MPIJob) ([]*corev1
 }

 func isMPIJobSuspended(mpiJob *kubeflow.MPIJob) bool {
-	return pointer.BoolDeref(mpiJob.Spec.RunPolicy.Suspend, false)
+	return ptr.Deref(mpiJob.Spec.RunPolicy.Suspend, false)
 }

 func isJobSuspended(job *batchv1.Job) bool {
-	return pointer.BoolDeref(job.Spec.Suspend, false)
+	return ptr.Deref(job.Spec.Suspend, false)
 }

 func (c *MPIJobController) deleteWorkerPods(mpiJob *kubeflow.MPIJob) error {
@@ -1486,7 +1485,7 @@ func (c *MPIJobController) newLauncherJob(mpiJob *kubeflow.MPIJob) *batchv1.Job
 		},
 	}
 	if isMPIJobSuspended(mpiJob) {
-		job.Spec.Suspend = pointer.Bool(true)
+		job.Spec.Suspend = ptr.To(true)
 	}
 	return job
 }
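In the suspend path above, ptr.Deref preserves the previous nil handling: an unset Suspend field still reads as "not suspended". A small standalone sketch of that behavior (the mpiJobSuspended variable is a stand-in for isMPIJobSuspended(mpiJob), not the controller's actual code):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/utils/ptr"
)

func main() {
	job := &batchv1.Job{} // Spec.Suspend is nil on a fresh Job

	// nil means "not suspended", exactly as pointer.BoolDeref(job.Spec.Suspend, false) did.
	fmt.Println(ptr.Deref(job.Spec.Suspend, false)) // false

	// Aligning the Job with a suspended MPIJob now uses ptr.To instead of pointer.Bool.
	mpiJobSuspended := true
	job.Spec.Suspend = ptr.To(mpiJobSuspended)
	fmt.Println(ptr.Deref(job.Spec.Suspend, false)) // true
}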

pkg/controller/mpi_job_controller_test.go (+14, -15)

@@ -37,7 +37,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/clock"
 	clocktesting "k8s.io/utils/clock/testing"
-	"k8s.io/utils/pointer"
 	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
@@ -804,7 +803,7 @@ func TestCreateSuspendedMPIJob(t *testing.T) {
 	// create a suspended job
 	var replicas int32 = 8
 	mpiJob := newMPIJob("test", &replicas, nil, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 	mpiJob.Spec.MPIImplementation = implementation
 	f.setUpMPIJob(mpiJob)

@@ -823,7 +822,7 @@
 	// expect creating of the launcher
 	fmjc := f.newFakeMPIJobController()
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(true)
+	launcher.Spec.Suspend = ptr.To(true)
 	f.expectCreateJobAction(launcher)

 	// expect an update to add the conditions
@@ -851,7 +850,7 @@ func TestSuspendedRunningMPIJob(t *testing.T) {
 	var replicas int32 = 8
 	startTime := metav1.Now()
 	mpiJob := newMPIJob("test", &replicas, &startTime, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)
 	msg := fmt.Sprintf("MPIJob %s/%s is created.", mpiJob.Namespace, mpiJob.Name)
 	updateMPIJobConditions(mpiJob, kubeflow.JobCreated, corev1.ConditionTrue, mpiJobCreatedReason, msg)
 	msg = fmt.Sprintf("MPIJob %s/%s is running.", mpiJob.Namespace, mpiJob.Name)
@@ -893,18 +892,18 @@

 	// setup launcher and its pod
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(false)
+	launcher.Spec.Suspend = ptr.To(false)
 	launcherPod := mockJobPod(launcher)
 	launcherPod.Status.Phase = corev1.PodRunning
 	f.setUpLauncher(launcher)
 	f.setUpPod(launcherPod)

 	// transition the MPIJob into suspended state
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)

 	// expect moving the launcher pod into suspended state
 	launcherCopy := launcher.DeepCopy()
-	launcherCopy.Spec.Suspend = pointer.Bool(true)
+	launcherCopy.Spec.Suspend = ptr.To(true)
 	f.expectUpdateJobAction(launcherCopy)

 	// expect removal of the pods
@@ -939,7 +938,7 @@ func TestResumeMPIJob(t *testing.T) {
 	var replicas int32 = 8
 	startTime := metav1.Now()
 	mpiJob := newMPIJob("test", &replicas, &startTime, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 	msg := fmt.Sprintf("MPIJob %s/%s is created.", mpiJob.Namespace, mpiJob.Name)
 	updateMPIJobConditions(mpiJob, kubeflow.JobCreated, corev1.ConditionTrue, mpiJobCreatedReason, msg)
 	updateMPIJobConditions(mpiJob, kubeflow.JobSuspended, corev1.ConditionTrue, mpiJobSuspendedReason, "MPIJob suspended")
@@ -966,14 +965,14 @@
 	// expect creating of the launcher
 	fmjc := f.newFakeMPIJobController()
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(true)
+	launcher.Spec.Suspend = ptr.To(true)
 	f.setUpLauncher(launcher)

 	// move the timer by a second so that the StartTime is updated after resume
 	fakeClock.Sleep(time.Second)

 	// resume the MPIJob
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)

 	// expect creation of the pods
 	for i := 0; i < int(replicas); i++ {
@@ -983,7 +982,7 @@

 	// expect the launcher update to resume it
 	launcherCopy := launcher.DeepCopy()
-	launcherCopy.Spec.Suspend = pointer.Bool(false)
+	launcherCopy.Spec.Suspend = ptr.To(false)
 	f.expectUpdateJobAction(launcherCopy)

 	// expect an update to add the conditions
@@ -1545,7 +1544,7 @@ func TestNewConfigMap(t *testing.T) {
 			},
 			Spec: kubeflow.MPIJobSpec{
 				MPIImplementation:   kubeflow.MPIImplementationOpenMPI,
-				RunLauncherAsWorker: pointer.Bool(true),
+				RunLauncherAsWorker: ptr.To(true),
 			},
 		},
 		workerReplicas: 2,
@@ -1570,7 +1569,7 @@
 			},
 			Spec: kubeflow.MPIJobSpec{
 				MPIImplementation:   kubeflow.MPIImplementationOpenMPI,
-				RunLauncherAsWorker: pointer.Bool(true),
+				RunLauncherAsWorker: ptr.To(true),
 			},
 		},
 		workerReplicas: 0,
@@ -1618,7 +1617,7 @@
 				Namespace: "project-x",
 			},
 			Spec: kubeflow.MPIJobSpec{
-				SlotsPerWorker:    pointer.Int32(10),
+				SlotsPerWorker:    ptr.To[int32](10),
 				MPIImplementation: kubeflow.MPIImplementationIntel,
 			},
 		},
@@ -1643,7 +1642,7 @@
 				Namespace: "project-x",
 			},
 			Spec: kubeflow.MPIJobSpec{
-				SlotsPerWorker:    pointer.Int32(10),
+				SlotsPerWorker:    ptr.To[int32](10),
 				MPIImplementation: kubeflow.MPIImplementationMPICH,
 			},
 		},
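One detail in these test fixtures: ptr.To infers its type parameter from the argument, so ptr.To(true) produces a *bool directly, while an untyped constant such as 10 would infer int; that is why SlotsPerWorker uses the explicit ptr.To[int32](10). A quick standalone illustration of the inference (not part of the test file):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	b := ptr.To(true)      // inferred as *bool
	n := ptr.To(10)        // inferred as *int, which would not assign to an *int32 field
	s := ptr.To[int32](10) // explicit type parameter yields the *int32 the spec expects

	fmt.Printf("%T %T %T\n", b, n, s) // *bool *int *int32
}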

pkg/controller/podgroup.go (+5, -5)

@@ -25,7 +25,7 @@ import (
 	schedulinglisters "k8s.io/client-go/listers/scheduling/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog"
-	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
 	schedinformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions"
@@ -242,7 +242,7 @@ func (s *SchedulerPluginsCtrl) newPodGroup(mpiJob *kubeflow.MPIJob) metav1.Objec
 	if mpiJob == nil {
 		return nil
 	}
-	scheduleTimeoutSec := pointer.Int32(0)
+	scheduleTimeoutSec := ptr.To[int32](0)
 	if schedPolicy := mpiJob.Spec.RunPolicy.SchedulingPolicy; schedPolicy != nil && schedPolicy.ScheduleTimeoutSeconds != nil {
 		scheduleTimeoutSec = schedPolicy.ScheduleTimeoutSeconds
 	}
@@ -364,9 +364,9 @@ func calPGMinResource(minMember *int32, mpiJob *kubeflow.MPIJob, pcLister schedu
 			klog.Warningf("Couldn't find the worker replicas")
 			return nil
 		}
-		order[wIndex].Replicas = pointer.Int32(*minMember - 1)
+		order[wIndex].Replicas = ptr.To(*minMember - 1)
 	} else {
-		order[1].Replicas = pointer.Int32(*minMember - 1)
+		order[1].Replicas = ptr.To(*minMember - 1)
 	}
 }

@@ -390,7 +390,7 @@ func calculateMinAvailable(mpiJob *kubeflow.MPIJob) *int32 {
 	if schedulingPolicy := mpiJob.Spec.RunPolicy.SchedulingPolicy; schedulingPolicy != nil && schedulingPolicy.MinAvailable != nil {
 		return schedulingPolicy.MinAvailable
 	}
-	return pointer.Int32(workerReplicas(mpiJob) + 1)
+	return ptr.To(workerReplicas(mpiJob) + 1)
 }

 // calculatePriorityClassName calculates the priorityClass name needed for podGroup according to the following priorities:
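In podgroup.go the replacements also cover computed values: ptr.To(*minMember - 1) and ptr.To(workerReplicas(mpiJob) + 1) infer int32 from the expression itself, so no explicit type parameter is needed. Below is a hedged sketch of the default-minAvailable pattern; minAvailableOrDefault and its parameters are illustrative names, not the controller's API:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

// minAvailableOrDefault mirrors the calculateMinAvailable pattern: honor an explicit
// override when one is set, otherwise default to workers+1 (workers plus the launcher).
func minAvailableOrDefault(override *int32, workers int32) *int32 {
	if override != nil {
		return override
	}
	// workers is already int32, so ptr.To infers *int32 without a type parameter.
	return ptr.To(workers + 1)
}

func main() {
	fmt.Println(*minAvailableOrDefault(nil, 8))              // 9
	fmt.Println(*minAvailableOrDefault(ptr.To[int32](4), 8)) // 4
}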
