@@ -37,7 +37,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/clock"
 	clocktesting "k8s.io/utils/clock/testing"
-	"k8s.io/utils/pointer"
 	"k8s.io/utils/ptr"
 	schedv1alpha1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
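
The remaining hunks all apply the same mechanical substitution: the helpers from the deprecated k8s.io/utils/pointer package (pointer.Bool, pointer.Int32, ...) are replaced by the generic k8s.io/utils/ptr package, which is why the old import is dropped here. A minimal standalone sketch of the equivalence (not part of the diff itself):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To infers the pointer type from its argument.
	suspend := ptr.To(true) // *bool, replaces pointer.Bool(true)

	// An explicit type argument is needed when inference would pick
	// the wrong type: ptr.To(10) would yield *int, not *int32.
	slots := ptr.To[int32](10) // *int32, replaces pointer.Int32(10)

	// ptr.Deref reads through a possibly-nil pointer with a default,
	// replacing the pointer.BoolDeref/Int32Deref family.
	var unset *bool
	fmt.Println(*suspend, *slots, ptr.Deref(unset, false))
}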
@@ -804,7 +803,7 @@ func TestCreateSuspendedMPIJob(t *testing.T) {
 	// create a suspended job
 	var replicas int32 = 8
 	mpiJob := newMPIJob("test", &replicas, nil, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 	mpiJob.Spec.MPIImplementation = implementation
 	f.setUpMPIJob(mpiJob)
 
@@ -823,7 +822,7 @@ func TestCreateSuspendedMPIJob(t *testing.T) {
 	// expect creating of the launcher
 	fmjc := f.newFakeMPIJobController()
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(true)
+	launcher.Spec.Suspend = ptr.To(true)
 	f.expectCreateJobAction(launcher)
 
 	// expect an update to add the conditions
@@ -851,7 +850,7 @@ func TestSuspendedRunningMPIJob(t *testing.T) {
 	var replicas int32 = 8
 	startTime := metav1.Now()
 	mpiJob := newMPIJob("test", &replicas, &startTime, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)
 	msg := fmt.Sprintf("MPIJob %s/%s is created.", mpiJob.Namespace, mpiJob.Name)
 	updateMPIJobConditions(mpiJob, kubeflow.JobCreated, corev1.ConditionTrue, mpiJobCreatedReason, msg)
 	msg = fmt.Sprintf("MPIJob %s/%s is running.", mpiJob.Namespace, mpiJob.Name)
@@ -893,18 +892,18 @@ func TestSuspendedRunningMPIJob(t *testing.T) {
 
 	// setup launcher and its pod
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(false)
+	launcher.Spec.Suspend = ptr.To(false)
 	launcherPod := mockJobPod(launcher)
 	launcherPod.Status.Phase = corev1.PodRunning
 	f.setUpLauncher(launcher)
 	f.setUpPod(launcherPod)
 
 	// transition the MPIJob into suspended state
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 
 	// expect moving the launcher pod into suspended state
 	launcherCopy := launcher.DeepCopy()
-	launcherCopy.Spec.Suspend = pointer.Bool(true)
+	launcherCopy.Spec.Suspend = ptr.To(true)
 	f.expectUpdateJobAction(launcherCopy)
 
 	// expect removal of the pods
@@ -939,7 +938,7 @@ func TestResumeMPIJob(t *testing.T) {
 	var replicas int32 = 8
 	startTime := metav1.Now()
 	mpiJob := newMPIJob("test", &replicas, &startTime, nil)
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(true)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(true)
 	msg := fmt.Sprintf("MPIJob %s/%s is created.", mpiJob.Namespace, mpiJob.Name)
 	updateMPIJobConditions(mpiJob, kubeflow.JobCreated, corev1.ConditionTrue, mpiJobCreatedReason, msg)
 	updateMPIJobConditions(mpiJob, kubeflow.JobSuspended, corev1.ConditionTrue, mpiJobSuspendedReason, "MPIJob suspended")
@@ -966,14 +965,14 @@ func TestResumeMPIJob(t *testing.T) {
 	// expect creating of the launcher
 	fmjc := f.newFakeMPIJobController()
 	launcher := fmjc.newLauncherJob(mpiJob)
-	launcher.Spec.Suspend = pointer.Bool(true)
+	launcher.Spec.Suspend = ptr.To(true)
 	f.setUpLauncher(launcher)
 
 	// move the timer by a second so that the StartTime is updated after resume
 	fakeClock.Sleep(time.Second)
 
 	// resume the MPIJob
-	mpiJob.Spec.RunPolicy.Suspend = pointer.Bool(false)
+	mpiJob.Spec.RunPolicy.Suspend = ptr.To(false)
 
 	// expect creation of the pods
 	for i := 0; i < int(replicas); i++ {
@@ -983,7 +982,7 @@ func TestResumeMPIJob(t *testing.T) {
 
 	// expect the launcher update to resume it
 	launcherCopy := launcher.DeepCopy()
-	launcherCopy.Spec.Suspend = pointer.Bool(false)
+	launcherCopy.Spec.Suspend = ptr.To(false)
 	f.expectUpdateJobAction(launcherCopy)
 
 	// expect an update to add the conditions
@@ -1545,7 +1544,7 @@ func TestNewConfigMap(t *testing.T) {
 				},
 				Spec: kubeflow.MPIJobSpec{
 					MPIImplementation:   kubeflow.MPIImplementationOpenMPI,
-					RunLauncherAsWorker: pointer.Bool(true),
+					RunLauncherAsWorker: ptr.To(true),
 				},
 			},
 			workerReplicas: 2,
@@ -1570,7 +1569,7 @@ func TestNewConfigMap(t *testing.T) {
 				},
 				Spec: kubeflow.MPIJobSpec{
 					MPIImplementation:   kubeflow.MPIImplementationOpenMPI,
-					RunLauncherAsWorker: pointer.Bool(true),
+					RunLauncherAsWorker: ptr.To(true),
 				},
 			},
 			workerReplicas: 0,
@@ -1618,7 +1617,7 @@ func TestNewConfigMap(t *testing.T) {
 					Namespace: "project-x",
 				},
 				Spec: kubeflow.MPIJobSpec{
-					SlotsPerWorker:    pointer.Int32(10),
+					SlotsPerWorker:    ptr.To[int32](10),
 					MPIImplementation: kubeflow.MPIImplementationIntel,
 				},
 			},
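
Note the explicit type argument here and in the following hunk: SlotsPerWorker is an *int32, and a bare ptr.To(10) would infer *int, so the call is instantiated as ptr.To[int32] (ptr.To(int32(10)) would be equivalent).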
@@ -1643,7 +1642,7 @@ func TestNewConfigMap(t *testing.T) {
 					Namespace: "project-x",
 				},
 				Spec: kubeflow.MPIJobSpec{
-					SlotsPerWorker:    pointer.Int32(10),
+					SlotsPerWorker:    ptr.To[int32](10),
 					MPIImplementation: kubeflow.MPIImplementationMPICH,
 				},
 			},