 # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
 import gc
 import collections
+import warnings

+from coremltools import ComputeUnit as _ComputeUnit
 from coremltools.converters.mil.mil.passes.quantization_passes import AbstractQuantizationPass, FP16ComputePrecision
 from coremltools.converters.mil.mil.passes.quantization_passes import ComputePrecision as precision
 from coremltools.converters.mil.input_types import InputType, ClassifierConfig
@@ -43,6 +45,7 @@ def convert(
     convert_to=None,
     compute_precision=None,
     skip_model_load=False,
+    compute_units=_ComputeUnit.ALL,
     **kwargs
 ):
     """
@@ -199,10 +202,9 @@ def convert(

                  The above casts all the float32 tensors to be float16, except
                  the input/output tensors to any ``linear`` op.
-            - If ``None``, the parameter defaults to ``coremltools.precision.FLOAT32``.
-        - TODO: rdar://74140243.
-            - Before coremltools 5.0 release, change the default
-              to coremltools.precision.FLOAT16 when convert_to="mlprogram"
+            - If ``None``,
+                - when convert_to="mlprogram", compute_precision defaults to ``coremltools.precision.FLOAT16``.
+                - when convert_to="neuralnetwork", compute_precision must be None; it has no effect for that backend.

     skip_model_load : bool
         Set to True to prevent coremltools from calling into the Core ML framework
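To illustrate the new default, a minimal usage sketch follows; the traced_model and input shape are hypothetical placeholders, used only to contrast the implicit fp16 default with an explicit fp32 request.

    import coremltools as ct

    # With convert_to="mlprogram" and compute_precision left as None, the converter
    # now applies the fp16 cast by default.
    model_fp16 = ct.convert(
        traced_model,  # hypothetical traced TorchScript model
        inputs=[ct.TensorType(shape=(1, 3, 224, 224))],
        convert_to="mlprogram",
    )

    # Request float32 explicitly to keep the pre-5.0 behavior.
    model_fp32 = ct.convert(
        traced_model,
        inputs=[ct.TensorType(shape=(1, 3, 224, 224))],
        convert_to="mlprogram",
        compute_precision=ct.precision.FLOAT32,
    )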
@@ -216,6 +218,14 @@ def convert(
         can only be compiled and loaded from macOS12+.
         Defaults to False.

+    compute_units: coremltools.ComputeUnit
+        An enum with three possible values:
+            - coremltools.ComputeUnit.ALL - use all compute units available, including the
+              neural engine.
+            - coremltools.ComputeUnit.CPU_ONLY - limit the model to only use the CPU.
+            - coremltools.ComputeUnit.CPU_AND_GPU - use both the CPU and GPU, but not the
+              neural engine.
+
     Returns
     -------
     model : ``coremltools.models.MLModel`` or ``coremltools.converters.mil.Program``
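A short sketch of passing the new compute_units argument; traced_model is again a hypothetical model source.

    import coremltools as ct

    # Restrict execution to the CPU at conversion time via the ComputeUnit enum.
    model_cpu = ct.convert(
        traced_model,  # hypothetical traced TorchScript model
        inputs=[ct.TensorType(shape=(1, 3, 224, 224))],
        compute_units=ct.ComputeUnit.CPU_ONLY,
    )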
@@ -272,13 +282,14 @@ def convert(
     _validate_inputs(model, exact_source, inputs, outputs, classifier_config, compute_precision,
                      exact_target, **kwargs)

+    if "useCPUOnly" in kwargs and kwargs["useCPUOnly"]:
+        warnings.warn('The "useCPUOnly" parameter is deprecated and will be removed in 6.0. '
+                      'Use the compute_units parameter: "compute_units=coremltools.ComputeUnit.CPU_ONLY".')
+        compute_units = _ComputeUnit.CPU_ONLY
+

     if compute_precision is None:
-        # TODO: rdar://74140243
-        # Before 5.0 release,
-        # map "None" to "fp32" for "neuralnetwork"
-        # and to "fp16" for "mlprogram"
-        transforms = list()
+        transforms = [FP16ComputePrecision(op_selector=lambda op: True)] if convert_to != "neuralnetwork" else list()
     elif compute_precision == precision.FLOAT32:
         transforms = list()
     elif compute_precision == precision.FLOAT16:
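Given the shim above, both spellings below should end up limiting the model to the CPU; the second is the preferred form going forward (traced_model and the input shape are hypothetical).

    import coremltools as ct

    # Deprecated: emits a warning and is mapped internally to CPU_ONLY.
    model_old = ct.convert(
        traced_model,  # hypothetical traced TorchScript model
        inputs=[ct.TensorType(shape=(1, 3, 224, 224))],
        useCPUOnly=True,
    )

    # Preferred: pass the ComputeUnit enum directly.
    model_new = ct.convert(
        traced_model,
        inputs=[ct.TensorType(shape=(1, 3, 224, 224))],
        compute_units=ct.ComputeUnit.CPU_ONLY,
    )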
@@ -295,8 +306,9 @@ def convert(
         inputs=inputs,
         outputs=outputs,
         classifier_config=classifier_config,
-        transforms=transforms,
+        transforms=tuple(transforms),
         skip_model_load=skip_model_load,
+        compute_units=compute_units,
         **kwargs
     )

@@ -355,12 +367,11 @@ def raise_if_duplicated(input_list):
             msg = '"classifier_config" must be of type ClassifierConfig'
             raise ValueError(msg)

-    if convert_to.lower() == 'neuralnetwork':
-        if compute_precision is not None:
-            if compute_precision != precision.FLOAT32:
-                msg = "'compute_precision' must be coremltools.precision.FLOAT32 when " \
-                    "the target is 'neuralnetwork' (i.e. deployment target is less than iOS15)"
-                raise ValueError(msg)
+    if convert_to.lower() == 'neuralnetwork' and compute_precision is not None:
+        msg = "compute_precision is only supported for the mlprogram target and must be None when target=='neuralnetwork'.\n" \
+              "Note that the target may be set implicitly, depending on minimum_deployment_target.\n" \
+              "See minimum_deployment_target for more details."
+        raise ValueError(msg)

     if compute_precision is not None:
         if compute_precision not in [precision.FLOAT32, precision.FLOAT16]:
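Per the tightened check above, the neuralnetwork backend now rejects any explicit compute_precision, including FLOAT32, which the old code accepted; a sketch with a hypothetical traced_model:

    import coremltools as ct

    try:
        ct.convert(
            traced_model,  # hypothetical traced TorchScript model
            inputs=[ct.TensorType(shape=(1, 3, 224, 224))],
            convert_to="neuralnetwork",
            compute_precision=ct.precision.FLOAT32,
        )
    except ValueError as err:
        print(err)  # compute_precision is only supported for the mlprogram target ...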