Skip to content

Commit c6354af

Browse files
authored
5.0b3 Release (#1285)
* 5.0b3 Release * Update wheel path in GitLab CI yaml * Skip test for macOS 11
1 parent b3f98d7 commit c6354af

File tree

84 files changed

+3000
-684
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

84 files changed

+3000
-684
lines changed

.github/ISSUE_TEMPLATE/---feature-request.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ assignees: ''
99

1010
## 🌱 Describe your Feature Request
1111
- A clear and concise description of what the problem is.
12-
- CoreML / iOS version you are using?
12+
- Core ML / iOS version you are using?
1313
- Are you interested in contributing?
1414

1515
## Use cases

.gitlab-ci.yml

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,7 @@ test_macos11_py37_coremltools_test:
128128
dependencies:
129129
- build_wheel_macos_py37
130130
variables:
131-
WHEEL_PATH: build/dist/*cp37*10_16*
131+
WHEEL_PATH: build/dist/*cp37*10_15*
132132
TEST_PACKAGE: coremltools.test
133133
PYTHON: "3.7"
134134

@@ -141,7 +141,7 @@ test_macos11_py37_pytorch:
141141
variables:
142142
PYTHON: "3.7"
143143
TEST_PACKAGE: coremltools.converters.mil.frontend.torch
144-
WHEEL_PATH: build/dist/*cp37*10_16*
144+
WHEEL_PATH: build/dist/*cp37*10_15*
145145

146146
test_macos11_py37_tf1:
147147
<<: *test_macos_pkg
@@ -152,7 +152,7 @@ test_macos11_py37_tf1:
152152
variables:
153153
PYTHON: "3.7"
154154
TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow
155-
WHEEL_PATH: build/dist/*cp37*10_16*
155+
WHEEL_PATH: build/dist/*cp37*10_15*
156156

157157
test_macos11_py37_tf2:
158158
<<: *test_macos_pkg_with_reqs
@@ -164,7 +164,7 @@ test_macos11_py37_tf2:
164164
PYTHON: "3.7"
165165
REQUIREMENTS: reqs/test_tf2.pip
166166
TEST_PACKAGE: coremltools.converters.mil.frontend.tensorflow2
167-
WHEEL_PATH: build/dist/*cp37*10_16*
167+
WHEEL_PATH: build/dist/*cp37*10_15*
168168

169169
test_macos11_py37_mil:
170170
<<: *test_macos_pkg
@@ -175,7 +175,7 @@ test_macos11_py37_mil:
175175
variables:
176176
PYTHON: "3.7"
177177
TEST_PACKAGE: coremltools.converters.mil
178-
WHEEL_PATH: build/dist/*cp37*10_16*
178+
WHEEL_PATH: build/dist/*cp37*10_15*
179179

180180
#########################################################################
181181
##
@@ -190,7 +190,7 @@ test_macos11_py38_coremltools_smoke_test:
190190
dependencies:
191191
- build_wheel_macos_py38
192192
variables:
193-
WHEEL_PATH: build/dist/*cp38*10_16*
193+
WHEEL_PATH: build/dist/*cp38*10_15*
194194
TEST_PACKAGE: coremltools.test.neural_network.test_simple_nn_inference
195195
PYTHON: "3.8"
196196

CMakeLists.txt

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,15 @@ endif()
194194

195195
set(PYTHON_TAG "cp${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}")
196196
if(APPLE)
197-
set(PLAT_NAME "macosx_10_16_intel;macosx_10_15_intel;macosx_10_14_intel;macosx_10_13_intel;macosx_10_12_intel")
197+
execute_process(COMMAND uname -m OUTPUT_VARIABLE HARDWARE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE)
198+
if(${HARDWARE_NAME} MATCHES "x86_64")
199+
set(MIN_MAC_OS "10_15")
200+
elseif(${HARDWARE_NAME} MATCHES "arm64")
201+
set(MIN_MAC_OS "11_0")
202+
else()
203+
message(FATAL_ERROR "Unsupported hardware type. On macOS, x86_64 and arm64 are supported.")
204+
endif()
205+
set(PLAT_NAME "macosx_${MIN_MAC_OS}_${HARDWARE_NAME}")
198206
elseif("${CMAKE_SYSTEM_NAME}" MATCHES "Linux")
199207
set(PLAT_NAME "manylinux1_x86_64")
200208
else()

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ The coremltools 5 package offers several performance improvements over previous
3434
To install coremltools 5, use the following command:
3535

3636
```shell
37-
pip install coremltools==5.0b2
37+
pip install coremltools==5.0b3
3838
```
3939

4040

coremlpython/CoreMLPython.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
1+
// Copyright (c) 2021, Apple Inc. All rights reserved.
2+
//
3+
// Use of this source code is governed by a BSD-3-clause license that can be
4+
// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
15
#pragma clang diagnostic push
26
#pragma clang diagnostic ignored "-Wexit-time-destructors"
37
#pragma clang diagnostic ignored "-Wdocumentation"
@@ -22,7 +26,7 @@ namespace CoreML {
2226
Model(const Model&) = delete;
2327
Model& operator=(const Model&) = delete;
2428
~Model();
25-
explicit Model(const std::string& urlStr, bool useCPUOnly);
29+
explicit Model(const std::string& urlStr, const std::string& computeUnits);
2630
py::dict predict(const py::dict& input, bool useCPUOnly);
2731
static py::bytes autoSetSpecificationVersion(const py::bytes& modelBytes);
2832
static int32_t maximumSupportedSpecificationVersion();

coremlpython/CoreMLPython.mm

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
1+
// Copyright (c) 2021, Apple Inc. All rights reserved.
2+
//
3+
// Use of this source code is governed by a BSD-3-clause license that can be
4+
// found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
15
#import <CoreML/CoreML.h>
26
#import "CoreMLPythonArray.h"
37
#import "CoreMLPython.h"
@@ -27,7 +31,7 @@
2731
}
2832
}
2933

30-
Model::Model(const std::string& urlStr, bool useCPUOnly) {
34+
Model::Model(const std::string& urlStr, const std::string& computeUnits) {
3135
@autoreleasepool {
3236

3337
// Compile the model
@@ -58,8 +62,13 @@
5862

5963
if (@available(macOS 10.14, *)) {
6064
MLModelConfiguration *configuration = [MLModelConfiguration new];
61-
if (useCPUOnly){
65+
if (computeUnits == "CPU_ONLY") {
6266
configuration.computeUnits = MLComputeUnitsCPUOnly;
67+
} else if (computeUnits == "CPU_AND_GPU") {
68+
configuration.computeUnits = MLComputeUnitsCPUAndGPU;
69+
} else {
70+
assert(computeUnits == "ALL");
71+
configuration.computeUnits = MLComputeUnitsAll;
6372
}
6473
m_model = [MLModel modelWithContentsOfURL:compiledUrl configuration:configuration error:&error];
6574
} else {
@@ -141,7 +150,7 @@
141150
py::module m("libcoremlpython", "CoreML.Framework Python bindings");
142151

143152
py::class_<Model>(m, "_MLModelProxy")
144-
.def(py::init<const std::string&, bool>())
153+
.def(py::init<const std::string&, const std::string&>())
145154
.def("predict", &Model::predict)
146155
.def_static("auto_set_specification_version", &Model::autoSetSpecificationVersion)
147156
.def_static("maximum_supported_specification_version", &Model::maximumSupportedSpecificationVersion);

coremltools/__init__.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,9 +21,11 @@
2121
2222
For more information: http://developer.apple.com/documentation/coreml
2323
"""
24+
from enum import Enum as _Enum
25+
from logging import getLogger as _getLogger
26+
2427

2528
# Backup root logger handlers
26-
from logging import getLogger as _getLogger
2729
_root_logger = _getLogger()
2830
_root_logger_handlers_backup = _root_logger.handlers.copy()
2931

@@ -58,6 +60,14 @@
5860
# New versions for iOS 15.0
5961
_SPECIFICATION_VERSION_IOS_15 = 6
6062

63+
class ComputeUnit(_Enum):
64+
'''
65+
The set of processing-unit configurations the model can use to make predictions.
66+
'''
67+
ALL = 1 # Allows the model to use all compute units available, including the neural engine
68+
CPU_AND_GPU = 2 # Allows the model to use both the CPU and GPU, but not the neural engine
69+
CPU_ONLY = 3 # Limit the model to only use the CPU
70+
6171
# expose sub packages as directories
6272
from . import converters
6373
from . import proto

coremltools/_deps/__init__.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -229,6 +229,13 @@ def __get_sklearn_version(version):
229229
_HAS_ONNX = False
230230
MSG_ONNX_NOT_FOUND = "ONNX not found."
231231

232+
try:
233+
import scipy
234+
except:
235+
_HAS_SCIPY = False
236+
else:
237+
_HAS_SCIPY = True
238+
232239
# General utils
233240
def version_ge(module, target_version):
234241
"""

coremltools/converters/_converters_entry.py

Lines changed: 27 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@
44
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
55
import gc
66
import collections
7+
import warnings
78

9+
from coremltools import ComputeUnit as _ComputeUnit
810
from coremltools.converters.mil.mil.passes.quantization_passes import AbstractQuantizationPass, FP16ComputePrecision
911
from coremltools.converters.mil.mil.passes.quantization_passes import ComputePrecision as precision
1012
from coremltools.converters.mil.input_types import InputType, ClassifierConfig
@@ -43,6 +45,7 @@ def convert(
4345
convert_to=None,
4446
compute_precision=None,
4547
skip_model_load=False,
48+
compute_units=_ComputeUnit.ALL,
4649
**kwargs
4750
):
4851
"""
@@ -199,10 +202,9 @@ def convert(
199202
200203
The above casts all the float32 tensors to be float16, except
201204
the input/output tensors to any ``linear`` op.
202-
- If ``None``, the parameter defaults to ``coremltools.precision.FLOAT32``.
203-
- TODO: rdar://74140243.
204-
- Before coremltools 5.0 release, change the default
205-
to coremltools.precision.FLOAT16 when convert_to="mlprogram"
205+
- If ``None``,
206+
- when convert_to="mlprogram", compute_precision parameter defaults to ``coremltools.precision.FLOAT16``.
207+
- when convert_to="neuralnetwork", compute_precision parameter needs to be None and has no meaning.
206208
207209
skip_model_load : bool
208210
Set to True to prevent coremltools from calling into the Core ML framework
@@ -216,6 +218,14 @@ def convert(
216218
can only be compiled and loaded from macOS12+.
217219
Defaults to False.
218220
221+
compute_units: coremltools.ComputeUnit
222+
An enum with three possible values:
223+
- coremltools.ComputeUnit.ALL - use all compute units available, including the
224+
neural engine.
225+
- coremltools.ComputeUnit.CPU_ONLY - limit the model to only use the CPU.
226+
- coremltools.ComputeUnit.CPU_AND_GPU - use both the CPU and GPU, but not the
227+
neural engine.
228+
219229
Returns
220230
-------
221231
model : ``coremltools.models.MLModel`` or ``coremltools.converters.mil.Program``
@@ -272,13 +282,14 @@ def convert(
272282
_validate_inputs(model, exact_source, inputs, outputs, classifier_config, compute_precision,
273283
exact_target, **kwargs)
274284

285+
if "useCPUOnly" in kwargs and kwargs["useCPUOnly"]:
286+
warnings.warn('The "useCPUOnly" parameter is deprecated and will be removed in 6.0. '
287+
'Use the compute_units parameter: "compute_units=coremltools.ComputeUnit.CPU_ONLY".')
288+
compute_units = _ComputeUnit.CPU_ONLY
289+
275290

276291
if compute_precision is None:
277-
# TODO: rdar://74140243
278-
# Before 5.0 release,
279-
# map "None" to "fp32" for "neuralnetwork"
280-
# and to "fp16" for "mlprogram"
281-
transforms = list()
292+
transforms = [FP16ComputePrecision(op_selector=lambda op: True)] if convert_to != "neuralnetwork" else list()
282293
elif compute_precision == precision.FLOAT32:
283294
transforms = list()
284295
elif compute_precision == precision.FLOAT16:
@@ -295,8 +306,9 @@ def convert(
295306
inputs=inputs,
296307
outputs=outputs,
297308
classifier_config=classifier_config,
298-
transforms=transforms,
309+
transforms=tuple(transforms),
299310
skip_model_load=skip_model_load,
311+
compute_units=compute_units,
300312
**kwargs
301313
)
302314

@@ -355,12 +367,11 @@ def raise_if_duplicated(input_list):
355367
msg = '"classifier_config" must be of type ClassifierConfig'
356368
raise ValueError(msg)
357369

358-
if convert_to.lower() == 'neuralnetwork':
359-
if compute_precision is not None:
360-
if compute_precision != precision.FLOAT32:
361-
msg = "'compute_precision' must be coremltools.precision.FLOAT32 when " \
362-
"the target is 'neuralnetwork' (i.e. deployment target is less than iOS15)"
363-
raise ValueError(msg)
370+
if convert_to.lower() == 'neuralnetwork' and compute_precision is not None:
371+
msg = "compute_precision is only supported for mlprogram target and must be None if target=='neuralnetwork'.\n" \
372+
"Note that target may be implicitly set depending on the minimum_deployment_target.\n" \
373+
"See minimum_deployment_target for more details."
374+
raise ValueError(msg)
364375

365376
if compute_precision is not None:
366377
if compute_precision not in [precision.FLOAT32, precision.FLOAT16]:

coremltools/converters/mil/backend/mil/load.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import tempfile
55

66
from coremltools.converters.mil.backend.mil.helper import *
7-
from coremltools.converters.mil.backend.mil.passes.mil_passes import mil_backend_passes
7+
import coremltools.converters.mil.backend.mil.passes.mil_passes as mil_passes
88
import coremltools.proto.MIL_pb2 as pm
99
from coremltools.converters.mil.mil import types
1010
from coremltools.converters.mil.mil import Function
@@ -249,7 +249,7 @@ def load(prog, weights_dir, resume_on_errors=False, **kwargs):
249249
if "main" not in prog.functions:
250250
raise ValueError("main function not found in program")
251251

252-
mil_backend_passes(prog)
252+
mil_passes.mil_backend_passes(prog)
253253

254254
# if user has specified "ClassifierConfig", then add the "classify" op to the prog
255255
classifier_config = kwargs.get("classifier_config", None)

0 commit comments

Comments
 (0)