Skip to content

Commit e4b0d63

Browse files
authored
7.0 Release (#1977)
1 parent d7a1479 commit e4b0d63

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

51 files changed

+1237
-453
lines changed

cmake/coreml-utils.cmake

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ function(coreml_add_build_proto proto_fn target_suffix)
3737
${CMAKE_CURRENT_BINARY_DIR}/format/${proto_fn}_enum.h
3838
COMMENT "Generating c++ enums from ${proto_fn}.proto into ${CMAKE_CURRENT_BINARY_DIR}/format/"
3939
COMMAND ${CMAKE_BINARY_DIR}/deps/protobuf/cmake/protoc
40-
--plugin=protoc-gen-enum=mlmodel${target_suffix}/enumgen
40+
--plugin=protoc-gen-enum=mlmodel/enumgen
4141
--enum_out=${CMAKE_CURRENT_BINARY_DIR}/format/
4242
-I${CMAKE_CURRENT_SOURCE_DIR}/format/
4343
${CMAKE_CURRENT_SOURCE_DIR}/format/${proto_fn}.proto
@@ -77,7 +77,7 @@ function(coreml_add_build_proto proto_fn target_suffix)
7777
add_custom_target(tgt_${proto_fn}_enums ALL
7878
COMMENT "Generating c++ enums from ${proto_fn}.proto into ${CMAKE_CURRENT_SOURCE_DIR}/build/format/"
7979
COMMAND ${CMAKE_BINARY_DIR}/deps/protobuf/cmake/protoc
80-
--plugin=protoc-gen-enum=mlmodel${target_suffix}/enumgen
80+
--plugin=protoc-gen-enum=mlmodel/enumgen
8181
--enum_out=${CMAKE_CURRENT_SOURCE_DIR}/build/format/
8282
-I${CMAKE_CURRENT_SOURCE_DIR}/format/
8383
${CMAKE_CURRENT_SOURCE_DIR}/format/${proto_fn}.proto

coremltools/converters/mil/_deployment_compatibility.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,6 @@ class AvailableTarget(IntEnum):
2323
iOS17 = _SPECIFICATION_VERSION_IOS_17
2424

2525
# macOS versions (aliases of iOS versions)
26-
macOS15 = _SPECIFICATION_VERSION_IOS_13
27-
macOS16 = _SPECIFICATION_VERSION_IOS_14
2826
macOS10_15 = _SPECIFICATION_VERSION_IOS_13
2927
macOS10_16 = _SPECIFICATION_VERSION_IOS_14
3028
macOS11 = _SPECIFICATION_VERSION_IOS_14

coremltools/converters/mil/backend/mil/helper.py

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@
1919
from coremltools.converters.mil.mil.types.type_mapping import np_val_to_py_type
2020
from coremltools.models.utils import _WEIGHTS_DIR_NAME, _WEIGHTS_FILE_NAME
2121

22+
# For immediate values, those types are stored in bytes (MIL parser reads those types from bytes).
23+
IMMEDIATE_VALUE_TYPES_IN_BYTES = (types.fp16, types.int8, types.uint8, types.uint32)
24+
2225

2326
def create_valuetype_scalar(data_type):
2427
"""
@@ -105,7 +108,7 @@ def _tensor_field_by_type(tensor_val, builtin_type):
105108
elif types.is_int(builtin_type):
106109
if builtin_type == types.int64 or builtin_type == types.uint64:
107110
return tensor_val.longInts.values
108-
if builtin_type in (types.int8, types.uint8, types.uint32):
111+
if builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
109112
return tensor_val.bytes.values
110113
if builtin_type == types.int16 or builtin_type == types.uint16:
111114
# TODO (rdar://111797203): Serialize to byte after MIL changes to read from byte field.
@@ -132,7 +135,7 @@ def _set_empty_tensor_field_by_type(tensor_val, builtin_type):
132135
elif types.is_int(builtin_type):
133136
if (builtin_type == types.int64 or builtin_type == types.uint64):
134137
tensor_val.longInts.SetInParent()
135-
elif builtin_type in (types.int8, types.uint8, types.uint32):
138+
elif builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
136139
tensor_val.bytes.SetInParent()
137140
else:
138141
tensor_val.ints.SetInParent()
@@ -167,7 +170,7 @@ def create_tensor_value(np_tensor):
167170
if builtin_type == types.str:
168171
for x in np.nditer(np_tensor):
169172
t_field.append(x.encode("utf-8"))
170-
elif builtin_type in (types.fp16, types.int8, types.uint8, types.uint32):
173+
elif builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
171174
val.immediateValue.tensor.bytes.values = np_val_to_py_type(np_tensor)
172175
else:
173176
for x in np_tensor.flatten():
@@ -189,7 +192,7 @@ def create_scalar_value(py_scalar):
189192

190193
# Set the tensor value
191194
t_field = _tensor_field_by_type(t_val, builtin_type)
192-
if builtin_type in (types.fp16, types.int8, types.uint8, types.uint32):
195+
if builtin_type in IMMEDIATE_VALUE_TYPES_IN_BYTES:
193196
# Serialize to bytes because MIL read them from the "bytes" field in TensorValue.
194197
val.immediateValue.tensor.bytes.values = np_val_to_py_type(py_scalar)
195198
else:
@@ -295,7 +298,7 @@ def types_to_proto(valuetype):
295298
return create_valuetype_scalar(types_to_proto_primitive(valuetype))
296299

297300

298-
def create_file_value(output_var, blob_writer):
301+
def _get_offset_by_writing_data(output_var, blob_writer):
299302
if output_var.val.dtype.kind == 'f' and output_var.val.dtype.itemsize == 4:
300303
offset = blob_writer.write_float_data(np.ascontiguousarray(output_var.val.flatten()))
301304
elif output_var.val.dtype.kind == "f" and output_var.val.dtype.itemsize == 2:
@@ -316,6 +319,12 @@ def create_file_value(output_var, blob_writer):
316319
else:
317320
raise TypeError("Unsupported type, {}, for net buffer serialization.".format(output_var.val.dtype))
318321

322+
return offset
323+
324+
325+
def create_file_value(output_var, blob_writer):
326+
offset = _get_offset_by_writing_data(output_var, blob_writer)
327+
319328
return create_file_value_tensor(
320329
file_name=os.path.join(os.path.join('@model_path', _WEIGHTS_DIR_NAME), _WEIGHTS_FILE_NAME),
321330
offset=offset,

coremltools/converters/mil/backend/mil/load.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@
4242

4343
try:
4444
from coremltools.libmilstoragepython import _BlobStorageWriter as BlobWriter
45-
except:
45+
except Exception as e:
46+
logger.warning(f"Fail to import BlobWriter from libmilstoragepython. {e}")
4647
BlobWriter = None
4748

4849

coremltools/converters/mil/backend/mil/passes/test_passes.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1088,13 +1088,13 @@ def program(x):
10881088
x = mb.pow(x=x, y=2.0)
10891089
x = mb.sqrt(x=x)
10901090
x = mb.reduce_argmax(x=x)
1091-
x = mb.reshape(x=x, shape=[*x_shape])
1091+
x = mb.reshape(x=x, shape=[*x_shape[:-1]])
10921092
else:
10931093
x = mb.mul(x=x, y=x)
10941094
x = mb.sqrt(x=x)
10951095
x = mb.pow(x=x, y=2.0)
10961096
x = mb.reduce_argmax(x=x)
1097-
x = mb.reshape(x=x, shape=[*x_shape])
1097+
x = mb.reshape(x=x, shape=[*x_shape[:-1]])
10981098
return x
10991099

11001100
prev_prog, _, block = apply_pass_and_basic_check(
@@ -1108,5 +1108,5 @@ def program(x):
11081108
program=program,
11091109
inputs={"x": x_shape},
11101110
backend=("mlprogram", "fp32"),
1111-
expected_output_shapes={block.outputs[0].name: tuple(x_shape)},
1111+
expected_output_shapes={block.outputs[0].name: tuple(x_shape[:-1])},
11121112
)

coremltools/converters/mil/converter.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -288,7 +288,7 @@ def mil_convert_to_proto(
288288

289289
PassPipelineManager.apply_pipeline(prog, main_pipeline)
290290

291-
prog._check_invalid_tensor_rank()
291+
prog._check_invalid_program()
292292

293293
if convert_to == 'milinternal':
294294
return None, prog

coremltools/converters/mil/frontend/milproto/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,4 @@
33
# Use of this source code is governed by a BSD-3-clause license that can be
44
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
55

6-
from .load import load
6+
from . import load

coremltools/converters/mil/frontend/milproto/load.py

Lines changed: 33 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -8,24 +8,31 @@
88
import numpy as np
99

1010
from coremltools import _logger as logger
11-
from coremltools.converters.mil._deployment_compatibility import \
12-
AvailableTarget as _target
11+
from coremltools.converters.mil._deployment_compatibility import AvailableTarget as _target
12+
from coremltools.converters.mil.backend.mil import helper
1313
from coremltools.converters.mil.mil import Block
1414
from coremltools.converters.mil.mil import Builder as mb
15-
from coremltools.converters.mil.mil import (Function, ListVar, Placeholder,
16-
Program, TupleInputType, Var,
17-
mil_list, types)
15+
from coremltools.converters.mil.mil import (
16+
Function,
17+
ListVar,
18+
Placeholder,
19+
Program,
20+
TupleInputType,
21+
Var,
22+
mil_list,
23+
types,
24+
)
1825
from coremltools.converters.mil.mil.block import curr_block
19-
from coremltools.converters.mil.mil.ops.registry import \
20-
SSAOpRegistry as _SSAOpRegistry
26+
from coremltools.converters.mil.mil.ops.registry import SSAOpRegistry as _SSAOpRegistry
2127
from coremltools.proto import MIL_pb2 as pm
2228
from coremltools.proto import Model_pb2 as ml
2329

2430
from .helper import proto_to_types
2531

2632
try:
2733
from coremltools.libmilstoragepython import _BlobStorageReader as BlobReader
28-
except:
34+
except Exception as e:
35+
logger.warning(f"Fail to import BlobReader from libmilstoragepython. {e}")
2936
BlobReader = None
3037

3138

@@ -145,7 +152,7 @@ def _load_value(context, value_spec):
145152
else:
146153
value = _load_file_value(context, value_spec.blobFileValue, dtype)
147154

148-
if dtype in (types.fp16, types.int8, types.uint8, types.uint32):
155+
if dtype in helper.IMMEDIATE_VALUE_TYPES_IN_BYTES:
149156
value = np.frombuffer(value, types.nptype_from_builtin(dtype)).reshape(
150157
shape
151158
)
@@ -246,20 +253,23 @@ def _dummy_false_fn(*loop_vars):
246253
inputs["_false_fn"] = _dummy_false_fn
247254

248255

256+
def _load_const_op(context, op_spec):
257+
inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()}
258+
pymil_var = getattr(mb, op_spec.type)(**inputs)
259+
context.register_var_with_name(op_spec.outputs[0].name, pymil_var)
260+
261+
249262
def _load_operation(context, op_spec):
250263
if not isinstance(op_spec, pm.Operation):
251264
raise TypeError("Invalid Operation spec object")
252265

253266
op_type = op_spec.type
254-
if op_type == "const" or op_type.startswith("constexpr_"):
267+
if op_type == "const" or "constexpr_" in op_type:
255268
if op_spec.blocks:
256269
raise ValueError("const / constexpr operation can't have any block")
257270
if op_spec.inputs:
258271
raise ValueError("const / constexpr operation can't have any input")
259-
260-
inputs = {k: _load_value(context, v) for k, v in op_spec.attributes.items()}
261-
pymil_var = getattr(mb, op_type)(**inputs)
262-
context.register_var_with_name(op_spec.outputs[0].name, pymil_var)
272+
_load_const_op(context, op_spec)
263273

264274
else:
265275
if op_type == "custom_layer":
@@ -402,11 +412,19 @@ def _load_function(context, func_spec, spec_version):
402412

403413

404414
def load(model_spec, specification_version, file_weights_dir="", **kwargs):
415+
"""
416+
Load MILProto to Pymil.
417+
418+
Set force_spec_version to force override the spec version.
419+
"""
405420
if not isinstance(model_spec, ml.Model):
406421
raise TypeError("Invalid Model sepc object")
407422

408423
if specification_version < model_spec.specificationVersion:
409-
raise ValueError("specification_version must be greater or equal to the input model spec version")
424+
if not kwargs.get("force_spec_version", False):
425+
raise ValueError(
426+
"specification_version must be greater or equal to the input model spec version"
427+
)
410428

411429
if model_spec.WhichOneof("Type") != "mlProgram":
412430
raise ValueError("Only MIL proto based mlmodels can be loaded")

coremltools/converters/mil/frontend/tensorflow/test/test_load.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@ def build_model(x):
158158

159159
@pytest.mark.parametrize(
160160
"target",
161-
[ct.target.iOS13, ct.target.macOS15, ct.target.watchOS6, ct.target.tvOS13],
161+
[ct.target.iOS13, ct.target.macOS10_15, ct.target.watchOS6, ct.target.tvOS13],
162162
)
163163
def test_invalid_deployment_target_cumsum(self, target):
164164
x_shape = (3, 4, 5)
@@ -179,7 +179,7 @@ def build_model(x):
179179

180180
@pytest.mark.parametrize(
181181
"target",
182-
[ct.target.iOS14, ct.target.macOS16, ct.target.watchOS7, ct.target.tvOS14],
182+
[ct.target.iOS14, ct.target.macOS10_16, ct.target.watchOS7, ct.target.tvOS14],
183183
)
184184
def test_valid_deployment_target_cumsum(self, target):
185185
x_shape = (3, 4, 5)

coremltools/converters/mil/frontend/torch/converter.py

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
from .._utils import get_output_names
2020
from .internal_graph import InternalTorchIRGraph, InternalTorchIRNode
2121
from .ops import convert_nodes
22+
from .quantization_ops import _dequantized_weight
2223
from .torch_op_registry import _TORCH_OPS_REGISTRY
2324
from .torchir_passes import (
2425
flatten_graph_input_values,
@@ -147,8 +148,6 @@ def get_dequantized_var(self, name: str, dequantized_name: str = None):
147148
# the MIL op.
148149
if dequantized_name is not None:
149150
self._context.add(original_var, dequantized_name)
150-
if self._quant_dtype is None:
151-
raise AssertionError("Trying to dequantize without quantization info")
152151
return original_var, self._quant_dtype
153152

154153
quant_params = self.get_quantization_info(name)
@@ -429,6 +428,10 @@ def convert_const(self):
429428
if isinstance(val, torch._C.ScriptObject):
430429
logger.info(f"Encountered constant {name} of type _torch._C.ScriptObject")
431430
continue
431+
elif isinstance(val, torch.Tensor) and val.is_quantized:
432+
const = _dequantized_weight(val.cpu(), name)
433+
self.context.add(const)
434+
continue
432435
elif not isinstance(val, np.ndarray):
433436
raise ValueError(f"unsupported class for {name} in PyTorch graph: {type(val)}")
434437
# TODO (rdar://107718371): support uint8 quantization
@@ -623,10 +626,10 @@ def _lower_graph_block(graph):
623626
if is_tensor or is_quantized_tensor:
624627
if is_tensor and prefix in state_dict:
625628
assert torch.equal(
626-
module, state_dict[prefix]
629+
module.cpu(), state_dict[prefix].cpu()
627630
), "tensor value not consistent between torch ir and state_dict"
628631
if prefix in params_dict:
629-
assert torch.equal(module, params_dict[prefix])
632+
assert torch.equal(module.cpu(), params_dict[prefix].cpu())
630633
replace_input[_output] = first_node_with_prefix[prefix]
631634
else:
632635
params_dict[prefix] = module

0 commit comments

Comments (0)