coremltools: CoreML conversion fails. the named input `alpha` must have the same data type as the named input `x`. However, alpha has dtype fp32 whereas x has dtype int32.
🐞Describing the bug
Cannot convert traced model to coreml. Error is ValueError: In op, of type clip, named num_proposals_i.1, the named input alpha must have the same data type as the named input x. However, alpha has dtype fp32 whereas x has dtype int32.
Stack Trace
ValueError Traceback (most recent call last) <ipython-input-2-500867b44162> in <module> 21 ts_model = torch.jit.trace(tracebale_model, (image, )) 22 --> 23 mlmodel = cm.convert(ts_model, 24 inputs=[cm.TensorType(shape=[3, 480, 640])], 25 source="pytorch",
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/_converters_entry.py in convert(model, source, inputs, outputs, classifier_config, minimum_deployment_target, convert_to, compute_precision, skip_model_load, compute_units, package_dir, debug) 442 specification_version = _set_default_specification_version(exact_target) 443 –> 444 mlmodel = mil_convert( 445 model, 446 convert_from=exact_source,
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/converter.py in mil_convert(model, convert_from, convert_to, compute_units, **kwargs)
188 See coremltools.converters.convert
189 """
--> 190 return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
191
192
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/converter.py in _mil_convert(model, convert_from, convert_to, registry, modelClass, compute_units, **kwargs) 215 _os.chmod(weights_dir, _stat.S_IRWXU | _stat.S_IRWXG | _stat.S_IRWXO) 216 –> 217 proto, mil_program = mil_convert_to_proto( 218 model, 219 convert_from,
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/converter.py in mil_convert_to_proto(model, convert_from, convert_to, converter_registry, **kwargs) 280 frontend_converter = frontend_converter_type() 281 --> 282 prog = frontend_converter(model, **kwargs) 283 284 if convert_to.lower() != "neuralnetwork":
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/converter.py in call(self, *args, **kwargs) 110 from .frontend.torch import load 111 –> 112 return load(*args, **kwargs) 113 114
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/load.py in load(model_spec, inputs, specification_version, debug, outputs, cut_at_symbols, **kwargs) 55 inputs = _convert_to_torch_inputtype(inputs) 56 converter = TorchConverter(torchscript, inputs, outputs, cut_at_symbols, specification_version) —> 57 return _perform_torch_convert(converter, debug) 58 59
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/load.py in _perform_torch_convert(converter, debug) 94 def _perform_torch_convert(converter, debug): 95 try: —> 96 prog = converter.convert() 97 except RuntimeError as e: 98 if debug and “convert function” in str(e):
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/converter.py in convert(self) 268 269 # Add the rest of the operations –> 270 convert_nodes(self.context, self.graph) 271 272 graph_outputs = [self.context[name] for name in self.graph.outputs]
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/ops.py in convert_nodes(context, graph) 101 “PyTorch convert function for op ‘{}’ not implemented.”.format(node.kind) 102 ) –> 103 add_op(context, node) 104 105 # We’ve generated all the outputs the graph needs, terminate conversion.
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/frontend/torch/ops.py in clamp(context, node) 4395 min_val = inputs[1] if inputs[1] else _np.finfo(_np.float32).min 4396 max_val = inputs[2] if inputs[2] else _np.finfo(_np.float32).max -> 4397 context.add(mb.clip(x=inputs[0], alpha=min_val, beta=max_val, name=node.name)) 4398 4399
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/mil/ops/registry.py in add_op(cls, **kwargs) 176 op_cls_to_add = op_reg[op_type] 177 –> 178 return cls._add_op(op_cls_to_add, **kwargs) 179 180 setattr(Builder, op_type, add_op)
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/mil/builder.py in _add_op(cls, op_cls, **kwargs) 164 op_name=kwargs["name"], before_op=before_op, 165 candidate_kv=kwargs)) --> 166 new_op = op_cls(**kwargs) 167 168 # Initialize optional input Vars if it wasn't in kwargs
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/mil/operation.py in init(self, **kwargs) 180 input_kv = {k: v for k, v in kwargs.items() 181 if k in self._input_types and v is not None} –> 182 self._validate_and_set_inputs(input_kv) 183 self._ensure_required_inputs() 184
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/mil/operation.py in _validate_and_set_inputs(self, input_kvs, no_check_var_types) 477 v_old.remove_child_op(op, no_check_var_types) 478 –> 479 self.input_spec.validate_inputs(self.name, self.op_type, input_kvs) 480 481 for name, var in input_kvs.items():
~/opt/anaconda3/envs/coreml/lib/python3.8/site-packages/coremltools/converters/mil/mil/input_type.py in validate_inputs(self, op_name, op_type, candidate_kvs) 123 ).format(op_type, op_name, name, ref_name, name, 124 var.dtype.type_info(), ref_name, expected_dtype.type_info()) –> 125 raise ValueError(msg) 126 127 # Ensure candidate_kvs doesn’t contain None
ValueError: In op, of type clip, named num_proposals_i.1, the named input alpha
must have the same data type as the named input x
. However, alpha has dtype fp32 whereas x has dtype int32.
To Reproduce
import torch
from detectron2 import model_zoo
from detectron2.export import TracingAdapter
from detectron2.utils.testing import get_sample_coco_image
import coremltools as cm
def inference(model, inputs):
    # Run the raw inference path with detectron2 post-processing disabled so
    # the traced graph stays free of post-processing ops, then wrap the first
    # result in the list-of-dicts output format TracingAdapter expects.
    raw_outputs = model.inference(inputs, do_postprocess=False)
    return [{"instances": raw_outputs[0]}]
# Load a pretrained Mask R-CNN from the detectron2 model zoo and switch it to
# eval mode so tracing captures inference-time (not training) behavior.
model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
model.eval()
# Sample COCO image converted to a float tensor.
# NOTE(review): presumably CHW shaped (3, H, W) — confirm it is consistent
# with the TensorType shape [3, 480, 640] passed to cm.convert below.
image = get_sample_coco_image(True).to(dtype=torch.float)
inputs = [{'image':image}]
# Wrap the model so torch.jit.trace sees a flat tensor-in/tensor-out
# signature instead of detectron2's list-of-dicts interface.
tracebale_model = TracingAdapter(model, inputs, inference)
ts_model = torch.jit.trace(tracebale_model, (image, ))
# Conversion fails here: per the stack trace above, the traced graph contains
# a clamp on an int32 value and coremltools' clip op rejects fp32 alpha/beta
# when x is int32.
mlmodel = cm.convert(ts_model,
inputs=[cm.TensorType(shape=[3, 480, 640])],
source="pytorch",
convert_to="mlprogram")
System environment (please complete the following information):
- coremltools version: 6.1
- OS (e.g. MacOS version or Linux type): Mac
- Any other relevant version information (e.g. PyTorch or TensorFlow version): torch=1.13.0.dev20220609
Any fixes, or pointers on how to fix this, would be very much appreciated.
About this issue
- Original URL
- State: closed
- Created 2 years ago
- Comments: 16
I can reproduce this error with the following minimal example:
I’ll look into this.