fairseq: Problem with exporting wav2vec2 to onnx

I am trying to export the wav2vec2 model to ONNX format, but I ran into an issue. I inserted the following code into examples/speech_recognition/infer.py:

wav_input_16khz = torch.randn(1, 10000)
torch.onnx.export(models[0], wav_input_16khz, "fairseq_transformer.onnx", verbose=True, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)

And got the following error:

File "examples/speech_recognition/infer.py", line 443, in <module> cli_main() File "examples/speech_recognition/infer.py", line 439, in cli_main main(args) File "examples/speech_recognition/infer.py", line 271, in main torch.onnx.export(models[0], wav_input_16khz, "fairseq_transformer.onnx", verbose=True, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) File "/usr/local/lib/python3.8/site-packages/torch/onnx/__init__.py", line 225, in export return utils.export(model, args, f, export_params, verbose, training, File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 85, in export _export(model, args, f, export_params, verbose, training, input_names, output_names, File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 632, in _export _model_to_graph(model, args, verbose, input_names, File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 409, in _model_to_graph graph, params, torch_out = _create_jit_graph(model, args, File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 379, in _create_jit_graph graph, torch_out = _trace_and_get_graph_from_model(model, args) File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 342, in _trace_and_get_graph_from_model torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True) File "/usr/local/lib/python3.8/site-packages/torch/jit/_trace.py", line 1148, in _get_trace_graph outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs) File "/usr/local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl result = self.forward(*input, **kwargs) File "/usr/local/lib/python3.8/site-packages/torch/jit/_trace.py", line 125, in forward graph, out = torch._C._create_graph_by_tracing( File "/usr/local/lib/python3.8/site-packages/torch/jit/_trace.py", line 116, in wrapper outs.append(self.inner(*trace_inputs)) File "/usr/local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 725, in _call_impl result = self._slow_forward(*input, **kwargs) File "/usr/local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 709, in _slow_forward result = self.forward(*input, **kwargs) File "/fairseq/models/wav2vec/wav2vec2.py", line 490, in forward x, mask_indices = self.apply_mask(features, padding_mask) File "/fairseq/models/wav2vec/wav2vec2.py", line 336, in apply_mask mask_indices = compute_mask_indices( File "/fairseq/data/data_utils.py", line 470, in compute_mask_indices mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) TypeError: '<' not supported between instances of 'numpy.ndarray' and 'Tensor' File "examples/speech_recognition/infer.py", line 443, in <module> cli_main() File "examples/speech_recognition/infer.py", line 439, in cli_main main(args) File "examples/speech_recognition/infer.py", line 271, in main torch.onnx.export(models[0], wav_input_16khz, "fairseq_transformer.onnx", verbose=True, operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK) File "/usr/local/lib/python3.8/site-packages/torch/onnx/__init__.py", line 225, in export return utils.export(model, args, f, export_params, verbose, training, File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 85, in export _export(model, args, f, export_params, verbose, training, input_names, output_names, File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 632, in _export _model_to_graph(model, args, verbose, input_names, 
File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 409, in _model_to_graph graph, params, torch_out = _create_jit_graph(model, args, File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 379, in _create_jit_graph graph, torch_out = _trace_and_get_graph_from_model(model, args) File "/usr/local/lib/python3.8/site-packages/torch/onnx/utils.py", line 342, in _trace_and_get_graph_from_model torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True) File "/usr/local/lib/python3.8/site-packages/torch/jit/_trace.py", line 1148, in _get_trace_graph outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs) File "/usr/local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl result = self.forward(*input, **kwargs) File "/usr/local/lib/python3.8/site-packages/torch/jit/_trace.py", line 125, in forward graph, out = torch._C._create_graph_by_tracing( File "/usr/local/lib/python3.8/site-packages/torch/jit/_trace.py", line 116, in wrapper outs.append(self.inner(*trace_inputs)) File "/usr/local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 725, in _call_impl result = self._slow_forward(*input, **kwargs) File "/usr/local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 709, in _slow_forward result = self.forward(*input, **kwargs) File "/fairseq/models/wav2vec/wav2vec2.py", line 490, in forward x, mask_indices = self.apply_mask(features, padding_mask) File "/fairseq/models/wav2vec/wav2vec2.py", line 336, in apply_mask mask_indices = compute_mask_indices( File "/fairseq/data/data_utils.py", line 470, in compute_mask_indices mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) TypeError: '<' not supported between instances of 'numpy.ndarray' and 'Tensor'

Can anyone tell me what I should do here, or what I am doing wrong?

About this issue

  • State: closed
  • Created 4 years ago
  • Comments: 20 (1 by maintainers)

Most upvoted comments

ONNX export is not currently tested, but please share if you’re able to get it working!