🐛 Bug

Loading the model with `torch.hub.load` fails.

To Reproduce

```
(base) $ python3
Python 3.8.5 (default, Sep 4 2020, 07:30:14)
[GCC 7.3.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import torch
>>> import torchaudio
>>> import soundfile
>>>
>>> torch.__version__
'1.8.2+cu102'
>>> torchaudio.__version__
'0.8.2'
>>> soundfile.__version__
'0.10.3'
>>> model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad:a345715',
... model='silero_vad')
Using cache found in /lium/home/pchampi/.cache/torch/hub/snakers4_silero-vad_a345715
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/lium/raid01_b/pchampi/lab/sidekit-for-vpc/venv/lib/python3.8/site-packages/torch/hub.py", line 339, in load
    model = _load_local(repo_or_dir, model, *args, **kwargs)
  File "/lium/raid01_b/pchampi/lab/sidekit-for-vpc/venv/lib/python3.8/site-packages/torch/hub.py", line 368, in _load_local
    model = entry(*args, **kwargs)
  File "/lium/home/pchampi/.cache/torch/hub/snakers4_silero-vad_a345715/hubconf.py", line 24, in silero_vad
    model = init_jit_model(model_path=f'{hub_dir}/snakers4_silero-vad_master/files/model.jit')
  File "/lium/home/pchampi/.cache/torch/hub/snakers4_silero-vad_a345715/utils_vad.py", line 74, in init_jit_model
    model = torch.jit.load(model_path, map_location=device)
  File "/lium/raid01_b/pchampi/lab/sidekit-for-vpc/venv/lib/python3.8/site-packages/torch/jit/_serialization.py", line 161, in load
    cpp_module = torch._C.import_ir_module(cu, str(f), map_location, _extra_files)
RuntimeError: The following operation failed in the TorchScript interpreter.
Traceback of TorchScript, serialized code (most recent call last):
  File "code/__torch__/torch/nn/quantized/modules/linear.py", line 17, in __setstate__
    state: Tuple[Tensor, Optional[Tensor], bool, int]) -> None:
    self.dtype = (state)[3]
    _1 = (self).set_weight_bias((state)[0], (state)[1], )
         ~~~~~~~~~~~~~~~~~~~~~ <--- HERE
    self.training = (state)[2]
    return None
  File "code/__torch__/torch/nn/quantized/modules/linear.py", line 40, in set_weight_bias
    _10 = "Unsupported dtype on dynamic quantized linear!"
    if torch.eq(self.dtype, 12):
      _11 = ops.quantized.linear_prepack(weight, bias)
            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
      self._packed_params = _11
    else:
Traceback of TorchScript, original code (most recent call last):
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/quantized/modules/linear.py", line 93, in __setstate__
    def __setstate__(self, state):
        self.dtype = state[3]
        self.set_weight_bias(state[0], state[1])
        ~~~~~~~~~~~~~~~~~~~~ <--- HERE
        self.training = state[2]
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/quantized/modules/linear.py", line 23, in set_weight_bias
    def set_weight_bias(self, weight: torch.Tensor, bias: Optional[torch.Tensor]) -> None:
        if self.dtype == torch.qint8:
            self._packed_params = torch.ops.quantized.linear_prepack(weight, bias)
                                  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
        elif self.dtype == torch.float16:
            self._packed_params = torch.ops.quantized.linear_prepack_fp16(weight, bias)
RuntimeError: Didn't find engine for operation quantized::linear_prepack NoQEngine
```
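The `NoQEngine` error indicates that the running PyTorch build has no quantized engine selected, so the dynamically quantized `Linear` layers inside the JIT model cannot be unpacked during `torch.jit.load`. A quick way to see what is available, as a minimal sketch using the public `torch.backends.quantized` API:

```python
import torch

# Engines compiled into this build, e.g. ['none', 'fbgemm'] on typical
# x86 builds or ['none', 'qnnpack'] on ARM.
print(torch.backends.quantized.supported_engines)

# Currently selected engine; 'none' would explain the NoQEngine failure.
print(torch.backends.quantized.engine)
```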
Environment
Answered by pchampio, Oct 27, 2021
Using `torch.backends.quantized.engine = 'qnnpack'` seems to work, yes.
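For reference, a minimal sketch of the workaround applied to the original repro; the engine must be set before the model is loaded, and `'qnnpack'` should be one of the values reported by `torch.backends.quantized.supported_engines` on your machine:

```python
import torch

# Select an available quantized engine before torch.jit.load runs inside
# hub.load; 'qnnpack' worked here, 'fbgemm' is the usual engine on x86.
torch.backends.quantized.engine = 'qnnpack'

model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad:a345715',
                              model='silero_vad')
```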