
Commit

Disabling radtts tests until we have a real model (#6036)
Signed-off-by: Boris Fomitchev <[email protected]>
borisfom committed Feb 16, 2023
1 parent ccaf908 commit ccfca84
Showing 2 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion nemo/collections/tts/helpers/helpers.py
@@ -756,7 +756,7 @@ def sample_tts_input(
     sz = (max_batch * max_dim,) if export_config["enable_ragged_batches"] else (max_batch, max_dim)
     inp = torch.randint(*export_config["emb_range"], sz, device=device, dtype=torch.int64)
     pitch = torch.randn(sz, device=device, dtype=torch.float32) * 0.5
-    pace = torch.clamp(torch.randn(sz, device=device, dtype=torch.float32) * 0.1 + 1, min=0.01)
+    pace = torch.clamp(torch.randn(sz, device=device, dtype=torch.float32) * 0.1 + 1.0, min=0.2)
     inputs = {'text': inp, 'pitch': pitch, 'pace': pace}
     if export_config["enable_ragged_batches"]:
         batch_lengths = torch.zeros((max_batch + 1), device=device, dtype=torch.int32)
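The helpers.py change raises the lower bound on the randomly sampled pace tensor from 0.01 to 0.2, so the dummy export inputs can no longer contain near-zero pace values (presumably to keep the traced dummy outputs bounded, though the commit message does not say). A minimal sketch of the effect, assuming a standalone script with an arbitrary example shape rather than the actual sample_tts_input helper:

import torch

# Illustrative only: standalone example, not the NeMo helper itself.
sz = (2, 16)  # arbitrary (max_batch, max_dim) example shape
device = "cpu"

raw = torch.randn(sz, device=device, dtype=torch.float32) * 0.1 + 1.0

pace_old = torch.clamp(raw, min=0.01)  # old floor: values as low as 0.01 were allowed
pace_new = torch.clamp(raw, min=0.2)   # new floor: pace is always at least 0.2

print(pace_old.min().item(), pace_new.min().item())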
4 changes: 2 additions & 2 deletions tests/collections/tts/test_tts_exportables.py
@@ -78,6 +78,7 @@ def test_HifiGanModel_export_to_onnx(self, hifigan_model):
             filename = os.path.join(tmpdir, 'hfg.onnx')
             model.export(output=filename, verbose=True, check_trace=True)
 
+    @pytest.mark.pleasefixme
     @pytest.mark.run_only_on('GPU')
     @pytest.mark.unit
     def test_RadTTSModel_export_to_torchscript(self, radtts_model):
@@ -89,6 +90,7 @@ def test_RadTTSModel_export_to_torchscript(self, radtts_model):
                 input_example2 = model.input_module.input_example(max_batch=19, max_dim=999)
                 model.export(output=filename, verbose=True, input_example=input_example1, check_trace=[input_example2])
 
+    @pytest.mark.pleasefixme
     @pytest.mark.run_only_on('GPU')
     @pytest.mark.unit
     def test_RadTTSModel_export_to_onnx(self, radtts_model):
@@ -98,8 +100,6 @@ def test_RadTTSModel_export_to_onnx(self, radtts_model):
             with torch.cuda.amp.autocast(enabled=True, cache_enabled=False, dtype=torch.float16):
                 input_example1 = model.input_module.input_example(max_batch=13, max_dim=777)
                 input_example2 = model.input_module.input_example(max_batch=19, max_dim=999)
-                input_example1[0]["pace"] = torch.clamp(input_example1[0]["pace"] - 0.3, min=0.1)
-                input_example2[0]["pace"] = torch.clamp(input_example2[0]["pace"] - 0.3, min=0.1)
                 model.export(
                     output=filename,
                     input_example=input_example1,
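Both RadTTS export tests are now tagged with the custom pleasefixme marker, which NeMo's test suite uses to flag known-broken tests so CI can deselect them until the real model checkpoint is available. A rough sketch of how such a marker is typically wired up, assuming a conftest.py registration and an "-m" deselect filter; NeMo's actual configuration may differ:

# conftest.py (illustrative; the real NeMo conftest may register markers differently)
def pytest_configure(config):
    # Register the custom marker so pytest does not emit unknown-mark warnings.
    config.addinivalue_line(
        "markers", "pleasefixme: test is known to fail and should be skipped until fixed"
    )

# CI would then run the suite with the marked tests deselected, e.g.:
#   pytest -m "not pleasefixme" tests/collections/tts/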

