Skip to content

Commit

Permalink
Implementing the use of TrainingDataImporter in rasa test (run_evaluation)

Browse files Browse the repository at this point in the history
  • Loading branch information
Imod7 committed Jan 3, 2021
1 parent 085eb04 commit a7387f4
Show file tree
Hide file tree
Showing 10 changed files with 86 additions and 64 deletions.
6 changes: 5 additions & 1 deletion rasa/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import os
import platform
import sys
import inspect

from rasa_sdk import __version__ as rasa_sdk_version

Expand Down Expand Up @@ -113,7 +114,10 @@ def main() -> None:
set_log_and_warnings_filters()
rasa.telemetry.initialize_telemetry()
rasa.telemetry.initialize_error_reporting()
cmdline_arguments.func(cmdline_arguments)
if inspect.iscoroutinefunction(cmdline_arguments.func):
rasa.utils.common.run_in_loop(cmdline_arguments.func(cmdline_arguments))
else:
cmdline_arguments.func(cmdline_arguments)
elif hasattr(cmdline_arguments, "version"):
print_version()
else:
Expand Down
16 changes: 13 additions & 3 deletions rasa/cli/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
from rasa.core.test import FAILED_STORIES_FILE
import rasa.shared.utils.validation as validation_utils
import rasa.cli.utils
import rasa.utils.common

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -115,7 +116,7 @@ def run_core_test(args: argparse.Namespace) -> None:
)


def run_nlu_test(args: argparse.Namespace) -> None:
async def run_nlu_test_async(args: argparse.Namespace) -> None:
"""Run NLU tests."""
from rasa.test import compare_nlu_models, perform_nlu_cross_validation, test_nlu

Expand Down Expand Up @@ -149,7 +150,7 @@ def run_nlu_test(args: argparse.Namespace) -> None:
)
continue

compare_nlu_models(
await compare_nlu_models(
configs=config_files,
nlu=nlu_data,
output=output,
Expand All @@ -167,7 +168,16 @@ def run_nlu_test(args: argparse.Namespace) -> None:
args.model, "model", DEFAULT_MODELS_PATH
)

test_nlu(model_path, nlu_data, output, vars(args))
await test_nlu(model_path, nlu_data, output, vars(args))


def run_nlu_test(args: argparse.Namespace) -> None:
    """Run NLU tests as a synchronous CLI entry point.

    Thin synchronous wrapper that executes ``run_nlu_test_async`` inside an
    event loop. The async variant exists because ``test_nlu`` and
    ``compare_nlu_models`` are coroutines and must be awaited.

    Args:
        args: The parsed command-line arguments for ``rasa test nlu``.
    """
    rasa.utils.common.run_in_loop(run_nlu_test_async(args))


def test(args: argparse.Namespace):
Expand Down
4 changes: 2 additions & 2 deletions rasa/cli/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ def train_core(
)


def train_nlu(
async def train_nlu(
args: argparse.Namespace, train_path: Optional[Text] = None
) -> Optional[Text]:
"""Trains an NLU model.
Expand All @@ -192,7 +192,7 @@ def train_nlu(
args.domain, "domain", DEFAULT_DOMAIN_PATH, none_is_valid=True
)

return train_nlu(
return await train_nlu(
config=config,
nlu_data=nlu_data,
output=output,
Expand Down
14 changes: 8 additions & 6 deletions rasa/nlu/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@
from rasa.nlu.components import Component
from rasa.nlu.tokenizers.tokenizer import Token
from rasa.utils.tensorflow.constants import ENTITY_RECOGNITION
from rasa.shared.importers.importer import TrainingDataImporter

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -1415,7 +1416,7 @@ def remove_pretrained_extractors(pipeline: List[Component]) -> List[Component]:
return pipeline


def run_evaluation(
async def run_evaluation(
data_path: Text,
model_path: Text,
output_directory: Optional[Text] = None,
Expand Down Expand Up @@ -1448,9 +1449,10 @@ def run_evaluation(
interpreter = Interpreter.load(model_path, component_builder)

interpreter.pipeline = remove_pretrained_extractors(interpreter.pipeline)
test_data = rasa.shared.nlu.training_data.loading.load_data(
data_path, interpreter.model_metadata.language
test_data_importer = TrainingDataImporter.load_from_dict(
training_data_paths=[data_path]
)
test_data = await test_data_importer.get_nlu_data()

result: Dict[Text, Optional[Dict]] = {
"intent_evaluation": None,
Expand Down Expand Up @@ -1822,7 +1824,7 @@ def compute_metrics(
)


def compare_nlu(
async def compare_nlu(
configs: List[Text],
data: TrainingData,
exclusion_percentages: List[int],
Expand Down Expand Up @@ -1895,7 +1897,7 @@ def compare_nlu(
)

try:
model_path = train_nlu(
model_path = await train_nlu(
nlu_config,
train_split_path,
model_output_path,
Expand All @@ -1911,7 +1913,7 @@ def compare_nlu(
model_path = os.path.join(get_model(model_path), "nlu")

output_path = os.path.join(model_output_path, f"{model_name}_report")
result = run_evaluation(
result = await run_evaluation(
test_path, model_path, output_directory=output_path, errors=True
)

Expand Down
2 changes: 1 addition & 1 deletion rasa/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -1151,7 +1151,7 @@ async def _evaluate_model_using_test_set(
model_directory = eval_agent.model_directory
_, nlu_model = model.get_model_subdirectories(model_directory)

return run_evaluation(
return await run_evaluation(
data_path, nlu_model, disable_plotting=True, report_as_dict=True
)

Expand Down
4 changes: 2 additions & 2 deletions rasa/shared/importers/importer.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,8 +126,8 @@ def load_nlu_importer_from_config(

@staticmethod
def load_from_dict(
config: Optional[Dict],
config_path: Text,
config: Optional[Dict] = None,
config_path: Optional[Text] = None,
domain_path: Optional[Text] = None,
training_data_paths: Optional[List[Text]] = None,
training_type: Optional[TrainingType] = TrainingType.BOTH,
Expand Down
14 changes: 9 additions & 5 deletions rasa/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,9 @@ def test(
additional_arguments = {}

test_core(model, stories, output, additional_arguments)
test_nlu(model, nlu_data, output, additional_arguments)
rasa.utils.common.run_in_loop(
test_nlu(model, nlu_data, output, additional_arguments)
)


def test_core(
Expand Down Expand Up @@ -154,7 +156,7 @@ def test_core(
rasa.utils.common.run_in_loop(test(stories, _agent, out_directory=output, **kwargs))


def test_nlu(
async def test_nlu(
model: Optional[Text],
nlu_data: Optional[Text],
output_directory: Text = DEFAULT_RESULTS_PATH,
Expand All @@ -180,15 +182,17 @@ def test_nlu(
kwargs = rasa.shared.utils.common.minimal_kwargs(
additional_arguments, run_evaluation, ["data_path", "model"]
)
run_evaluation(nlu_data, nlu_model, output_directory=output_directory, **kwargs)
await run_evaluation(
nlu_data, nlu_model, output_directory=output_directory, **kwargs
)
else:
rasa.shared.utils.cli.print_error(
"Could not find any model. Use 'rasa train nlu' to train a "
"Rasa model and provide it via the '--model' argument."
)


def compare_nlu_models(
async def compare_nlu_models(
configs: List[Text],
nlu: Text,
output: Text,
Expand All @@ -214,7 +218,7 @@ def compare_nlu_models(
model_name: [[] for _ in range(runs)] for model_name in model_names
}

training_examples_per_run = compare_nlu(
training_examples_per_run = await compare_nlu(
configs,
data,
exclusion_percentages,
Expand Down
26 changes: 12 additions & 14 deletions rasa/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -661,7 +661,7 @@ async def _core_model_for_finetuning(
return None


def train_nlu(
async def train_nlu(
config: Text,
nlu_data: Text,
output: Text,
Expand Down Expand Up @@ -697,19 +697,17 @@ def train_nlu(
otherwise the path to the directory with the trained model files.
"""
return rasa.utils.common.run_in_loop(
_train_nlu_async(
config,
nlu_data,
output,
train_path,
fixed_model_name,
persist_nlu_training_data,
additional_arguments,
domain=domain,
model_to_finetune=model_to_finetune,
finetuning_epoch_fraction=finetuning_epoch_fraction,
)
return await _train_nlu_async(
config,
nlu_data,
output,
train_path,
fixed_model_name,
persist_nlu_training_data,
additional_arguments,
domain=domain,
model_to_finetune=model_to_finetune,
finetuning_epoch_fraction=finetuning_epoch_fraction,
)


Expand Down
8 changes: 4 additions & 4 deletions tests/nlu/test_evaluation.py
Original file line number Diff line number Diff line change
Expand Up @@ -357,8 +357,8 @@ def test_drop_intents_below_freq():
assert clean_td.intents == {"affirm", "restaurant_search"}


def test_run_evaluation(unpacked_trained_moodbot_path: Text):
result = run_evaluation(
async def test_run_evaluation(unpacked_trained_moodbot_path: Text):
result = await run_evaluation(
DEFAULT_DATA_PATH,
os.path.join(unpacked_trained_moodbot_path, "nlu"),
errors=False,
Expand Down Expand Up @@ -919,7 +919,7 @@ def test_label_replacement():
assert substitute_labels(original_labels, "O", "no_entity") == target_labels


def test_nlu_comparison(tmp_path: Path):
async def test_nlu_comparison(tmp_path: Path):
config = {
"language": "en",
"pipeline": [
Expand All @@ -933,7 +933,7 @@ def test_nlu_comparison(tmp_path: Path):
configs = [write_file_config(config).name, write_file_config(config).name]

output = str(tmp_path)
compare_nlu_models(
await compare_nlu_models(
configs, DEFAULT_DATA_PATH, output, runs=2, exclusion_percentages=[50, 80]
)

Expand Down
Loading

0 comments on commit a7387f4

Please sign in to comment.