
Merge pull request #84 from RasaHQ/same-llm-for-generate
use the same llm for response generation as for command prediction
tmbo authored Nov 14, 2023
2 parents 5465c08 + 38e0530 commit 1c0a8c8
Showing 1 changed file with 3 additions and 3 deletions.
rasa/shared/core/flows/steps/generate_response.py (3 additions & 3 deletions)
@@ -14,14 +14,14 @@

 from rasa.shared.utils.llm import (
     DEFAULT_OPENAI_TEMPERATURE,
-    DEFAULT_OPENAI_GENERATE_MODEL_NAME,
+    DEFAULT_OPENAI_CHAT_MODEL_NAME_ADVANCED,
 )

 DEFAULT_LLM_CONFIG = {
     "_type": "openai",
-    "request_timeout": 5,
+    "request_timeout": 7,
     "temperature": DEFAULT_OPENAI_TEMPERATURE,
-    "model_name": DEFAULT_OPENAI_GENERATE_MODEL_NAME,
+    "model_name": DEFAULT_OPENAI_CHAT_MODEL_NAME_ADVANCED,
     "max_tokens": DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
 }
 structlogger = structlog.get_logger()
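
For context, the net effect of this change is that the generate-response flow step now defaults to the same advanced chat model that command prediction uses, with the request timeout raised from 5 to 7 seconds, presumably to accommodate the slower model. A minimal sketch of the resolved default config follows; the concrete constant values are assumptions for illustration and are not shown in this diff (the real values live in rasa/shared/utils/llm.py):

# Sketch: resolved DEFAULT_LLM_CONFIG after this commit.
# The three constant values below are assumptions, not confirmed by the diff.
DEFAULT_OPENAI_TEMPERATURE = 0.7                    # assumed value
DEFAULT_OPENAI_CHAT_MODEL_NAME_ADVANCED = "gpt-4"   # assumed value
DEFAULT_OPENAI_MAX_GENERATED_TOKENS = 256           # assumed value

DEFAULT_LLM_CONFIG = {
    "_type": "openai",                              # OpenAI-backed LLM
    "request_timeout": 7,                           # seconds; raised from 5
    "temperature": DEFAULT_OPENAI_TEMPERATURE,
    "model_name": DEFAULT_OPENAI_CHAT_MODEL_NAME_ADVANCED,
    "max_tokens": DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
}

Sharing one model between command prediction and response generation means the two steps no longer mix a chat model with what was likely a completion-style default (the old DEFAULT_OPENAI_GENERATE_MODEL_NAME), so both get consistent behavior and a single set of operational limits.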
