feat: LLM - CodeChat - Added support for context
PiperOrigin-RevId: 563934372
Ark-kun authored and copybara-github committed Sep 9, 2023
1 parent d76bceb commit f7feeca
Showing 2 changed files with 9 additions and 0 deletions.
tests/unit/aiplatform/test_language_models.py (1 change: 1 addition & 0 deletions)
@@ -2038,6 +2038,7 @@ def test_code_chat(self):
         )

         code_chat = model.start_chat(
+            context="We're working on large-scale production system.",
             max_output_tokens=128,
             temperature=0.2,
             stop_sequences=["\n"],
vertexai/language_models/_language_models.py (8 changes: 8 additions & 0 deletions)
@@ -1287,6 +1287,7 @@ class CodeChatModel(_ChatModelBase):
         code_chat_model = CodeChatModel.from_pretrained("codechat-bison@001")
         code_chat = code_chat_model.start_chat(
+            context="I'm writing a large-scale enterprise application.",
             max_output_tokens=128,
             temperature=0.2,
         )
@@ -1301,6 +1302,7 @@ class CodeChatModel(_ChatModelBase):
     def start_chat(
         self,
         *,
+        context: Optional[str] = None,
         max_output_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         message_history: Optional[List[ChatMessage]] = None,
@@ -1309,6 +1311,9 @@ def start_chat(
         """Starts a chat session with the code chat model.

         Args:
+            context: Context shapes how the model responds throughout the conversation.
+                For example, you can use context to specify words the model can or
+                cannot use, topics to focus on or avoid, or the response format or style.
             max_output_tokens: Max length of the output text in tokens. Range: [1, 1000].
             temperature: Controls the randomness of predictions. Range: [0, 1].
             stop_sequences: Customized stop sequences to stop the decoding process.
@@ -1318,6 +1323,7 @@ def start_chat(
         """
         return CodeChatSession(
             model=self,
+            context=context,
             max_output_tokens=max_output_tokens,
             temperature=temperature,
             message_history=message_history,
@@ -1653,13 +1659,15 @@ class CodeChatSession(_ChatSessionBase):
     def __init__(
         self,
         model: CodeChatModel,
+        context: Optional[str] = None,
         max_output_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         message_history: Optional[List[ChatMessage]] = None,
         stop_sequences: Optional[List[str]] = None,
     ):
         super().__init__(
             model=model,
+            context=context,
             max_output_tokens=max_output_tokens,
             temperature=temperature,
             message_history=message_history,
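Taken together, the change lets callers supply system-style instructions when opening a code chat session: start_chat accepts context and forwards it to CodeChatSession, which passes it to the base chat session. A minimal usage sketch, not part of the commit: the model name and context string are taken from the docstring example above, while send_message and response.text are assumed from the surrounding chat session API rather than shown in this diff.

    from vertexai.language_models import CodeChatModel

    code_chat_model = CodeChatModel.from_pretrained("codechat-bison@001")

    # Per the new docstring, context shapes how the model responds
    # throughout the conversation (style, focus, format, etc.).
    code_chat = code_chat_model.start_chat(
        context="I'm writing a large-scale enterprise application.",
        max_output_tokens=128,
        temperature=0.2,
    )

    # Assumed API: send_message continues the session and returns a response object.
    response = code_chat.send_message("Write a function that checks whether a year is a leap year.")
    print(response.text)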
