Skip to content

Commit 3f3d137

Browse files
committed
Improve docstrings
1 parent bc64bd8 commit 3f3d137

File tree

2 files changed

+16
-3
lines changed

2 files changed

+16
-3
lines changed

autogen/agentchat/contrib/retrieve_user_proxy_agent.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ def __init__(
130130
This is the same as that used in chromadb. Default is False.
131131
- custom_token_count_function(Optional, Callable): a custom function to count the number of tokens in a string.
132132
The function should take a string as input and return three integers (token_count, tokens_per_message, tokens_per_name).
133-
Default is None, tiktoken will be used and works well for OpenAI models, but may not be accurate for other models.
133+
Default is None, tiktoken will be used and may not be accurate for non-OpenAI models.
134134
**kwargs (dict): other kwargs in [UserProxyAgent](../user_proxy_agent#__init__).
135135
"""
136136
super().__init__(

autogen/retrieve_utils.py

+15-2
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,20 @@ def num_tokens_from_text(
3838
return_tokens_per_name_and_message: bool = False,
3939
custom_token_count_function: Callable = None,
4040
) -> Union[int, Tuple[int, int, int]]:
41-
"""Return the number of tokens used by a text."""
41+
"""Return the number of tokens used by a text.
42+
43+
Args:
44+
text (str): The text to count tokens for.
45+
model (Optional, str): The model to use for tokenization. Default is "gpt-3.5-turbo-0613".
46+
return_tokens_per_name_and_message (Optional, bool): Whether to return the number of tokens per name and per
47+
message. Default is False.
48+
custom_token_count_function (Optional, Callable): A custom function to count tokens. Default is None.
49+
50+
Returns:
51+
int: The number of tokens used by the text.
52+
int: The number of tokens per message. Only returned if return_tokens_per_name_and_message is True.
53+
int: The number of tokens per name. Only returned if return_tokens_per_name_and_message is True.
54+
"""
4255
if isinstance(custom_token_count_function, Callable):
4356
token_count, tokens_per_message, tokens_per_name = custom_token_count_function(text)
4457
else:
@@ -89,7 +102,7 @@ def num_tokens_from_messages(
89102
if key == "name":
90103
num_tokens += tokens_per_name
91104
num_tokens += tokens_per_message
92-
num_tokens += custom_prime_count # ChatGPT every reply is primed with <|start|>assistant<|message|>
105+
num_tokens += custom_prime_count # With ChatGPT, every reply is primed with <|start|>assistant<|message|>
93106
return num_tokens
94107

95108

0 commit comments

Comments
 (0)