Skip to content

Commit 95dffc8

Browse files
authored
Replace the use of assert in non-test code (microsoft#80)
* Replace `assert`s in the `conversable_agent` module with `if-log-raise`. * Use a `logger` object in the `code_utils` module. * Replace use of `assert` with `if-log-raise` in the `code_utils` module. * Replace use of `assert` in the `math_utils` module with `if-not-raise`. * Replace `assert` with `if` in the `oai.completion` module. * Replace `assert` in the `retrieve_utils` module with an if statement. * Add missing `not`. * Blacken `completion.py`. * Test `generate_reply` and `a_generate_reply` raise an assertion error when there are neither `messages` nor a `sender`. * Test `execute_code` raises an `AssertionError` when neither code nor filename is provided. * Test `split_text_to_chunks` raises when passed an invalid chunk mode. * * Add `tiktoken` and `chromadb` to test dependencies as they're used in the `test_retrieve_utils` module. * Sort the test requirements alphabetically.
1 parent 9afe8da commit 95dffc8

9 files changed

+91
-22
lines changed

autogen/agentchat/conversable_agent.py

+14-2
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from collections import defaultdict
33
import copy
44
import json
5+
import logging
56
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
67
from autogen import oai
78
from .agent import Agent
@@ -21,6 +22,9 @@ def colored(x, *args, **kwargs):
2122
return x
2223

2324

25+
logger = logging.getLogger(__name__)
26+
27+
2428
class ConversableAgent(Agent):
2529
"""(In preview) A class for generic conversable agents which can be configured as assistant or user proxy.
2630
@@ -757,7 +761,11 @@ def generate_reply(
757761
Returns:
758762
str or dict or None: reply. None if no reply is generated.
759763
"""
760-
assert messages is not None or sender is not None, "Either messages or sender must be provided."
764+
if all((messages is None, sender is None)):
765+
error_msg = f"Either {messages=} or {sender=} must be provided."
766+
logger.error(error_msg)
767+
raise AssertionError(error_msg)
768+
761769
if messages is None:
762770
messages = self._oai_messages[sender]
763771

@@ -804,7 +812,11 @@ async def a_generate_reply(
804812
Returns:
805813
str or dict or None: reply. None if no reply is generated.
806814
"""
807-
assert messages is not None or sender is not None, "Either messages or sender must be provided."
815+
if all((messages is None, sender is None)):
816+
error_msg = f"Either {messages=} or {sender=} must be provided."
817+
logger.error(error_msg)
818+
raise AssertionError(error_msg)
819+
808820
if messages is None:
809821
messages = self._oai_messages[sender]
810822

autogen/code_utils.py

+8-2
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,8 @@
2626
WIN32 = sys.platform == "win32"
2727
PATH_SEPARATOR = WIN32 and "\\" or "/"
2828

29+
logger = logging.getLogger(__name__)
30+
2931

3032
def infer_lang(code):
3133
"""infer the language for the code.
@@ -250,7 +252,11 @@ def execute_code(
250252
str: The error message if the code fails to execute; the stdout otherwise.
251253
image: The docker image name after container run when docker is used.
252254
"""
253-
assert code is not None or filename is not None, "Either code or filename must be provided."
255+
if all((code is None, filename is None)):
256+
error_msg = f"Either {code=} or {filename=} must be provided."
257+
logger.error(error_msg)
258+
raise AssertionError(error_msg)
259+
254260
timeout = timeout or DEFAULT_TIMEOUT
255261
original_filename = filename
256262
if WIN32 and lang in ["sh", "shell"]:
@@ -276,7 +282,7 @@ def execute_code(
276282
f".\\{filename}" if WIN32 else filename,
277283
]
278284
if WIN32:
279-
logging.warning("SIGALRM is not supported on Windows. No timeout will be enforced.")
285+
logger.warning("SIGALRM is not supported on Windows. No timeout will be enforced.")
280286
result = subprocess.run(
281287
cmd,
282288
cwd=work_dir,

autogen/math_utils.py

+9-5
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,9 @@ def remove_boxed(string: str) -> Optional[str]:
3535
"""
3636
left = "\\boxed{"
3737
try:
38-
assert string[: len(left)] == left
39-
assert string[-1] == "}"
38+
if not all((string[: len(left)] == left, string[-1] == "}")):
39+
raise AssertionError
40+
4041
return string[len(left) : -1]
4142
except Exception:
4243
return None
@@ -94,7 +95,8 @@ def _fix_fracs(string: str) -> str:
9495
new_str += substr
9596
else:
9697
try:
97-
assert len(substr) >= 2
98+
if not len(substr) >= 2:
99+
raise AssertionError
98100
except Exception:
99101
return string
100102
a = substr[0]
@@ -129,7 +131,8 @@ def _fix_a_slash_b(string: str) -> str:
129131
try:
130132
a = int(a_str)
131133
b = int(b_str)
132-
assert string == "{}/{}".format(a, b)
134+
if not string == "{}/{}".format(a, b):
135+
raise AssertionError
133136
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
134137
return new_string
135138
except Exception:
@@ -143,7 +146,8 @@ def _remove_right_units(string: str) -> str:
143146
"""
144147
if "\\text{ " in string:
145148
splits = string.split("\\text{ ")
146-
assert len(splits) == 2
149+
if not len(splits) == 2:
150+
raise AssertionError
147151
return splits[0]
148152
else:
149153
return string

autogen/oai/completion.py

+20-9
Original file line numberDiff line numberDiff line change
@@ -582,23 +582,31 @@ def eval_func(responses, **data):
582582
cls._prompts = space.get("prompt")
583583
if cls._prompts is None:
584584
cls._messages = space.get("messages")
585-
assert isinstance(cls._messages, list) and isinstance(
586-
cls._messages[0], (dict, list)
587-
), "messages must be a list of dicts or a list of lists."
585+
if not all((isinstance(cls._messages, list), isinstance(cls._messages[0], (dict, list)))):
586+
error_msg = "messages must be a list of dicts or a list of lists."
587+
logger.error(error_msg)
588+
raise AssertionError(error_msg)
588589
if isinstance(cls._messages[0], dict):
589590
cls._messages = [cls._messages]
590591
space["messages"] = tune.choice(list(range(len(cls._messages))))
591592
else:
592-
assert space.get("messages") is None, "messages and prompt cannot be provided at the same time."
593-
assert isinstance(cls._prompts, (str, list)), "prompt must be a string or a list of strings."
593+
if space.get("messages") is not None:
594+
error_msg = "messages and prompt cannot be provided at the same time."
595+
logger.error(error_msg)
596+
raise AssertionError(error_msg)
597+
if not isinstance(cls._prompts, (str, list)):
598+
error_msg = "prompt must be a string or a list of strings."
599+
logger.error(error_msg)
600+
raise AssertionError(error_msg)
594601
if isinstance(cls._prompts, str):
595602
cls._prompts = [cls._prompts]
596603
space["prompt"] = tune.choice(list(range(len(cls._prompts))))
597604
cls._stops = space.get("stop")
598605
if cls._stops:
599-
assert isinstance(
600-
cls._stops, (str, list)
601-
), "stop must be a string, a list of strings, or a list of lists of strings."
606+
if not isinstance(cls._stops, (str, list)):
607+
error_msg = "stop must be a string, a list of strings, or a list of lists of strings."
608+
logger.error(error_msg)
609+
raise AssertionError(error_msg)
602610
if not (isinstance(cls._stops, list) and isinstance(cls._stops[0], list)):
603611
cls._stops = [cls._stops]
604612
space["stop"] = tune.choice(list(range(len(cls._stops))))
@@ -969,7 +977,10 @@ def eval_func(responses, **data):
969977
elif isinstance(agg_method, dict):
970978
for key in metric_keys:
971979
metric_agg_method = agg_method[key]
972-
assert callable(metric_agg_method), "please provide a callable for each metric"
980+
if not callable(metric_agg_method):
981+
error_msg = "please provide a callable for each metric"
982+
logger.error(error_msg)
983+
raise AssertionError(error_msg)
973984
result_agg[key] = metric_agg_method([r[key] for r in result_list])
974985
else:
975986
raise ValueError(

autogen/retrieve_utils.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
"yml",
3030
"pdf",
3131
]
32+
VALID_CHUNK_MODES = frozenset({"one_line", "multi_lines"})
3233

3334

3435
def num_tokens_from_text(
@@ -96,7 +97,8 @@ def split_text_to_chunks(
9697
overlap: int = 10,
9798
):
9899
"""Split a long text into chunks of max_tokens."""
99-
assert chunk_mode in {"one_line", "multi_lines"}
100+
if chunk_mode not in VALID_CHUNK_MODES:
101+
raise AssertionError
100102
if chunk_mode == "one_line":
101103
must_break_at_empty_line = False
102104
chunks = []

setup.py

+6-3
Original file line numberDiff line numberDiff line change
@@ -38,15 +38,18 @@
3838
install_requires=install_requires,
3939
extras_require={
4040
"test": [
41-
"pytest>=6.1.1",
41+
"chromadb",
4242
"coverage>=5.3",
43-
"pre-commit",
4443
"datasets",
44+
"ipykernel",
4545
"nbconvert",
4646
"nbformat",
47-
"ipykernel",
47+
"pre-commit",
4848
"pydantic==1.10.9",
49+
"pytest-asyncio",
50+
"pytest>=6.1.1",
4951
"sympy",
52+
"tiktoken",
5053
"wolframalpha",
5154
],
5255
"blendsearch": ["flaml[blendsearch]"],

test/agentchat/test_conversable_agent.py

+22
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,17 @@
22
from autogen.agentchat import ConversableAgent
33

44

5+
@pytest.fixture
6+
def conversable_agent():
7+
return ConversableAgent(
8+
"conversable_agent_0",
9+
max_consecutive_auto_reply=10,
10+
code_execution_config=False,
11+
llm_config=False,
12+
human_input_mode="NEVER",
13+
)
14+
15+
516
def test_trigger():
617
agent = ConversableAgent("a0", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER")
718
agent1 = ConversableAgent("a1", max_consecutive_auto_reply=0, human_input_mode="NEVER")
@@ -217,6 +228,17 @@ def add_num(num_to_be_added):
217228
), "generate_reply not working when messages is None"
218229

219230

231+
def test_generate_reply_raises_on_messages_and_sender_none(conversable_agent):
232+
with pytest.raises(AssertionError):
233+
conversable_agent.generate_reply(messages=None, sender=None)
234+
235+
236+
@pytest.mark.asyncio
237+
async def test_a_generate_reply_raises_on_messages_and_sender_none(conversable_agent):
238+
with pytest.raises(AssertionError):
239+
await conversable_agent.a_generate_reply(messages=None, sender=None)
240+
241+
220242
if __name__ == "__main__":
221243
test_trigger()
222244
# test_context()

test/test_code.py

+5
Original file line numberDiff line numberDiff line change
@@ -264,6 +264,11 @@ def test_execute_code(use_docker=None):
264264
assert isinstance(image, str) or docker is None or os.path.exists("/.dockerenv") or use_docker is False
265265

266266

267+
def test_execute_code_raises_when_code_and_filename_are_both_none():
268+
with pytest.raises(AssertionError):
269+
execute_code(code=None, filename=None)
270+
271+
267272
@pytest.mark.skipif(
268273
sys.platform in ["darwin"],
269274
reason="do not run on MacOS",

test/test_retrieve_utils.py

+4
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,10 @@ def test_split_text_to_chunks(self):
4848
chunks = split_text_to_chunks(long_text, max_tokens=1000)
4949
assert all(num_tokens_from_text(chunk) <= 1000 for chunk in chunks)
5050

51+
def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self):
52+
with pytest.raises(AssertionError):
53+
split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode")
54+
5155
def test_extract_text_from_pdf(self):
5256
pdf_file_path = os.path.join(test_dir, "example.pdf")
5357
assert "".join(expected_text.split()) == "".join(extract_text_from_pdf(pdf_file_path).strip().split())

0 commit comments

Comments
 (0)