-
-
Notifications
You must be signed in to change notification settings - Fork 15.4k
[Misc] Use helper function to generate dummy messages in OpenAI MM tests #26875
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -53,22 +53,35 @@ def base64_encoded_audio() -> dict[str, str]: | |
| } | ||
|
|
||
|
|
||
| @pytest.mark.asyncio | ||
| @pytest.mark.parametrize("model_name", [MODEL_NAME]) | ||
| @pytest.mark.parametrize("audio_url", [TEST_AUDIO_URLS[0]]) | ||
| async def test_single_chat_session_audio( | ||
| client: openai.AsyncOpenAI, model_name: str, audio_url: str | ||
| def dummy_messages_from_audio_url( | ||
| audio_urls: str | list[str], | ||
| content_text: str = "What's happening in this audio?", | ||
| ): | ||
| messages = [ | ||
| if isinstance(audio_urls, str): | ||
| audio_urls = [audio_urls] | ||
|
|
||
| return [ | ||
| { | ||
| "role": "user", | ||
| "content": [ | ||
| {"type": "audio_url", "audio_url": {"url": audio_url}}, | ||
| {"type": "text", "text": "What's happening in this audio?"}, | ||
| *( | ||
| {"type": "audio_url", "audio_url": {"url": audio_url}} | ||
| for audio_url in audio_urls | ||
| ), | ||
| {"type": "text", "text": content_text}, | ||
| ], | ||
| } | ||
| ] | ||
|
Comment on lines
+56
to
74
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. While this helper function is a good step towards reducing code duplication within this file, similar helper functions ( To further improve maintainability, consider creating a single, more generic helper function in a shared location like For example, you could have a function in def dummy_messages_from_media_url(
media_type: str,
media_urls: str | list[str],
content_text: str,
):
if isinstance(media_urls, str):
media_urls = [media_urls]
media_key = f"{media_type}_url"
return [
{
"role": "user",
"content": [
*(
{"type": media_key, media_key: {"url": media_url}}
for media_url in media_urls
),
{"type": "text", "text": content_text},
],
}
    ]

Then, in this file, you could use it like this:

from tests.utils import dummy_messages_from_media_url
# ...
messages = dummy_messages_from_media_url(
"audio",
audio_url,
"What's happening in this audio?"
)

This would eliminate the duplicated helper functions and make the test suite easier to maintain. The default text can be passed from each test file. |
||
|
|
||
|
|
||
| @pytest.mark.asyncio | ||
| @pytest.mark.parametrize("model_name", [MODEL_NAME]) | ||
| @pytest.mark.parametrize("audio_url", [TEST_AUDIO_URLS[0]]) | ||
| async def test_single_chat_session_audio( | ||
| client: openai.AsyncOpenAI, model_name: str, audio_url: str | ||
| ): | ||
| messages = dummy_messages_from_audio_url(audio_url) | ||
|
|
||
| # test single completion | ||
| chat_completion = await client.chat.completions.create( | ||
| model=model_name, | ||
|
|
@@ -138,20 +151,9 @@ async def test_single_chat_session_audio_base64encoded( | |
| audio_url: str, | ||
| base64_encoded_audio: dict[str, str], | ||
| ): | ||
| messages = [ | ||
| { | ||
| "role": "user", | ||
| "content": [ | ||
| { | ||
| "type": "audio_url", | ||
| "audio_url": { | ||
| "url": f"data:audio/wav;base64,{base64_encoded_audio[audio_url]}" # noqa: E501 | ||
| }, | ||
| }, | ||
| {"type": "text", "text": "What's happening in this audio?"}, | ||
| ], | ||
| } | ||
| ] | ||
| messages = dummy_messages_from_audio_url( | ||
| f"data:audio/wav;base64,{base64_encoded_audio[audio_url]}" | ||
| ) | ||
|
|
||
| # test single completion | ||
| chat_completion = await client.chat.completions.create( | ||
|
|
@@ -252,15 +254,7 @@ async def test_single_chat_session_input_audio( | |
| async def test_chat_streaming_audio( | ||
| client: openai.AsyncOpenAI, model_name: str, audio_url: str | ||
| ): | ||
| messages = [ | ||
| { | ||
| "role": "user", | ||
| "content": [ | ||
| {"type": "audio_url", "audio_url": {"url": audio_url}}, | ||
| {"type": "text", "text": "What's happening in this audio?"}, | ||
| ], | ||
| } | ||
| ] | ||
| messages = dummy_messages_from_audio_url(audio_url) | ||
|
|
||
| # test single completion | ||
| chat_completion = await client.chat.completions.create( | ||
|
|
@@ -365,18 +359,7 @@ async def test_chat_streaming_input_audio( | |
| async def test_multi_audio_input( | ||
| client: openai.AsyncOpenAI, model_name: str, audio_urls: list[str] | ||
| ): | ||
| messages = [ | ||
| { | ||
| "role": "user", | ||
| "content": [ | ||
| *( | ||
| {"type": "audio_url", "audio_url": {"url": audio_url}} | ||
| for audio_url in audio_urls | ||
| ), | ||
| {"type": "text", "text": "What's happening in this audio?"}, | ||
| ], | ||
| } | ||
| ] | ||
| messages = dummy_messages_from_audio_url(audio_urls) | ||
|
|
||
| if len(audio_urls) > MAXIMUM_AUDIOS: | ||
| with pytest.raises(openai.BadRequestError): # test multi-audio input | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.