@@ -20,15 +20,15 @@ class TestCompletions:
2020 @parametrize
2121 def test_method_create_overload_1 (self , client : OpenAI ) -> None :
2222 completion = client .completions .create (
23- model = "gpt-3.5-turbo-instruct " ,
23+ model = "string " ,
2424 prompt = "This is a test." ,
2525 )
2626 assert_matches_type (Completion , completion , path = ["response" ])
2727
2828 @parametrize
2929 def test_method_create_with_all_params_overload_1 (self , client : OpenAI ) -> None :
3030 completion = client .completions .create (
31- model = "gpt-3.5-turbo-instruct " ,
31+ model = "string " ,
3232 prompt = "This is a test." ,
3333 best_of = 0 ,
3434 echo = True ,
@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
5252 @parametrize
5353 def test_raw_response_create_overload_1 (self , client : OpenAI ) -> None :
5454 response = client .completions .with_raw_response .create (
55- model = "gpt-3.5-turbo-instruct " ,
55+ model = "string " ,
5656 prompt = "This is a test." ,
5757 )
5858
@@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
6464 @parametrize
6565 def test_streaming_response_create_overload_1 (self , client : OpenAI ) -> None :
6666 with client .completions .with_streaming_response .create (
67- model = "gpt-3.5-turbo-instruct " ,
67+ model = "string " ,
6868 prompt = "This is a test." ,
6969 ) as response :
7070 assert not response .is_closed
@@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
7878 @parametrize
7979 def test_method_create_overload_2 (self , client : OpenAI ) -> None :
8080 completion_stream = client .completions .create (
81- model = "gpt-3.5-turbo-instruct " ,
81+ model = "string " ,
8282 prompt = "This is a test." ,
8383 stream = True ,
8484 )
@@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
8787 @parametrize
8888 def test_method_create_with_all_params_overload_2 (self , client : OpenAI ) -> None :
8989 completion_stream = client .completions .create (
90- model = "gpt-3.5-turbo-instruct " ,
90+ model = "string " ,
9191 prompt = "This is a test." ,
9292 stream = True ,
9393 best_of = 0 ,
@@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
111111 @parametrize
112112 def test_raw_response_create_overload_2 (self , client : OpenAI ) -> None :
113113 response = client .completions .with_raw_response .create (
114- model = "gpt-3.5-turbo-instruct " ,
114+ model = "string " ,
115115 prompt = "This is a test." ,
116116 stream = True ,
117117 )
@@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
123123 @parametrize
124124 def test_streaming_response_create_overload_2 (self , client : OpenAI ) -> None :
125125 with client .completions .with_streaming_response .create (
126- model = "gpt-3.5-turbo-instruct " ,
126+ model = "string " ,
127127 prompt = "This is a test." ,
128128 stream = True ,
129129 ) as response :
@@ -142,15 +142,15 @@ class TestAsyncCompletions:
142142 @parametrize
143143 async def test_method_create_overload_1 (self , async_client : AsyncOpenAI ) -> None :
144144 completion = await async_client .completions .create (
145- model = "gpt-3.5-turbo-instruct " ,
145+ model = "string " ,
146146 prompt = "This is a test." ,
147147 )
148148 assert_matches_type (Completion , completion , path = ["response" ])
149149
150150 @parametrize
151151 async def test_method_create_with_all_params_overload_1 (self , async_client : AsyncOpenAI ) -> None :
152152 completion = await async_client .completions .create (
153- model = "gpt-3.5-turbo-instruct " ,
153+ model = "string " ,
154154 prompt = "This is a test." ,
155155 best_of = 0 ,
156156 echo = True ,
@@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
174174 @parametrize
175175 async def test_raw_response_create_overload_1 (self , async_client : AsyncOpenAI ) -> None :
176176 response = await async_client .completions .with_raw_response .create (
177- model = "gpt-3.5-turbo-instruct " ,
177+ model = "string " ,
178178 prompt = "This is a test." ,
179179 )
180180
@@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
186186 @parametrize
187187 async def test_streaming_response_create_overload_1 (self , async_client : AsyncOpenAI ) -> None :
188188 async with async_client .completions .with_streaming_response .create (
189- model = "gpt-3.5-turbo-instruct " ,
189+ model = "string " ,
190190 prompt = "This is a test." ,
191191 ) as response :
192192 assert not response .is_closed
@@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
200200 @parametrize
201201 async def test_method_create_overload_2 (self , async_client : AsyncOpenAI ) -> None :
202202 completion_stream = await async_client .completions .create (
203- model = "gpt-3.5-turbo-instruct " ,
203+ model = "string " ,
204204 prompt = "This is a test." ,
205205 stream = True ,
206206 )
@@ -209,7 +209,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
209209 @parametrize
210210 async def test_method_create_with_all_params_overload_2 (self , async_client : AsyncOpenAI ) -> None :
211211 completion_stream = await async_client .completions .create (
212- model = "gpt-3.5-turbo-instruct " ,
212+ model = "string " ,
213213 prompt = "This is a test." ,
214214 stream = True ,
215215 best_of = 0 ,
@@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
233233 @parametrize
234234 async def test_raw_response_create_overload_2 (self , async_client : AsyncOpenAI ) -> None :
235235 response = await async_client .completions .with_raw_response .create (
236- model = "gpt-3.5-turbo-instruct " ,
236+ model = "string " ,
237237 prompt = "This is a test." ,
238238 stream = True ,
239239 )
@@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
245245 @parametrize
246246 async def test_streaming_response_create_overload_2 (self , async_client : AsyncOpenAI ) -> None :
247247 async with async_client .completions .with_streaming_response .create (
248- model = "gpt-3.5-turbo-instruct " ,
248+ model = "string " ,
249249 prompt = "This is a test." ,
250250 stream = True ,
251251 ) as response :