diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py
index 1f04a6643..781ebeceb 100644
--- a/tests/api_resources/audio/test_speech.py
+++ b/tests/api_resources/audio/test_speech.py
@@ -27,7 +27,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> No

         response = client.audio.speech.with_raw_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )

@@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter)
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         with client.audio.speech.with_streaming_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         ) as response:
             assert not response.is_closed
@@ -90,7 +90,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRo
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -102,7 +102,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, re
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -117,7 +117,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock:

         response = await async_client.audio.speech.with_raw_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )

@@ -132,7 +132,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         async with async_client.audio.speech.with_streaming_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         ) as response:
             assert not response.is_closed
diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py
index fa7ae5213..69d914200 100644
--- a/tests/api_resources/test_completions.py
+++ b/tests/api_resources/test_completions.py
@@ -20,7 +20,7 @@ class TestCompletions:
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])
@@ -28,7 +28,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )

@@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         ) as response:
@@ -142,7 +142,7 @@ class TestAsyncCompletions:
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])
@@ -150,7 +150,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )

@@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpe
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -209,7 +209,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         ) as response:
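
For context on the pattern the speech tests above rely on: respx intercepts httpx requests, so the mocked route answers the POST without any real network call. Below is a minimal standalone sketch of that pattern under assumed placeholders; it is not part of this diff, and the URL and JSON payload are illustrative, not the SDK's actual fixtures.

# Minimal sketch of the respx + httpx mocking pattern seen in test_speech.py.
# Illustrative only: the URL and JSON payload are placeholders.
import httpx
import respx


@respx.mock
def test_mocked_speech_endpoint() -> None:
    # Register a canned 200 response for POSTs to this URL.
    respx.post("https://api.openai.com/v1/audio/speech").mock(
        return_value=httpx.Response(200, json={"foo": "bar"})
    )
    # The request below never leaves the process; respx answers it.
    response = httpx.post("https://api.openai.com/v1/audio/speech", json={"input": "hi"})
    assert response.status_code == 200
    assert response.json() == {"foo": "bar"}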