chore(internal): minor refactor of tests (#1471)
stainless-bot authored and stainless-app[bot] committed Jun 6, 2024
1 parent 2fcc0e4 commit 156d13e
Showing 2 changed files with 24 additions and 24 deletions.
16 changes: 8 additions & 8 deletions tests/api_resources/audio/test_speech.py
@@ -27,7 +27,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -39,7 +39,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -54,7 +54,7 @@ def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
 
         response = client.audio.speech.with_raw_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )
 
@@ -69,7 +69,7 @@ def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         with client.audio.speech.with_streaming_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         ) as response:
             assert not response.is_closed
@@ -90,7 +90,7 @@ async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )
         assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)
@@ -102,7 +102,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
             response_format="mp3",
             speed=0.25,
@@ -117,7 +117,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
 
         response = await async_client.audio.speech.with_raw_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         )
 
@@ -132,7 +132,7 @@ async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None:
         respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         async with async_client.audio.speech.with_streaming_response.create(
             input="string",
-            model="tts-1",
+            model="string",
             voice="alloy",
         ) as response:
             assert not response.is_closed
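For orientation, the pattern these speech tests share looks roughly like the following standalone sketch. It is a minimal reconstruction, not part of the commit: the respx route, the dummy api_key, and the test name are assumptions, while the placeholder arguments and the isinstance assertion mirror the diff above.

import httpx
import respx

from openai import OpenAI, _legacy_response


@respx.mock
def test_speech_create_returns_binary_content() -> None:  # hypothetical test name
    # Stub every POST to the speech endpoint so no real request is sent.
    respx.post("https://api.openai.com/v1/audio/speech").mock(
        return_value=httpx.Response(200, json={"foo": "bar"})
    )
    client = OpenAI(api_key="test-key")  # dummy key; nothing leaves the process
    speech = client.audio.speech.create(
        input="string",
        model="string",  # placeholder value, as in the refactored tests
        voice="alloy",
    )
    # The SDK wraps the mocked bytes in a binary response object.
    assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent)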
32 changes: 16 additions & 16 deletions tests/api_resources/test_completions.py
@@ -20,15 +20,15 @@ class TestCompletions:
     @parametrize
     def test_method_create_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])
 
     @parametrize
     def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
         completion = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )
 
@@ -64,7 +64,7 @@ def test_raw_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -78,7 +78,7 @@ def test_streaming_response_create_overload_1(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -87,7 +87,7 @@ def test_method_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
         completion_stream = client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -111,7 +111,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
         response = client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -123,7 +123,7 @@ def test_raw_response_create_overload_2(self, client: OpenAI) -> None:
     @parametrize
     def test_streaming_response_create_overload_2(self, client: OpenAI) -> None:
         with client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         ) as response:
@@ -142,15 +142,15 @@ class TestAsyncCompletions:
     @parametrize
     async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )
         assert_matches_type(Completion, completion, path=["response"])
 
     @parametrize
     async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
         completion = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             best_of=0,
             echo=True,
@@ -174,7 +174,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         )
 
@@ -186,7 +186,7 @@ async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
         ) as response:
             assert not response.is_closed
@@ -200,7 +200,7 @@ async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -209,7 +209,7 @@ async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
         completion_stream = await async_client.completions.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
             best_of=0,
@@ -233,7 +233,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.completions.with_raw_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         )
@@ -245,7 +245,7 @@ async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
     @parametrize
     async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None:
         async with async_client.completions.with_streaming_response.create(
-            model="gpt-3.5-turbo-instruct",
+            model="string",
             prompt="This is a test.",
             stream=True,
         ) as response:
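The completions tests split into two overloads: without stream=True, create() returns a parsed Completion, while stream=True switches the return type to an iterable stream of chunks. A minimal usage sketch of that distinction follows; it assumes a valid OPENAI_API_KEY in the environment, and the model name is illustrative (taken from the old side of the diff), not part of this commit.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Overload 1: a blocking call that returns one parsed Completion.
completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # illustrative model name
    prompt="This is a test.",
)
print(completion.choices[0].text)

# Overload 2: stream=True returns an iterator of partial completions.
stream = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="This is a test.",
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].text, end="")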
