Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -1004,6 +1004,15 @@ async def parse_stream():
return inner


@pytest.fixture()
def async_iterator():
    """Fixture providing a factory that wraps any iterable in an async generator.

    Tests call the returned factory with a list of values to obtain an
    async iterator yielding those values in order — handy for mocking
    streaming API responses.
    """

    async def inner(values):
        for item in values:
            yield item

    return inner


class MockServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self): # noqa: N802
# Process an HTTP GET request and return a response.
Expand Down
11 changes: 3 additions & 8 deletions tests/integrations/anthropic/test_anthropic.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,11 +67,6 @@ async def __call__(self, *args, **kwargs):
)


async def async_iterator(values):
    """Yield each element of *values* in order, as an async generator."""
    for item in values:
        yield item


@pytest.mark.parametrize(
"send_default_pii, include_prompts",
[
Expand Down Expand Up @@ -324,7 +319,7 @@ def test_streaming_create_message(
],
)
async def test_streaming_create_message_async(
sentry_init, capture_events, send_default_pii, include_prompts
sentry_init, capture_events, send_default_pii, include_prompts, async_iterator
):
client = AsyncAnthropic(api_key="z")
returned_stream = AsyncStream(cast_to=None, response=None, client=client)
Expand Down Expand Up @@ -567,7 +562,7 @@ def test_streaming_create_message_with_input_json_delta(
],
)
async def test_streaming_create_message_with_input_json_delta_async(
sentry_init, capture_events, send_default_pii, include_prompts
sentry_init, capture_events, send_default_pii, include_prompts, async_iterator
):
client = AsyncAnthropic(api_key="z")
returned_stream = AsyncStream(cast_to=None, response=None, client=client)
Expand Down Expand Up @@ -1361,7 +1356,7 @@ def test_streaming_create_message_with_system_prompt(
],
)
async def test_streaming_create_message_with_system_prompt_async(
sentry_init, capture_events, send_default_pii, include_prompts
sentry_init, capture_events, send_default_pii, include_prompts, async_iterator
):
"""Test that system prompts are properly captured in streaming mode (async)."""
client = AsyncAnthropic(api_key="z")
Expand Down
25 changes: 13 additions & 12 deletions tests/integrations/openai/test_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,11 +124,6 @@ async def __call__(self, *args, **kwargs):
)


async def async_iterator(values):
for value in values:
yield value


@pytest.mark.parametrize(
"send_default_pii, include_prompts",
[
Expand Down Expand Up @@ -689,7 +684,7 @@ def test_streaming_chat_completion(sentry_init, capture_events, messages, reques
],
)
async def test_streaming_chat_completion_async_no_prompts(
sentry_init, capture_events, send_default_pii, include_prompts
sentry_init, capture_events, send_default_pii, include_prompts, async_iterator
):
sentry_init(
integrations=[
Expand Down Expand Up @@ -829,7 +824,7 @@ async def test_streaming_chat_completion_async_no_prompts(
],
)
async def test_streaming_chat_completion_async(
sentry_init, capture_events, messages, request
sentry_init, capture_events, messages, request, async_iterator
):
sentry_init(
integrations=[
Expand Down Expand Up @@ -1463,7 +1458,9 @@ def test_span_origin_streaming_chat(sentry_init, capture_events):


@pytest.mark.asyncio
async def test_span_origin_streaming_chat_async(sentry_init, capture_events):
async def test_span_origin_streaming_chat_async(
sentry_init, capture_events, async_iterator
):
sentry_init(
integrations=[OpenAIIntegration()],
traces_sample_rate=1.0,
Expand Down Expand Up @@ -2420,7 +2417,7 @@ async def test_ai_client_span_responses_async_api(
)
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
async def test_ai_client_span_streaming_responses_async_api(
sentry_init, capture_events, instructions, input, request
sentry_init, capture_events, instructions, input, request, async_iterator
):
sentry_init(
integrations=[OpenAIIntegration(include_prompts=True)],
Expand Down Expand Up @@ -2799,7 +2796,7 @@ def test_streaming_responses_api(
)
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
async def test_streaming_responses_api_async(
sentry_init, capture_events, send_default_pii, include_prompts
sentry_init, capture_events, send_default_pii, include_prompts, async_iterator
):
sentry_init(
integrations=[
Expand Down Expand Up @@ -3037,7 +3034,9 @@ def test_streaming_chat_completion_ttft(sentry_init, capture_events):

# noinspection PyTypeChecker
@pytest.mark.asyncio
async def test_streaming_chat_completion_ttft_async(sentry_init, capture_events):
async def test_streaming_chat_completion_ttft_async(
sentry_init, capture_events, async_iterator
):
"""
Test that async streaming chat completions capture time-to-first-token (TTFT).
"""
Expand Down Expand Up @@ -3142,7 +3141,9 @@ def test_streaming_responses_api_ttft(sentry_init, capture_events):
# noinspection PyTypeChecker
@pytest.mark.asyncio
@pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available")
async def test_streaming_responses_api_ttft_async(sentry_init, capture_events):
async def test_streaming_responses_api_ttft_async(
sentry_init, capture_events, async_iterator
):
"""
Test that async streaming responses API captures time-to-first-token (TTFT).
"""
Expand Down
Loading
Loading