Skip to content
Prev Previous commit
Next Next commit
Fix linting issues in test_reasoning_content.py
  • Loading branch information
axion66 committed Jun 26, 2025
commit 0b00d15dad7254b6d8639f55e436e4829b66cbce
35 changes: 19 additions & 16 deletions tests/test_reasoning_content.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from __future__ import annotations

from typing import Any, AsyncIterator
from collections.abc import AsyncIterator

import pytest
from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
Expand All @@ -23,27 +23,27 @@
from agents.models.openai_provider import OpenAIProvider


# Helper functions to create test objects consistently
def create_content_delta(content: str) -> dict:
    """Build a streaming-chunk delta dict carrying regular assistant content.

    Mirrors the shape of an OpenAI ChatCompletionChunk choice delta: only
    ``content`` is populated; the remaining fields are explicitly ``None``
    so tests exercise the full delta schema.
    """
    # NOTE(review): the diff render duplicated these entries; one copy kept.
    return {
        "content": content,
        "role": None,
        "function_call": None,
        "tool_calls": None,
    }

def create_reasoning_delta(content: str) -> dict:
    """Build a streaming-chunk delta dict with reasoning content.

    Identical in shape to ``create_content_delta`` except that ``content``
    is ``None`` and the extra ``reasoning_content`` key carries the text —
    the only field that distinguishes a reasoning delta in these tests.
    """
    # NOTE(review): the diff render duplicated these entries; one copy kept.
    return {
        "content": None,
        "role": None,
        "function_call": None,
        "tool_calls": None,
        "reasoning_content": content,
    }



def create_chunk(delta: dict, include_usage: bool = False) -> ChatCompletionChunk:
kwargs = {
"id": "chunk-id",
Expand All @@ -52,7 +52,7 @@ def create_chunk(delta: dict, include_usage: bool = False) -> ChatCompletionChun
"object": "chat.completion.chunk",
"choices": [Choice(index=0, delta=delta)],
}

if include_usage:
kwargs["usage"] = CompletionUsage(
completion_tokens=4,
Expand All @@ -61,10 +61,13 @@ def create_chunk(delta: dict, include_usage: bool = False) -> ChatCompletionChun
completion_tokens_details=CompletionTokensDetails(reasoning_tokens=2),
prompt_tokens_details=PromptTokensDetails(cached_tokens=0),
)

return ChatCompletionChunk(**kwargs)

async def create_fake_stream(
    chunks: list[ChatCompletionChunk],
) -> AsyncIterator[ChatCompletionChunk]:
    """Yield the given chunks one by one, simulating a streaming API response.

    An async generator stand-in for the OpenAI client's chunk stream,
    letting tests drive the streaming code path with canned chunks.
    """
    # NOTE(review): the diff render left two signatures (pre- and post-lint);
    # the lint-fixed wrapped signature from the commit is kept.
    for chunk in chunks:
        yield chunk

Expand Down Expand Up @@ -157,7 +160,8 @@ async def test_get_response_with_reasoning_content(monkeypatch) -> None:
role="assistant",
content="The answer is 42",
)
setattr(msg, "reasoning_content", "Let me think about this question carefully")
# Use direct assignment instead of setattr
msg.reasoning_content = "Let me think about this question carefully"

# create a choice with the message
mock_choice = {
Expand All @@ -167,7 +171,6 @@ async def test_get_response_with_reasoning_content(monkeypatch) -> None:
"delta": None
}

# Create the completion
chat = ChatCompletion(
id="resp-id",
created=0,
Expand Down Expand Up @@ -256,7 +259,7 @@ async def patched_fetch_response(self, *args, **kwargs):
# verify the final response contains the content
response_event = output_events[-1]
assert response_event.type == "response.completed"

# should only have the message, not an empty reasoning item
assert len(response_event.response.output) == 1
assert isinstance(response_event.response.output[0], ResponseOutputMessage)
Expand Down