Skip to content

Commit 625083a

Browse files
authored
Fix a bug where debug logging fails with Iterable objects (#1683)
This pull request resolves an existing bug where the JSON-format debug logging raises a runtime exception when the given data contains `Iterable` objects.
1 parent 9168348 commit 625083a

File tree

6 files changed

+288
-8
lines changed

6 files changed

+288
-8
lines changed

src/agents/extensions/models/litellm_model.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@
4848
from ...tracing.span_data import GenerationSpanData
4949
from ...tracing.spans import Span
5050
from ...usage import Usage
51+
from ...util._json import _to_dump_compatible
5152

5253

5354
class InternalChatCompletionMessage(ChatCompletionMessage):
@@ -265,6 +266,8 @@ async def _fetch_response(
265266
"role": "system",
266267
},
267268
)
269+
converted_messages = _to_dump_compatible(converted_messages)
270+
268271
if tracing.include_data():
269272
span.span_data.input = converted_messages
270273

@@ -283,13 +286,25 @@ async def _fetch_response(
283286
for handoff in handoffs:
284287
converted_tools.append(Converter.convert_handoff_tool(handoff))
285288

289+
converted_tools = _to_dump_compatible(converted_tools)
290+
286291
if _debug.DONT_LOG_MODEL_DATA:
287292
logger.debug("Calling LLM")
288293
else:
294+
messages_json = json.dumps(
295+
converted_messages,
296+
indent=2,
297+
ensure_ascii=False,
298+
)
299+
tools_json = json.dumps(
300+
converted_tools,
301+
indent=2,
302+
ensure_ascii=False,
303+
)
289304
logger.debug(
290305
f"Calling Litellm model: {self.model}\n"
291-
f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
292-
f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
306+
f"{messages_json}\n"
307+
f"Tools:\n{tools_json}\n"
293308
f"Stream: {stream}\n"
294309
f"Tool choice: {tool_choice}\n"
295310
f"Response format: {response_format}\n"

src/agents/models/openai_chatcompletions.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
from ..tracing.span_data import GenerationSpanData
2424
from ..tracing.spans import Span
2525
from ..usage import Usage
26+
from ..util._json import _to_dump_compatible
2627
from .chatcmpl_converter import Converter
2728
from .chatcmpl_helpers import HEADERS, ChatCmplHelpers
2829
from .chatcmpl_stream_handler import ChatCmplStreamHandler
@@ -237,6 +238,8 @@ async def _fetch_response(
237238
"role": "system",
238239
},
239240
)
241+
converted_messages = _to_dump_compatible(converted_messages)
242+
240243
if tracing.include_data():
241244
span.span_data.input = converted_messages
242245

@@ -255,12 +258,24 @@ async def _fetch_response(
255258
for handoff in handoffs:
256259
converted_tools.append(Converter.convert_handoff_tool(handoff))
257260

261+
converted_tools = _to_dump_compatible(converted_tools)
262+
258263
if _debug.DONT_LOG_MODEL_DATA:
259264
logger.debug("Calling LLM")
260265
else:
266+
messages_json = json.dumps(
267+
converted_messages,
268+
indent=2,
269+
ensure_ascii=False,
270+
)
271+
tools_json = json.dumps(
272+
converted_tools,
273+
indent=2,
274+
ensure_ascii=False,
275+
)
261276
logger.debug(
262-
f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
263-
f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
277+
f"{messages_json}\n"
278+
f"Tools:\n{tools_json}\n"
264279
f"Stream: {stream}\n"
265280
f"Tool choice: {tool_choice}\n"
266281
f"Response format: {response_format}\n"

src/agents/models/openai_responses.py

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@
3838
)
3939
from ..tracing import SpanError, response_span
4040
from ..usage import Usage
41+
from ..util._json import _to_dump_compatible
4142
from ..version import __version__
4243
from .interface import Model, ModelTracing
4344

@@ -240,6 +241,7 @@ async def _fetch_response(
240241
prompt: ResponsePromptParam | None = None,
241242
) -> Response | AsyncStream[ResponseStreamEvent]:
242243
list_input = ItemHelpers.input_to_new_input_list(input)
244+
list_input = _to_dump_compatible(list_input)
243245

244246
parallel_tool_calls = (
245247
True
@@ -251,6 +253,7 @@ async def _fetch_response(
251253

252254
tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
253255
converted_tools = Converter.convert_tools(tools, handoffs)
256+
converted_tools_payload = _to_dump_compatible(converted_tools.tools)
254257
response_format = Converter.get_response_format(output_schema)
255258

256259
include_set: set[str] = set(converted_tools.includes)
@@ -263,10 +266,20 @@ async def _fetch_response(
263266
if _debug.DONT_LOG_MODEL_DATA:
264267
logger.debug("Calling LLM")
265268
else:
269+
input_json = json.dumps(
270+
list_input,
271+
indent=2,
272+
ensure_ascii=False,
273+
)
274+
tools_json = json.dumps(
275+
converted_tools_payload,
276+
indent=2,
277+
ensure_ascii=False,
278+
)
266279
logger.debug(
267280
f"Calling LLM {self.model} with input:\n"
268-
f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
269-
f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
281+
f"{input_json}\n"
282+
f"Tools:\n{tools_json}\n"
270283
f"Stream: {stream}\n"
271284
f"Tool choice: {tool_choice}\n"
272285
f"Response format: {response_format}\n"
@@ -290,7 +303,7 @@ async def _fetch_response(
290303
model=self.model,
291304
input=list_input,
292305
include=include,
293-
tools=converted_tools.tools,
306+
tools=converted_tools_payload,
294307
prompt=self._non_null_or_not_given(prompt),
295308
temperature=self._non_null_or_not_given(model_settings.temperature),
296309
top_p=self._non_null_or_not_given(model_settings.top_p),

src/agents/util/_json.py

Lines changed: 19 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from __future__ import annotations
22

3-
from typing import Literal
3+
from collections.abc import Iterable
4+
from typing import Any, Literal
45

56
from pydantic import TypeAdapter, ValidationError
67
from typing_extensions import TypeVar
@@ -29,3 +30,20 @@ def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) ->
2930
raise ModelBehaviorError(
3031
f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}"
3132
) from e
33+
34+
35+
def _to_dump_compatible(obj: Any) -> Any:
36+
return _to_dump_compatible_internal(obj)
37+
38+
39+
def _to_dump_compatible_internal(obj: Any) -> Any:
40+
if isinstance(obj, dict):
41+
return {k: _to_dump_compatible_internal(v) for k, v in obj.items()}
42+
43+
if isinstance(obj, (list, tuple)):
44+
return [_to_dump_compatible_internal(x) for x in obj]
45+
46+
if isinstance(obj, Iterable) and not isinstance(obj, (str, bytes, bytearray)):
47+
return [_to_dump_compatible_internal(x) for x in obj]
48+
49+
return obj
Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
from __future__ import annotations
2+
3+
from collections.abc import Iterable, Iterator
4+
from typing import Any, cast
5+
6+
import httpx
7+
import pytest
8+
from openai import NOT_GIVEN
9+
from openai.types.chat.chat_completion import ChatCompletion
10+
from openai.types.responses import ToolParam
11+
12+
from agents import (
13+
ModelSettings,
14+
ModelTracing,
15+
OpenAIChatCompletionsModel,
16+
OpenAIResponsesModel,
17+
generation_span,
18+
)
19+
from agents.models import (
20+
openai_chatcompletions as chat_module,
21+
openai_responses as responses_module,
22+
)
23+
24+
25+
class _SingleUseIterable:
26+
"""Helper iterable that raises if iterated more than once."""
27+
28+
def __init__(self, values: list[object]) -> None:
29+
self._values = list(values)
30+
self.iterations = 0
31+
32+
def __iter__(self) -> Iterator[object]:
33+
if self.iterations:
34+
raise RuntimeError("Iterable should have been materialized exactly once.")
35+
self.iterations += 1
36+
yield from self._values
37+
38+
39+
def _force_materialization(value: object) -> None:
40+
if isinstance(value, dict):
41+
for nested in value.values():
42+
_force_materialization(nested)
43+
elif isinstance(value, list):
44+
for nested in value:
45+
_force_materialization(nested)
46+
elif isinstance(value, Iterable) and not isinstance(value, (str, bytes, bytearray)):
47+
list(value)
48+
49+
50+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_chat_completions_materializes_iterator_payload(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Chat Completions path: single-use iterators in messages/tools must be
    materialized into lists exactly once before the API call and debug logging.
    """
    # Single-use iterators planted inside the message content and tool schema;
    # they raise RuntimeError if iterated a second time.
    message_iter = _SingleUseIterable([{"type": "text", "text": "hi"}])
    tool_iter = _SingleUseIterable([{"type": "string"}])

    chat_converter = cast(Any, chat_module).Converter

    # Patch the converters so the produced payloads embed the iterators above.
    monkeypatch.setattr(
        chat_converter,
        "items_to_messages",
        classmethod(lambda _cls, _input: [{"role": "user", "content": message_iter}]),
    )
    monkeypatch.setattr(
        chat_converter,
        "tool_to_openai",
        classmethod(
            lambda _cls, _tool: {
                "type": "function",
                "function": {
                    "name": "dummy",
                    "parameters": {"properties": tool_iter},
                },
            }
        ),
    )

    captured_kwargs: dict[str, Any] = {}

    class DummyCompletions:
        # Stand-in for client.chat.completions; records kwargs and walks the
        # payload the way a serializer would, which would blow up on a
        # not-yet-materialized single-use iterator.
        async def create(self, **kwargs):
            captured_kwargs.update(kwargs)
            _force_materialization(kwargs["messages"])
            if kwargs["tools"] is not NOT_GIVEN:
                _force_materialization(kwargs["tools"])
            return ChatCompletion(
                id="dummy-id",
                created=0,
                model="gpt-4",
                object="chat.completion",
                choices=[],
                usage=None,
            )

    class DummyClient:
        # Minimal OpenAI-client shape: .chat.completions plus a base_url.
        def __init__(self) -> None:
            self.chat = type("_Chat", (), {"completions": DummyCompletions()})()
            self.base_url = httpx.URL("http://example.test")

    model = OpenAIChatCompletionsModel(model="gpt-4", openai_client=DummyClient())  # type: ignore[arg-type]

    # Disabled span: we only need a valid span object for the call signature.
    with generation_span(disabled=True) as span:
        await cast(Any, model)._fetch_response(
            system_instructions=None,
            input="ignored",
            model_settings=ModelSettings(),
            tools=[object()],
            output_schema=None,
            handoffs=[],
            span=span,
            tracing=ModelTracing.DISABLED,
            stream=False,
        )

    # Each iterator must have been consumed exactly once (by _to_dump_compatible),
    # and the payload handed to the client must contain plain lists.
    assert message_iter.iterations == 1
    assert tool_iter.iterations == 1
    assert isinstance(captured_kwargs["messages"][0]["content"], list)
    assert isinstance(captured_kwargs["tools"][0]["function"]["parameters"]["properties"], list)
120+
121+
122+
@pytest.mark.allow_call_model_methods
@pytest.mark.asyncio
async def test_responses_materializes_iterator_payload(monkeypatch: pytest.MonkeyPatch) -> None:
    """Responses path: single-use iterators in the input list and tool schema
    must be materialized into lists exactly once before the API call.
    """
    # Single-use iterators planted in the input content and tool parameters;
    # they raise RuntimeError if iterated a second time.
    input_iter = _SingleUseIterable([{"type": "input_text", "text": "hello"}])
    tool_iter = _SingleUseIterable([{"type": "string"}])

    responses_item_helpers = cast(Any, responses_module).ItemHelpers
    responses_converter = cast(Any, responses_module).Converter

    # Patch the input helper so the produced list embeds input_iter.
    monkeypatch.setattr(
        responses_item_helpers,
        "input_to_new_input_list",
        classmethod(lambda _cls, _input: [{"role": "user", "content": input_iter}]),
    )

    # Pre-built ConvertedTools whose parameters embed tool_iter.
    converted_tools = responses_module.ConvertedTools(
        tools=cast(
            list[ToolParam],
            [
                {
                    "type": "function",
                    "name": "dummy",
                    "parameters": {"properties": tool_iter},
                }
            ],
        ),
        includes=[],
    )
    monkeypatch.setattr(
        responses_converter,
        "convert_tools",
        classmethod(lambda _cls, _tools, _handoffs: converted_tools),
    )

    captured_kwargs: dict[str, Any] = {}

    class DummyResponses:
        # Stand-in for client.responses; records kwargs and walks the payload
        # the way a serializer would, which would blow up on a
        # not-yet-materialized single-use iterator.
        async def create(self, **kwargs):
            captured_kwargs.update(kwargs)
            _force_materialization(kwargs["input"])
            _force_materialization(kwargs["tools"])
            return object()

    class DummyClient:
        # Minimal OpenAI-client shape: just .responses is needed here.
        def __init__(self) -> None:
            self.responses = DummyResponses()

    model = OpenAIResponsesModel(model="gpt-4.1", openai_client=DummyClient())  # type: ignore[arg-type]

    await cast(Any, model)._fetch_response(
        system_instructions=None,
        input="ignored",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        previous_response_id=None,
        conversation_id=None,
        stream=False,
        prompt=None,
    )

    # Each iterator must have been consumed exactly once (by _to_dump_compatible),
    # and the payload handed to the client must contain plain lists.
    assert input_iter.iterations == 1
    assert tool_iter.iterations == 1
    assert isinstance(captured_kwargs["input"][0]["content"], list)
    assert isinstance(captured_kwargs["tools"][0]["parameters"]["properties"], list)

tests/utils/test_json.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
import json
2+
3+
from openai.types.responses.response_output_message_param import ResponseOutputMessageParam
4+
from openai.types.responses.response_output_text_param import ResponseOutputTextParam
5+
6+
from agents.util._json import _to_dump_compatible
7+
8+
9+
def test_to_dump_compatible():
    """_to_dump_compatible must convert nested iterators into plain lists so
    that json.dumps can serialize the payload without a TypeError.
    """
    # Build a payload where both the outer sequence and the inner `content`
    # field are single-pass iterators rather than lists.
    input_iter = [
        ResponseOutputMessageParam(
            id="a75654dc-7492-4d1c-bce0-89e8312fbdd7",
            content=[
                ResponseOutputTextParam(
                    type="output_text",
                    text="Hey, what's up?",
                    annotations=[],
                )
            ].__iter__(),
            role="assistant",
            status="completed",
            type="message",
        )
    ].__iter__()
    # Serializing the raw payload would fail because iterators are not JSON
    # serializable; the line below is kept as a reminder of the failing case.
    # result = json.dumps(input_iter)
    result = json.dumps(_to_dump_compatible(input_iter))
    assert (
        result
        == """[{"id": "a75654dc-7492-4d1c-bce0-89e8312fbdd7", "content": [{"type": "output_text", "text": "Hey, what's up?", "annotations": []}], "role": "assistant", "status": "completed", "type": "message"}]"""  # noqa: E501
    )

0 commit comments

Comments
 (0)