@@ -31,6 +31,31 @@ def _patch_graph_nodes():
    CallToolsNode -> Handles tool calls (spans created in tool patching)
    """

+    def _extract_span_data(node, ctx):
+        # type: (Any, Any) -> tuple[list[Any], Any, Any]
+        """Extract common data needed for creating chat spans.
+
+        Returns:
+            Tuple of (messages, model, model_settings)
+        """
+        # Extract model and settings from context
+        model = None
+        model_settings = None
+        if hasattr(ctx, "deps"):
+            model = getattr(ctx.deps, "model", None)
+            model_settings = getattr(ctx.deps, "model_settings", None)
+
+        # Build full message list: history + current request
+        messages = []
+        if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
+            messages.extend(ctx.state.message_history)
+
+        current_request = getattr(node, "request", None)
+        if current_request:
+            messages.append(current_request)
+
+        return messages, model, model_settings
+
    # Patch UserPromptNode to create invoke_agent spans
    original_user_prompt_run = UserPromptNode.run

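For reference, a minimal sketch of what the new helper returns, using `types.SimpleNamespace` stand-ins for the node and run context (the real objects come from pydantic-ai's graph run; the helper body is copied from the hunk above and lifted to module scope purely for illustration):

```python
from types import SimpleNamespace

def _extract_span_data(node, ctx):
    # Copy of the helper above, lifted to module scope for this sketch only.
    model = None
    model_settings = None
    if hasattr(ctx, "deps"):
        model = getattr(ctx.deps, "model", None)
        model_settings = getattr(ctx.deps, "model_settings", None)
    messages = []
    if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
        messages.extend(ctx.state.message_history)
    current_request = getattr(node, "request", None)
    if current_request:
        messages.append(current_request)
    return messages, model, model_settings

# Stand-in objects; the attribute names mirror those read by the helper.
ctx = SimpleNamespace(
    deps=SimpleNamespace(model="gpt-4o", model_settings={"temperature": 0.2}),
    state=SimpleNamespace(message_history=["earlier message"]),
)
node = SimpleNamespace(request="current request")

messages, model, model_settings = _extract_span_data(node, ctx)
assert messages == ["earlier message", "current request"]
assert model == "gpt-4o"
assert model_settings == {"temperature": 0.2}
```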
@@ -71,24 +96,7 @@ async def wrapped_user_prompt_run(self, ctx):
    @wraps(original_model_request_run)
    async def wrapped_model_request_run(self, ctx):
        # type: (Any, Any) -> Any
-        # Extract data from context
-        model = None
-        model_settings = None
-        if hasattr(ctx, "deps"):
-            model = getattr(ctx.deps, "model", None)
-            model_settings = getattr(ctx.deps, "model_settings", None)
-
-        # Build full message list: history + current request
-        messages = []
-
-        # Add message history
-        if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
-            messages.extend(ctx.state.message_history)
-
-        # Add current request
-        current_request = getattr(self, "request", None)
-        if current_request:
-            messages.append(current_request)
+        messages, model, model_settings = _extract_span_data(self, ctx)

        with ai_client_span(messages, None, model, model_settings) as span:
            result = await original_model_request_run(self, ctx)
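`ai_client_span` is defined elsewhere in this integration. Purely as a hypothetical sketch of the shape such a context manager can take on top of `sentry_sdk.start_span` (the op string and data keys below are illustrative assumptions, not the integration's actual values):

```python
from contextlib import contextmanager

import sentry_sdk

@contextmanager
def ai_client_span(messages, agent, model, model_settings):
    # Hypothetical stand-in; the real helper lives elsewhere in the integration.
    with sentry_sdk.start_span(op="gen_ai.chat", name="chat") as span:
        if model is not None:
            span.set_data("gen_ai.request.model", str(model))
        if model_settings is not None:
            span.set_data("gen_ai.request.model_settings", str(model_settings))
        span.set_data("gen_ai.request.messages", str(messages))
        yield span
```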
@@ -115,24 +123,7 @@ def create_wrapped_stream(original_stream_method):
    @wraps(original_stream_method)
    async def wrapped_model_request_stream(self, ctx):
        # type: (Any, Any) -> Any
-        # Extract data from context
-        model = None
-        model_settings = None
-        if hasattr(ctx, "deps"):
-            model = getattr(ctx.deps, "model", None)
-            model_settings = getattr(ctx.deps, "model_settings", None)
-
-        # Build full message list: history + current request
-        messages = []
-
-        # Add message history
-        if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
-            messages.extend(ctx.state.message_history)
-
-        # Add current request
-        current_request = getattr(self, "request", None)
-        if current_request:
-            messages.append(current_request)
+        messages, model, model_settings = _extract_span_data(self, ctx)

        # Create chat span for streaming request
        import sentry_sdk
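Both wrapped methods follow the same patching pattern: capture the original coroutine method, wrap it with `functools.wraps`, and reassign it on the class. A self-contained sketch of that pattern with placeholder names (not pydantic-ai's actual classes):

```python
import asyncio
from functools import wraps

class DemoNode:
    # Placeholder for a pydantic-ai graph node class.
    async def run(self, ctx):
        return "model response"

original_run = DemoNode.run

@wraps(original_run)
async def wrapped_run(self, ctx):
    # The integration opens a span here before delegating to the original.
    result = await original_run(self, ctx)
    return result

DemoNode.run = wrapped_run

print(asyncio.run(DemoNode().run(ctx=None)))  # -> "model response"
```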