# langgraph_agent.py
import json
import operator
import os
from typing import Annotated, Any, Dict, List, Sequence, Tuple, TypedDict

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import Tool
from langchain_groq import ChatGroq
from langgraph.graph import Graph, END
from langgraph.prebuilt import ToolExecutor, ToolInvocation
12
+ # Define types for our graph
13
+ class AgentState (TypedDict ):
14
+ messages : Annotated [Sequence [Dict ], operator .add ]
15
+ next : str
16
+
17
+ # Initialize Groq LLM
18
+ llm = ChatGroq (
19
+ api_key = "your-groq-api-key" ,
20
+ model_name = "mixtral-8x7b-32768"
21
+ )
22
+
23
+ # Define our tools
24
+ def analyze_complexity (code : str ) -> Dict [str , str ]:
25
+ """Analyzes time and space complexity of given code"""
26
+ prompt = f"Analyze the following code and provide its time and space complexity:\n { code } "
27
+ response = llm .invoke (prompt )
28
+ return {
29
+ "timeComplexity" : response .content .split ("Time Complexity:" )[1 ].split ("Space Complexity:" )[0 ].strip (),
30
+ "spaceComplexity" : response .content .split ("Space Complexity:" )[1 ].strip ()
31
+ }
32
+
33
+ def generate_visualization (problem : str ) -> str :
34
+ """Generates a Mermaid diagram for the problem"""
35
+ prompt = f"Create a Mermaid diagram to visualize the solution approach for this problem:\n { problem } "
36
+ response = llm .invoke (prompt )
37
+ return response .content
38
+
39
+ def generate_test_cases (problem : str ) -> List [Dict [str , str ]]:
40
+ """Generates test cases for the problem"""
41
+ prompt = f"Generate 3 diverse test cases for this problem:\n { problem } "
42
+ response = llm .invoke (prompt )
43
+ return json .loads (response .content )
44
+
45
+ tools = [
46
+ Tool (
47
+ name = "complexity_analyzer" ,
48
+ description = "Analyzes time and space complexity of code" ,
49
+ func = analyze_complexity
50
+ ),
51
+ Tool (
52
+ name = "visualizer" ,
53
+ description = "Generates Mermaid diagram for visualization" ,
54
+ func = generate_visualization
55
+ ),
56
+ Tool (
57
+ name = "test_generator" ,
58
+ description = "Generates test cases" ,
59
+ func = generate_test_cases
60
+ )
61
+ ]
62
+
63
+ # Create tool executor
64
+ tool_executor = ToolExecutor (tools )
65
+
66
+ # Define agent functions
67
+ def should_use_tool (state : AgentState ) -> Tuple [str , str ]:
68
+ """Decide if and which tool to use"""
69
+ messages = state ["messages" ]
70
+ response = llm .invoke (
71
+ messages + [SystemMessage (content = "What tool should be used next? Reply with 'END' if no tool is needed." )]
72
+ )
73
+ tool_name = response .content
74
+ return "tool" if tool_name != "END" else "end" , tool_name
75
+
76
+ def call_tool (state : AgentState , tool_name : str ) -> AgentState :
77
+ """Call the specified tool"""
78
+ messages = state ["messages" ]
79
+ # Extract relevant info from messages and call tool
80
+ result = tool_executor .execute (tool_name , messages [- 1 ].content )
81
+ return {
82
+ "messages" : messages + [HumanMessage (content = str (result ))],
83
+ "next" : "agent"
84
+ }
85
+
86
+ def process_response (state : AgentState ) -> AgentState :
87
+ """Process the final response"""
88
+ messages = state ["messages" ]
89
+ final_response = llm .invoke (messages + [SystemMessage (content = "Provide final solution summary" )])
90
+ return {
91
+ "messages" : messages + [final_response ],
92
+ "next" : "end"
93
+ }
94
+
95
+ # Create the graph
96
+ workflow = Graph ()
97
+
98
+ # Add nodes
99
+ workflow .add_node ("agent" , should_use_tool )
100
+ workflow .add_node ("tool" , call_tool )
101
+ workflow .add_node ("process" , process_response )
102
+
103
+ # Add edges
104
+ workflow .add_edge ("agent" , "tool" )
105
+ workflow .add_edge ("tool" , "agent" )
106
+ workflow .add_edge ("agent" , "process" )
107
+ workflow .add_edge ("process" , END )
108
+
109
+ # Compile the graph
110
+ app = workflow .compile ()
111
+
112
+ def solve_dsa_problem (problem : str , test_cases : List [Dict [str , str ]] = None ) -> Dict :
113
+ """Main function to solve DSA problems"""
114
+ initial_state = {
115
+ "messages" : [HumanMessage (content = problem )],
116
+ "next" : "agent"
117
+ }
118
+ result = app .invoke (initial_state )
119
+
120
+ # Process result into structured format
121
+ final_message = result ["messages" ][- 1 ].content
122
+ # Parse the response into required format
123
+ try :
124
+ response_dict = json .loads (final_message )
125
+ except :
126
+ # Fallback structure if parsing fails
127
+ response_dict = {
128
+ "explanation" : final_message ,
129
+ "code" : "" ,
130
+ "timeComplexity" : "" ,
131
+ "spaceComplexity" : "" ,
132
+ "visualization" : "" ,
133
+ "testCases" : test_cases or []
134
+ }
135
+
136
+ return response_dict
0 commit comments