
Commit 946f1f3

openai-agents with Google Colab and Gemini
Anyone can practice with Google Gemini for free.
1 parent ff65fb4 commit 946f1f3

File tree

40 files changed: +8147 additions, −0 deletions


examples/colab-examples-gemini/01_hello_agent/hello_agent.ipynb

Lines changed: 423 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 322 additions & 0 deletions (a second notebook in the commit; its diff follows)
@@ -0,0 +1,322 @@
The added notebook targets Google Colab with a Python 3 kernel.

# Install openai-agents SDK
```python
!pip install -Uq openai-agents
```
# Make your Jupyter Notebook capable of running asynchronous functions
```python
import nest_asyncio

# Allow asyncio.run() inside the notebook's already-running event loop
nest_asyncio.apply()
```
# Run Google Gemini with the OpenAI Agents SDK
```python
import os
from agents import Agent, Runner, AsyncOpenAI, OpenAIChatCompletionsModel
from agents.run import RunConfig
from google.colab import userdata

gemini_api_key = userdata.get("GEMINI_API_KEY")

# Check if the API key is present; if not, raise an error
if not gemini_api_key:
    raise ValueError("GEMINI_API_KEY is not set. Please add it to your Colab secrets (userdata).")

# Reference: https://ai.google.dev/gemini-api/docs/openai
external_client = AsyncOpenAI(
    api_key=gemini_api_key,
    base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
)

model = OpenAIChatCompletionsModel(
    model="gemini-2.0-flash",
    openai_client=external_client,
)

config = RunConfig(
    model=model,
    model_provider=external_client,
    tracing_disabled=True,
)
```
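The config is not exercised until the later cells. As a quick check that the Gemini endpoint answers, a minimal synchronous run could be added here. This is only a sketch, not part of the committed notebook: it assumes `Runner.run_sync` accepts the `run_config` keyword (as documented for the openai-agents SDK), and the agent name and prompt are arbitrary examples.

```python
# Hypothetical smoke test: send one prompt through the Gemini-backed config above
agent = Agent(
    name="Assistant",
    instructions="You are a helpful assistant.",
)

result = Runner.run_sync(agent, "Say hello in one short sentence.", run_config=config)
print(result.final_output)
```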
# Streaming text code
```python
# The streaming cell below uses the SDK's default OpenAI model, so an OpenAI key is required
os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')
```
```python
import asyncio

from openai.types.responses import ResponseTextDeltaEvent

from agents import Agent, Runner


async def main():
    agent = Agent(
        name="Joker",
        instructions="You are a helpful assistant.",
    )

    result = Runner.run_streamed(agent, input="Please tell me 5 jokes.")
    async for event in result.stream_events():
        if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
            print(event.data.delta, end="", flush=True)


asyncio.run(main())
```

Output:

```
Sure, here are five jokes for you:

1. Why don't scientists trust atoms?
   - Because they make up everything!

2. What do you call fake spaghetti?
   - An impasta!

3. Why did the scarecrow win an award?
   - Because he was outstanding in his field!

4. What do you get when you cross a snowman and a vampire?
   - Frostbite.

5. Why couldn't the bicycle stand up by itself?
   - It was two-tired!
```
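The cell above streams through the SDK's default OpenAI model, which is why `OPENAI_API_KEY` was exported first. To stream through the Gemini setup instead, the same loop can pass the earlier `config`. A sketch, assuming `config` is still in scope and that `Runner.run_streamed` accepts the `run_config` keyword:

```python
# Hypothetical variant: stream the same prompt through the Gemini-backed config
async def stream_with_gemini():
    agent = Agent(name="Joker", instructions="You are a helpful assistant.")
    result = Runner.run_streamed(agent, input="Please tell me 5 jokes.", run_config=config)
    async for event in result.stream_events():
        if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
            print(event.data.delta, end="", flush=True)


asyncio.run(stream_with_gemini())
```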
# Stream item code
```python
import asyncio
import random

from agents import Agent, ItemHelpers, Runner, function_tool


@function_tool
def how_many_jokes() -> int:
    return random.randint(1, 10)


async def main():
    agent = Agent(
        name="Joker",
        instructions="First call the `how_many_jokes` tool, then tell that many jokes.",
        tools=[how_many_jokes],
    )

    result = Runner.run_streamed(
        agent,
        input="Hello",
    )
    print("=== Run starting ===")
    async for event in result.stream_events():
        # We'll ignore the raw responses event deltas
        if event.type == "raw_response_event":
            continue
        elif event.type == "agent_updated_stream_event":
            print(f"Agent updated: {event.new_agent.name}")
            continue
        elif event.type == "run_item_stream_event":
            if event.item.type == "tool_call_item":
                print("-- Tool was called")
            elif event.item.type == "tool_call_output_item":
                print(f"-- Tool output: {event.item.output}")
            elif event.item.type == "message_output_item":
                print(f"-- Message output:\n {ItemHelpers.text_message_output(event.item)}")
            else:
                pass  # Ignore other event types


try:
    asyncio.run(main())
except Exception:
    # Swallow run errors so the closing line still prints
    pass
print("=== Run complete ===")
```

Output:

```
=== Run starting ===
Agent updated: Joker
-- Tool was called
-- Tool output: 8
-- Message output:
 Great! Here are 8 jokes for you:

1. **Why don’t scientists trust atoms?**
   Because they make up everything!

2. **What do you call fake spaghetti?**
   An impasta!

3. **What’s orange and sounds like a parrot?**
   A carrot!

4. **Why was the math book sad?**
   Because it had too many problems.

5. **How do you organize a space party?**
   You planet!

6. **Why did the scarecrow win an award?**
   Because he was outstanding in his field!

7. **What do you get when you cross a snowman and a vampire?**
   Frostbite!

8. **Why did the tomato turn red?**
   Because it saw the salad dressing!

Enjoy the laughter!
=== Run complete ===
```
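`how_many_jokes` takes no arguments, but `@function_tool` can also expose parameters, since the tool schema is inferred from the function's type hints and docstring. A sketch with an illustrative tool (the name, body, and agent wiring are hypothetical, not from the notebook):

```python
# Hypothetical tool with typed arguments; the parameter schema is inferred from the type hints
@function_tool
def pick_joke_topic(audience: str, count: int) -> str:
    """Suggest a topic for `count` jokes aimed at the given audience."""
    return f"{count} light, {audience}-friendly puns"


topic_agent = Agent(
    name="Joker",
    instructions="First call `pick_joke_topic`, then tell jokes on that topic.",
    tools=[pick_joke_topic],
)
```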
