Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
423 changes: 423 additions & 0 deletions examples/colab-examples-gemini/01_hello_agent/hello_agent.ipynb

Large diffs are not rendered by default.

322 changes: 322 additions & 0 deletions examples/colab-examples-gemini/02_streaming/streaming.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,322 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# Install openai-agents SDK"
],
"metadata": {
"id": "PdKwzEluDBN7"
}
},
{
"cell_type": "code",
"source": [
"# Use %pip (not !pip) so the install targets this kernel's environment.\n",
"%pip install -Uq openai-agents"
],
"metadata": {
"id": "3QdkOviEB2ay",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "80fb32e1-719f-4d50-9d9e-cca72b0f2591"
},
"execution_count": 1,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Enable nested asyncio event loops so async code can run inside this notebook"
],
"metadata": {
"id": "7yD91lz4DIAx"
}
},
{
"cell_type": "code",
"source": [
"import nest_asyncio\n",
"nest_asyncio.apply()"
],
"metadata": {
"id": "7A5YLi3HCfBV"
},
"execution_count": 11,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Run Google Gemini with the OpenAI Agents SDK"
],
"metadata": {
"id": "K3VTUWDaGFcV"
}
},
{
"cell_type": "code",
"source": [
"import os\n",
"from agents import Agent, Runner, AsyncOpenAI, OpenAIChatCompletionsModel\n",
"from agents.run import RunConfig\n",
"from google.colab import userdata\n",
"\n",
"# Read the Gemini key from Colab's secrets manager (key icon in the left sidebar).\n",
"gemini_api_key = userdata.get(\"GEMINI_API_KEY\")\n",
"\n",
"# Fail fast with an actionable, Colab-specific message if the key is missing\n",
"# (the previous message pointed at a .env file, which doesn't exist in Colab).\n",
"if not gemini_api_key:\n",
"    raise ValueError(\"GEMINI_API_KEY is not set. Please add it under Colab's Secrets (key icon in the left sidebar).\")\n",
"\n",
"# Gemini exposes an OpenAI-compatible endpoint.\n",
"# Reference: https://ai.google.dev/gemini-api/docs/openai\n",
"external_client = AsyncOpenAI(\n",
"    api_key=gemini_api_key,\n",
"    base_url=\"https://generativelanguage.googleapis.com/v1beta/openai/\",\n",
")\n",
"\n",
"model = OpenAIChatCompletionsModel(\n",
"    model=\"gemini-2.0-flash\",\n",
"    openai_client=external_client\n",
")\n",
"\n",
"# Pass this as run_config=config to Runner calls so they actually use Gemini.\n",
"config = RunConfig(\n",
"    model=model,\n",
"    model_provider=external_client,\n",
"    tracing_disabled=True\n",
")"
],
"metadata": {
"id": "QSIWS6RvC-a4"
},
"execution_count": 12,
"outputs": []
},
{
"cell_type": "markdown",
"source": [
"# Streaming text example (token deltas)"
],
"metadata": {
"id": "FXBrF52IPBM9"
}
},
{
"cell_type": "code",
"source": [
"# Optional: OPENAI_API_KEY is only needed when a run does NOT pass\n",
"# run_config=config, because the SDK then falls back to the default\n",
"# OpenAI model. Guard the lookup so the notebook still runs top-to-bottom\n",
"# for users who only configured the Gemini secret.\n",
"try:\n",
"    os.environ['OPENAI_API_KEY'] = userdata.get('OPENAI_API_KEY')\n",
"except Exception:\n",
"    print('OPENAI_API_KEY secret not set; skipping (Gemini runs via run_config are unaffected).')"
],
"metadata": {
"id": "IrgKhgyTYvEn"
},
"execution_count": 32,
"outputs": []
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Tg583lQEBRWo",
"outputId": "7ed11ad5-b98a-4539-9fe5-706343bc608c"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Sure, here are five jokes for you:\n",
"\n",
"1. Why don't scientists trust atoms?\n",
"   - Because they make up everything!\n",
"\n",
"2. What do you call fake spaghetti?\n",
"   - An impasta!\n",
"\n",
"3. Why did the scarecrow win an award?\n",
"   - Because he was outstanding in his field!\n",
"\n",
"4. What do you get when you cross a snowman and a vampire?\n",
"   - Frostbite.\n",
"\n",
"5. Why couldn't the bicycle stand up by itself?\n",
"   - It was two-tired!"
]
}
],
"source": [
"import asyncio\n",
"\n",
"from openai.types.responses import ResponseTextDeltaEvent\n",
"\n",
"from agents import Agent, Runner\n",
"\n",
"\n",
"async def main():\n",
"    \"\"\"Stream a response and print each text delta as it arrives.\"\"\"\n",
"    agent = Agent(\n",
"        name=\"Joker\",\n",
"        instructions=\"You are a helpful assistant.\",\n",
"    )\n",
"\n",
"    # run_config=config routes this run through the Gemini client configured\n",
"    # above; without it the SDK falls back to the default OpenAI model.\n",
"    result = Runner.run_streamed(agent, input=\"Please tell me 5 jokes.\", run_config=config)\n",
"    async for event in result.stream_events():\n",
"        # Only raw response events carry incremental text deltas.\n",
"        if event.type == \"raw_response_event\" and isinstance(event.data, ResponseTextDeltaEvent):\n",
"            print(event.data.delta, end=\"\", flush=True)\n",
"\n",
"\n",
"asyncio.run(main())"
]
},
{
"cell_type": "markdown",
"source": [
"# Streaming run-item events example"
],
"metadata": {
"id": "pikemA8-SV0u"
}
},
{
"cell_type": "code",
"source": [
"import asyncio\n",
"import random\n",
"\n",
"from agents import Agent, ItemHelpers, Runner, function_tool\n",
"\n",
"\n",
"@function_tool\n",
"def how_many_jokes() -> int:\n",
"    \"\"\"Return a random joke count between 1 and 10 (inclusive).\"\"\"\n",
"    return random.randint(1, 10)\n",
"\n",
"\n",
"async def main():\n",
"    \"\"\"Stream a tool-using run and report higher-level run items.\"\"\"\n",
"    agent = Agent(\n",
"        name=\"Joker\",\n",
"        instructions=\"First call the `how_many_jokes` tool, then tell that many jokes.\",\n",
"        tools=[how_many_jokes],\n",
"    )\n",
"\n",
"    # run_config=config routes this run through the Gemini client configured\n",
"    # above; without it the SDK falls back to the default OpenAI model.\n",
"    result = Runner.run_streamed(agent, input=\"Hello\", run_config=config)\n",
"    print(\"=== Run starting ===\")\n",
"    async for event in result.stream_events():\n",
"        # Ignore token-level deltas; we only want item-level events here.\n",
"        if event.type == \"raw_response_event\":\n",
"            continue\n",
"        elif event.type == \"agent_updated_stream_event\":\n",
"            print(f\"Agent updated: {event.new_agent.name}\")\n",
"        elif event.type == \"run_item_stream_event\":\n",
"            if event.item.type == \"tool_call_item\":\n",
"                print(\"-- Tool was called\")\n",
"            elif event.item.type == \"tool_call_output_item\":\n",
"                print(f\"-- Tool output: {event.item.output}\")\n",
"            elif event.item.type == \"message_output_item\":\n",
"                print(f\"-- Message output:\\n {ItemHelpers.text_message_output(event.item)}\")\n",
"\n",
"\n",
"# Surface failures instead of silently swallowing them (was: bare `except: pass`).\n",
"try:\n",
"    asyncio.run(main())\n",
"except Exception as exc:\n",
"    print(f\"Run failed: {exc}\")\n",
"print(\"=== Run complete ===\")"
],
"metadata": {
"id": "pTNf1noxCRi-",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "3645fd4c-0174-49bc-eb20-2321bc5e183f"
},
"execution_count": 34,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"=== Run starting ===\n",
"Agent updated: Joker\n",
"-- Tool was called\n",
"-- Tool output: 8\n",
"-- Message output:\n",
" Great! Here are 8 jokes for you:\n",
"\n",
"1. **Why don’t scientists trust atoms?**\n",
"   Because they make up everything!\n",
"\n",
"2. **What do you call fake spaghetti?**\n",
"   An impasta!\n",
"\n",
"3. **What’s orange and sounds like a parrot?**\n",
"   A carrot!\n",
"\n",
"4. **Why was the math book sad?**\n",
"   Because it had too many problems.\n",
"\n",
"5. **How do you organize a space party?**\n",
"   You planet!\n",
"\n",
"6. **Why did the scarecrow win an award?**\n",
"   Because he was outstanding in his field!\n",
"\n",
"7. **What do you get when you cross a snowman and a vampire?**\n",
"   Frostbite!\n",
"\n",
"8. **Why did the tomato turn red?**\n",
"   Because it saw the salad dressing!\n",
"\n",
"Enjoy the laughter!\n",
"=== Run complete ===\n"
]
}
]
}
]
}
Loading