Skip to content

Commit cc24d61

Browse files
seanzhougoogle authored and copybara-github committed
feat: Support ContentUnion as static instruction
PiperOrigin-RevId: 817278990
1 parent 0aede9f commit cc24d61

File tree

4 files changed

+191
-9
lines changed

4 files changed

+191
-9
lines changed

src/google/adk/agents/llm_agent.py

Lines changed: 14 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -196,7 +196,7 @@ class LlmAgent(BaseAgent):
196196
or personality.
197197
"""
198198

199-
static_instruction: Optional[types.Content] = None
199+
static_instruction: Optional[types.ContentUnion] = None
200200
"""Static instruction content sent literally as system instruction at the beginning.
201201
202202
This field is for content that never changes and doesn't contain placeholders.
@@ -223,11 +223,20 @@ class LlmAgent(BaseAgent):
223223
For explicit caching control, configure context_cache_config at App level.
224224
225225
**Content Support:**
226-
Can contain text, files, binaries, or any combination as types.Content
227-
supports multiple part types (text, inline_data, file_data, etc.).
228-
229-
**Example:**
226+
Accepts types.ContentUnion which includes:
227+
- str: Simple text instruction
228+
- types.Content: Rich content object
229+
- types.Part: Single part (text, inline_data, file_data, etc.)
230+
- PIL.Image.Image: Image object
231+
- types.File: File reference
232+
- list[PartUnion]: List of parts
233+
234+
**Examples:**
230235
```python
236+
# Simple string instruction
237+
static_instruction = "You are a helpful assistant."
238+
239+
# Rich content with files
231240
static_instruction = types.Content(
232241
role='user',
233242
parts=[

src/google/adk/agents/llm_agent_config.py

Lines changed: 8 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -35,6 +35,10 @@ class LlmAgentConfig(BaseAgentConfig):
3535

3636
model_config = ConfigDict(
3737
extra='forbid',
38+
# Allow arbitrary types to support types.ContentUnion for static_instruction.
39+
# ContentUnion includes PIL.Image.Image which doesn't have Pydantic schema
40+
# support, but we validate it at runtime using google.genai._transformers.t_content()
41+
arbitrary_types_allowed=True,
3842
)
3943

4044
agent_class: str = Field(
@@ -62,14 +66,15 @@ class LlmAgentConfig(BaseAgentConfig):
6266
)
6367
)
6468

65-
static_instruction: Optional[types.Content] = Field(
69+
static_instruction: Optional[types.ContentUnion] = Field(
6670
default=None,
6771
description=(
6872
'Optional. LlmAgent.static_instruction. Static content sent literally'
6973
' at position 0 without placeholder processing. When set, changes'
7074
' instruction behavior to go to user content instead of'
71-
' system_instruction. Supports context caching and rich content'
72-
' (text, files, binaries).'
75+
' system_instruction. Supports context caching. Accepts'
76+
' types.ContentUnion (str, types.Content, types.Part,'
77+
' PIL.Image.Image, types.File, or list[PartUnion]).'
7378
),
7479
)
7580

src/google/adk/flows/llm_flows/instructions.py

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -19,6 +19,7 @@
1919
from typing import AsyncGenerator
2020
from typing import TYPE_CHECKING
2121

22+
from google.genai import _transformers
2223
from typing_extensions import override
2324

2425
from ...agents.readonly_context import ReadonlyContext
@@ -84,7 +85,9 @@ async def run_async(
8485

8586
# Handle static_instruction - add via append_instructions
8687
if agent.static_instruction:
87-
llm_request.append_instructions(agent.static_instruction)
88+
# Convert ContentUnion to Content using genai transformer
89+
static_content = _transformers.t_content(agent.static_instruction)
90+
llm_request.append_instructions(static_content)
8891

8992
# Handle instruction based on whether static_instruction exists
9093
if agent.instruction and not agent.static_instruction:

tests/unittests/flows/llm_flows/test_instructions.py

Lines changed: 165 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -551,6 +551,56 @@ def test_static_instruction_field_exists(llm_backend):
551551
assert agent.static_instruction == static_content
552552

553553

554+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
555+
def test_static_instruction_supports_string(llm_backend):
556+
"""Test that static_instruction field supports simple strings."""
557+
static_str = "This is a static instruction as a string"
558+
agent = LlmAgent(name="test_agent", static_instruction=static_str)
559+
assert agent.static_instruction == static_str
560+
assert isinstance(agent.static_instruction, str)
561+
562+
563+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
564+
def test_static_instruction_supports_part(llm_backend):
565+
"""Test that static_instruction field supports types.Part."""
566+
static_part = types.Part(text="This is a static instruction as Part")
567+
agent = LlmAgent(name="test_agent", static_instruction=static_part)
568+
assert agent.static_instruction == static_part
569+
assert isinstance(agent.static_instruction, types.Part)
570+
571+
572+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
573+
def test_static_instruction_supports_file(llm_backend):
574+
"""Test that static_instruction field supports types.File."""
575+
static_file = types.File(uri="gs://bucket/file.txt", mime_type="text/plain")
576+
agent = LlmAgent(name="test_agent", static_instruction=static_file)
577+
assert agent.static_instruction == static_file
578+
assert isinstance(agent.static_instruction, types.File)
579+
580+
581+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
582+
def test_static_instruction_supports_list_of_parts(llm_backend):
583+
"""Test that static_instruction field supports list[PartUnion]."""
584+
static_parts_list = [
585+
types.Part(text="First part"),
586+
types.Part(text="Second part"),
587+
]
588+
agent = LlmAgent(name="test_agent", static_instruction=static_parts_list)
589+
assert agent.static_instruction == static_parts_list
590+
assert isinstance(agent.static_instruction, list)
591+
assert len(agent.static_instruction) == 2
592+
593+
594+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
595+
def test_static_instruction_supports_list_of_strings(llm_backend):
596+
"""Test that static_instruction field supports list of strings."""
597+
static_strings_list = ["First instruction", "Second instruction"]
598+
agent = LlmAgent(name="test_agent", static_instruction=static_strings_list)
599+
assert agent.static_instruction == static_strings_list
600+
assert isinstance(agent.static_instruction, list)
601+
assert all(isinstance(s, str) for s in agent.static_instruction)
602+
603+
554604
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
555605
def test_static_instruction_supports_multiple_parts(llm_backend):
556606
"""Test that static_instruction supports multiple parts including files."""
@@ -607,6 +657,91 @@ async def test_static_instruction_added_to_contents(llm_backend):
607657
assert llm_request.config.system_instruction == "Static instruction content"
608658

609659

660+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
661+
@pytest.mark.asyncio
662+
async def test_static_instruction_string_added_to_system(llm_backend):
663+
"""Test that string static instructions are added to system_instruction."""
664+
agent = LlmAgent(
665+
name="test_agent", static_instruction="Static instruction as string"
666+
)
667+
668+
invocation_context = await _create_invocation_context(agent)
669+
670+
llm_request = LlmRequest()
671+
672+
# Run the instruction processor
673+
async for _ in request_processor.run_async(invocation_context, llm_request):
674+
pass
675+
676+
# Static instruction should be added to system instructions, not contents
677+
assert len(llm_request.contents) == 0
678+
assert llm_request.config.system_instruction == "Static instruction as string"
679+
680+
681+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
682+
@pytest.mark.asyncio
683+
async def test_static_instruction_part_converted_to_system(llm_backend):
684+
"""Test that Part static instructions are converted and added to system_instruction."""
685+
static_part = types.Part(text="Static instruction from Part")
686+
agent = LlmAgent(name="test_agent", static_instruction=static_part)
687+
688+
invocation_context = await _create_invocation_context(agent)
689+
llm_request = LlmRequest()
690+
691+
# Run the instruction processor
692+
async for _ in request_processor.run_async(invocation_context, llm_request):
693+
pass
694+
695+
# Part should be converted to Content and text extracted to system instruction
696+
assert llm_request.config.system_instruction == "Static instruction from Part"
697+
698+
699+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
700+
@pytest.mark.asyncio
701+
async def test_static_instruction_list_of_parts_converted_to_system(
702+
llm_backend,
703+
):
704+
"""Test that list of Parts is converted and added to system_instruction."""
705+
static_parts_list = [
706+
types.Part(text="First part"),
707+
types.Part(text="Second part"),
708+
]
709+
agent = LlmAgent(name="test_agent", static_instruction=static_parts_list)
710+
711+
invocation_context = await _create_invocation_context(agent)
712+
llm_request = LlmRequest()
713+
714+
# Run the instruction processor
715+
async for _ in request_processor.run_async(invocation_context, llm_request):
716+
pass
717+
718+
# List of parts should be converted to Content with text extracted
719+
assert llm_request.config.system_instruction == "First part\n\nSecond part"
720+
721+
722+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
723+
@pytest.mark.asyncio
724+
async def test_static_instruction_list_of_strings_converted_to_system(
725+
llm_backend,
726+
):
727+
"""Test that list of strings is converted and added to system_instruction."""
728+
static_strings_list = ["First instruction", "Second instruction"]
729+
agent = LlmAgent(name="test_agent", static_instruction=static_strings_list)
730+
731+
invocation_context = await _create_invocation_context(agent)
732+
llm_request = LlmRequest()
733+
734+
# Run the instruction processor
735+
async for _ in request_processor.run_async(invocation_context, llm_request):
736+
pass
737+
738+
# List of strings should be converted to Content with text extracted
739+
assert (
740+
llm_request.config.system_instruction
741+
== "First instruction\n\nSecond instruction"
742+
)
743+
744+
610745
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
611746
@pytest.mark.asyncio
612747
async def test_dynamic_instruction_without_static_goes_to_system(llm_backend):
@@ -658,6 +793,36 @@ async def test_dynamic_instruction_with_static_not_in_system(llm_backend):
658793
assert llm_request.contents[0].parts[0].text == "Dynamic instruction content"
659794

660795

796+
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
797+
@pytest.mark.asyncio
798+
async def test_dynamic_instruction_with_string_static_not_in_system(
799+
llm_backend,
800+
):
801+
"""Test that dynamic instructions go to user content when string static_instruction exists."""
802+
agent = LlmAgent(
803+
name="test_agent",
804+
instruction="Dynamic instruction content",
805+
static_instruction="Static instruction as string",
806+
)
807+
808+
invocation_context = await _create_invocation_context(agent)
809+
810+
llm_request = LlmRequest()
811+
812+
# Run the instruction processor
813+
async for _ in request_processor.run_async(invocation_context, llm_request):
814+
pass
815+
816+
# Static instruction should be in system instructions
817+
assert llm_request.config.system_instruction == "Static instruction as string"
818+
819+
# Dynamic instruction should be added as user content
820+
assert len(llm_request.contents) == 1
821+
assert llm_request.contents[0].role == "user"
822+
assert len(llm_request.contents[0].parts) == 1
823+
assert llm_request.contents[0].parts[0].text == "Dynamic instruction content"
824+
825+
661826
@pytest.mark.parametrize("llm_backend", ["GOOGLE_AI", "VERTEX"])
662827
@pytest.mark.asyncio
663828
async def test_dynamic_instructions_added_to_user_content(llm_backend):

0 commit comments

Comments (0)