
Commit f159bd9

seanzhougoogle authored and copybara-github committed
fix: Use str() to calculate fingerprint instead of json.dumps
This avoids serialization issues for fields that are not JSON serializable. It also restructures the debug logs in the context cache manager to make potential issues easier to debug.

PiperOrigin-RevId: 811182492
1 parent d486795 commit f159bd9
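
For context, a minimal sketch of the failure mode this commit sidesteps: json.dumps() raises TypeError on bytes values (for example, inline blob data in cached contents), while str() can stringify any dict. The fingerprint_data fields below are illustrative stand-ins, not the manager's actual fields.

import hashlib
import json

# Illustrative fingerprint payload; one bytes value is enough to break json.dumps.
fingerprint_data = {"model": "gemini-2.0-flash", "blob": b"\x89PNG"}

try:
  json.dumps(fingerprint_data, sort_keys=True)
except TypeError as e:
  print(e)  # Object of type bytes is not JSON serializable

# str() only needs repr()-able values, so the hash can always be computed.
fingerprint_str = str(fingerprint_data)
print(hashlib.sha256(fingerprint_str.encode()).hexdigest()[:16])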

File tree

1 file changed: +54 −25 lines changed

src/google/adk/models/gemini_context_cache_manager.py

Lines changed: 54 additions & 25 deletions
@@ -68,8 +68,19 @@ async def handle_context_caching(
     """
     # Check if we have existing cache metadata and if it's valid
     if llm_request.cache_metadata:
+      logger.debug(
+          "Found existing cache metadata: cache_name=%s, invocations_used=%d,"
+          " cached_contents_count=%d",
+          llm_request.cache_metadata.cache_name,
+          llm_request.cache_metadata.invocations_used,
+          llm_request.cache_metadata.cached_contents_count,
+      )
       if await self._is_cache_valid(llm_request):
         # Valid cache found - use it
+        logger.debug(
+            "Cache is valid, reusing cache: %s",
+            llm_request.cache_metadata.cache_name,
+        )
         cache_name = llm_request.cache_metadata.cache_name
         cache_contents_count = llm_request.cache_metadata.cached_contents_count
         self._apply_cache_to_request(
@@ -78,13 +89,22 @@ async def handle_context_caching(
         return llm_request.cache_metadata.model_copy()
       else:
         # Invalid cache - clean it up
+        logger.debug(
+            "Cache is invalid, cleaning up: %s",
+            llm_request.cache_metadata.cache_name,
+        )
         await self.cleanup_cache(llm_request.cache_metadata.cache_name)
         llm_request.cache_metadata = None

     # Find contents to cache for new cache creation
     cache_contents_count = self._find_count_of_contents_to_cache(
         llm_request.contents
     )
+    logger.debug(
+        "Determined to cache %d contents from %d total contents",
+        cache_contents_count,
+        len(llm_request.contents),
+    )

     # Create new cache with the determined contents
     cache_metadata = await self._create_new_cache_with_contents(
@@ -97,6 +117,9 @@ async def handle_context_caching(
     self._apply_cache_to_request(
         llm_request, cache_metadata.cache_name, cache_contents_count
     )
+    logger.debug(
+        "Successfully applied cache to request: %s", cache_metadata.cache_name
+    )

     return cache_metadata

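All of the new messages are emitted at DEBUG level, so they are invisible under the default WARNING threshold. A minimal way to surface them while diagnosing cache reuse, using only the standard library (no ADK-specific logger name is assumed):

import logging

# Root logger at DEBUG is the bluntest switch; narrow it to the cache
# manager's logger once you know the name the module actually registers.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
)
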
@@ -218,8 +241,8 @@ def _generate_cache_fingerprint(
       contents_data.append(content.model_dump())
     fingerprint_data["cached_contents"] = contents_data

-    # Generate hash
-    fingerprint_str = json.dumps(fingerprint_data, sort_keys=True)
+    # Generate hash using str() instead of json.dumps() to handle bytes
+    fingerprint_str = str(fingerprint_data)
     return hashlib.sha256(fingerprint_str.encode()).hexdigest()[:16]

   async def _create_new_cache_with_contents(
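
One behavioral difference worth flagging (an observation about the change, not something the commit message states): json.dumps(..., sort_keys=True) normalized key order, while str() preserves dict insertion order, so the fingerprint now relies on fingerprint_data being built in a fixed order — which holds when the dict is always constructed by the same code path:

import json

a = {"x": 1, "y": 2}
b = {"y": 2, "x": 1}
print(json.dumps(a, sort_keys=True) == json.dumps(b, sort_keys=True))  # True
print(str(a) == str(b))  # False: key order now matters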
@@ -310,6 +333,10 @@ async def _create_gemini_cache(
     # Add system instruction if present
     if llm_request.config and llm_request.config.system_instruction:
       cache_config.system_instruction = llm_request.config.system_instruction
+      logger.debug(
+          "Added system instruction to cache config (length=%d)",
+          len(llm_request.config.system_instruction),
+      )

     # Add tools if present
     if llm_request.config and llm_request.config.tools:
@@ -319,36 +346,38 @@ async def _create_gemini_cache(
     if llm_request.config and llm_request.config.tool_config:
       cache_config.tool_config = llm_request.config.tool_config

-    try:
-      cached_content = await self.genai_client.aio.caches.create(
-          model=llm_request.model,
-          config=cache_config,
-      )
-      # Set precise creation timestamp right after cache creation
-      created_at = time.time()
-      logger.info("Cache created successfully: %s", cached_content.name)
-
-      # Return complete cache metadata with precise timing
-      return CacheMetadata(
-          cache_name=cached_content.name,
-          expire_time=created_at + llm_request.cache_config.ttl_seconds,
-          fingerprint=self._generate_cache_fingerprint(
-              llm_request, cache_contents_count
-          ),
-          invocations_used=1,
-          cached_contents_count=cache_contents_count,
-          created_at=created_at,
-      )
-    except Exception as e:
-      logger.error("Failed to create Gemini cache: %s", e)
-      raise
+    logger.debug(
+        "Creating cache with model %s and config: %s",
+        llm_request.model,
+        cache_config,
+    )
+    cached_content = await self.genai_client.aio.caches.create(
+        model=llm_request.model,
+        config=cache_config,
+    )
+    # Set precise creation timestamp right after cache creation
+    created_at = time.time()
+    logger.info("Cache created successfully: %s", cached_content.name)
+
+    # Return complete cache metadata with precise timing
+    return CacheMetadata(
+        cache_name=cached_content.name,
+        expire_time=created_at + llm_request.cache_config.ttl_seconds,
+        fingerprint=self._generate_cache_fingerprint(
+            llm_request, cache_contents_count
+        ),
+        invocations_used=1,
+        cached_contents_count=cache_contents_count,
+        created_at=created_at,
+    )

   async def cleanup_cache(self, cache_name: str) -> None:
     """Clean up cache by deleting it.

     Args:
       cache_name: Name of cache to delete
     """
+    logger.debug("Attempting to delete cache: %s", cache_name)
     try:
       await self.genai_client.aio.caches.delete(name=cache_name)
       logger.info("Cache cleaned up: %s", cache_name)
