
Commit 0be7e7d

Revert "Support passing prompt_label to langfuse (#11018)"
This reverts commit 2b50b43.
1 parent 89daa1d · commit 0be7e7d

31 files changed: +87 −116 lines changed
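For context, the reverted change let callers pass a `prompt_label` through `completion()`/`acompletion()` kwargs, which was forwarded to the Langfuse prompt-management hook to select a prompt version by label. A minimal usage sketch, assuming a Langfuse prompt-management setup like the one removed from the example config further below (the message content and prompt_variables value are placeholders):

    import litellm

    # Assumes the Langfuse prompt-management integration is configured;
    # model and prompt_id mirror the example config removed in this commit.
    response = litellm.completion(
        model="langfuse/gpt-3.5-turbo",
        messages=[{"role": "user", "content": "tell me a joke"}],  # placeholder message
        prompt_id="jokes",                     # Langfuse prompt name
        prompt_variables={"topic": "python"},  # placeholder template variables
        # prompt_label="latest",               # accepted before this revert; no longer forwarded after it
    )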

litellm/integrations/anthropic_cache_control_hook.py

Lines changed: 4 additions & 5 deletions
@@ -28,7 +28,6 @@ def get_chat_completion_prompt(
         prompt_id: Optional[str],
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         """
         Apply cache control directives based on specified injection points.
@@ -80,10 +79,10 @@ def _process_message_injection(
         # Case 1: Target by specific index
         if targetted_index is not None:
             if 0 <= targetted_index < len(messages):
-                messages[
-                    targetted_index
-                ] = AnthropicCacheControlHook._safe_insert_cache_control_in_message(
-                    messages[targetted_index], control
+                messages[targetted_index] = (
+                    AnthropicCacheControlHook._safe_insert_cache_control_in_message(
+                        messages[targetted_index], control
+                    )
                 )
         # Case 2: Target by role
         elif targetted_role is not None:

litellm/integrations/custom_logger.py

Lines changed: 0 additions & 2 deletions
@@ -87,7 +87,6 @@ async def async_get_chat_completion_prompt(
         dynamic_callback_params: StandardCallbackDynamicParams,
         litellm_logging_obj: LiteLLMLoggingObj,
         tools: Optional[List[Dict]] = None,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         """
         Returns:
@@ -105,7 +104,6 @@ def get_chat_completion_prompt(
         prompt_id: Optional[str],
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         """
         Returns:

litellm/integrations/custom_prompt_management.py

Lines changed: 0 additions & 2 deletions
@@ -18,7 +18,6 @@ def get_chat_completion_prompt(
         prompt_id: Optional[str],
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         """
         Returns:
@@ -44,7 +43,6 @@ def _compile_prompt_helper(
         prompt_id: str,
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> PromptManagementClient:
         raise NotImplementedError(
             "Custom prompt management does not support compile prompt helper"

litellm/integrations/humanloop.py

Lines changed: 5 additions & 2 deletions
@@ -155,8 +155,11 @@ def get_chat_completion_prompt(
         prompt_id: Optional[str],
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
-    ) -> Tuple[str, List[AllMessageValues], dict,]:
+    ) -> Tuple[
+        str,
+        List[AllMessageValues],
+        dict,
+    ]:
         humanloop_api_key = dynamic_callback_params.get(
             "humanloop_api_key"
         ) or get_secret_str("HUMANLOOP_API_KEY")

litellm/integrations/langfuse/langfuse_prompt_management.py

Lines changed: 8 additions & 12 deletions
@@ -130,12 +130,9 @@ def integration_name(self):
         return "langfuse"

     def _get_prompt_from_id(
-        self,
-        langfuse_prompt_id: str,
-        langfuse_client: LangfuseClass,
-        prompt_label: Optional[str] = None,
+        self, langfuse_prompt_id: str, langfuse_client: LangfuseClass
     ) -> PROMPT_CLIENT:
-        return langfuse_client.get_prompt(langfuse_prompt_id, label=prompt_label)
+        return langfuse_client.get_prompt(langfuse_prompt_id)

     def _compile_prompt(
         self,
@@ -179,16 +176,18 @@ async def async_get_chat_completion_prompt(
         dynamic_callback_params: StandardCallbackDynamicParams,
         litellm_logging_obj: LiteLLMLoggingObj,
         tools: Optional[List[Dict]] = None,
-        prompt_label: Optional[str] = None,
-    ) -> Tuple[str, List[AllMessageValues], dict,]:
+    ) -> Tuple[
+        str,
+        List[AllMessageValues],
+        dict,
+    ]:
         return self.get_chat_completion_prompt(
             model,
             messages,
             non_default_params,
             prompt_id,
             prompt_variables,
             dynamic_callback_params,
-            prompt_label=prompt_label,
         )

     def should_run_prompt_management(
@@ -212,7 +211,6 @@ def _compile_prompt_helper(
         prompt_id: str,
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> PromptManagementClient:
         langfuse_client = langfuse_client_init(
             langfuse_public_key=dynamic_callback_params.get("langfuse_public_key"),
@@ -221,9 +219,7 @@ def _compile_prompt_helper(
             langfuse_host=dynamic_callback_params.get("langfuse_host"),
         )
         langfuse_prompt_client = self._get_prompt_from_id(
-            langfuse_prompt_id=prompt_id,
-            langfuse_client=langfuse_client,
-            prompt_label=prompt_label,
+            langfuse_prompt_id=prompt_id, langfuse_client=langfuse_client
         )

         ## SET PROMPT
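Net effect of this file's hunks: prompt lookup reverts to fetching by ID alone. A rough before/after sketch (label behavior beyond this diff follows the Langfuse SDK's own defaults):

    # before the revert: version selected by a caller-supplied label
    prompt = langfuse_client.get_prompt("jokes", label="latest")
    # after the revert: no label argument; the SDK's default label selection applies
    prompt = langfuse_client.get_prompt("jokes")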

litellm/integrations/prompt_management_base.py

Lines changed: 0 additions & 5 deletions
@@ -33,7 +33,6 @@ def _compile_prompt_helper(
         prompt_id: str,
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> PromptManagementClient:
         pass

@@ -50,13 +49,11 @@ def compile_prompt(
         prompt_variables: Optional[dict],
         client_messages: List[AllMessageValues],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> PromptManagementClient:
         compiled_prompt_client = self._compile_prompt_helper(
             prompt_id=prompt_id,
             prompt_variables=prompt_variables,
             dynamic_callback_params=dynamic_callback_params,
-            prompt_label=prompt_label,
         )

         try:
@@ -85,7 +82,6 @@ def get_chat_completion_prompt(
         prompt_id: Optional[str],
         prompt_variables: Optional[dict],
         dynamic_callback_params: StandardCallbackDynamicParams,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         if prompt_id is None:
             raise ValueError("prompt_id is required for Prompt Management Base class")
@@ -99,7 +95,6 @@ def get_chat_completion_prompt(
             prompt_variables=prompt_variables,
             client_messages=messages,
             dynamic_callback_params=dynamic_callback_params,
-            prompt_label=prompt_label,
         )

         completed_messages = prompt_template["completed_messages"] or messages

litellm/integrations/vector_stores/bedrock_vector_store.py

Lines changed: 10 additions & 12 deletions
@@ -75,7 +75,6 @@ async def async_get_chat_completion_prompt(
         dynamic_callback_params: StandardCallbackDynamicParams,
         litellm_logging_obj: LiteLLMLoggingObj,
         tools: Optional[List[Dict]] = None,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         """
         Retrieves the context from the Bedrock Knowledge Base and appends it to the messages.
@@ -100,11 +99,10 @@ async def async_get_chat_completion_prompt(
                 f"Bedrock Knowledge Base Response: {bedrock_kb_response}"
             )

-            (
-                context_message,
-                context_string,
-            ) = self.get_chat_completion_message_from_bedrock_kb_response(
-                bedrock_kb_response
+            context_message, context_string = (
+                self.get_chat_completion_message_from_bedrock_kb_response(
+                    bedrock_kb_response
+                )
             )
             if context_message is not None:
                 messages.append(context_message)
@@ -128,9 +126,9 @@ async def async_get_chat_completion_prompt(
                 )
             )

-            litellm_logging_obj.model_call_details[
-                "vector_store_request_metadata"
-            ] = vector_store_request_metadata
+            litellm_logging_obj.model_call_details["vector_store_request_metadata"] = (
+                vector_store_request_metadata
+            )

         return model, messages, non_default_params

@@ -142,9 +140,9 @@ def transform_bedrock_kb_response_to_vector_store_search_response(
         """
         Transform a BedrockKBResponse to a VectorStoreSearchResponse
         """
-        retrieval_results: Optional[
-            List[BedrockKBRetrievalResult]
-        ] = bedrock_kb_response.get("retrievalResults", None)
+        retrieval_results: Optional[List[BedrockKBRetrievalResult]] = (
+            bedrock_kb_response.get("retrievalResults", None)
+        )
         vector_store_search_response: VectorStoreSearchResponse = (
             VectorStoreSearchResponse(search_query=query, data=[])
         )

litellm/litellm_core_utils/litellm_logging.py

Lines changed: 0 additions & 4 deletions
@@ -539,7 +539,6 @@ def get_chat_completion_prompt(
         prompt_id: Optional[str],
         prompt_variables: Optional[dict],
         prompt_management_logger: Optional[CustomLogger] = None,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         custom_logger = (
             prompt_management_logger
@@ -560,7 +559,6 @@ def get_chat_completion_prompt(
             prompt_id=prompt_id,
             prompt_variables=prompt_variables,
             dynamic_callback_params=self.standard_callback_dynamic_params,
-            prompt_label=prompt_label,
         )
         self.messages = messages
         return model, messages, non_default_params
@@ -574,7 +572,6 @@ async def async_get_chat_completion_prompt(
         prompt_variables: Optional[dict],
         prompt_management_logger: Optional[CustomLogger] = None,
         tools: Optional[List[Dict]] = None,
-        prompt_label: Optional[str] = None,
     ) -> Tuple[str, List[AllMessageValues], dict]:
         custom_logger = (
             prompt_management_logger
@@ -597,7 +594,6 @@ async def async_get_chat_completion_prompt(
             dynamic_callback_params=self.standard_callback_dynamic_params,
             litellm_logging_obj=self,
             tools=tools,
-            prompt_label=prompt_label,
         )
         self.messages = messages
         return model, messages, non_default_params

litellm/main.py

Lines changed: 2 additions & 9 deletions
@@ -97,7 +97,6 @@
     get_optional_params_image_gen,
     get_optional_params_transcription,
     get_secret,
-    get_standard_openai_params,
     mock_completion_streaming_obj,
     read_config_args,
     supports_httpx_timeout,
@@ -429,7 +428,6 @@ async def acompletion(
         prompt_id=kwargs.get("prompt_id", None),
         prompt_variables=kwargs.get("prompt_variables", None),
         tools=tools,
-        prompt_label=kwargs.get("prompt_label", None),
    )

    #########################################################
@@ -985,7 +983,6 @@ def completion(  # type: ignore # noqa: PLR0915
        assistant_continue_message=assistant_continue_message,
    )
    ######## end of unpacking kwargs ###########
-    standard_openai_params = get_standard_openai_params(params=args)
    non_default_params = get_non_default_completion_params(kwargs=kwargs)
    litellm_params = {}  # used to prevent unbound var errors
    ## PROMPT MANAGEMENT HOOKS ##
@@ -1004,7 +1001,6 @@ def completion(  # type: ignore # noqa: PLR0915
            non_default_params=non_default_params,
            prompt_id=prompt_id,
            prompt_variables=prompt_variables,
-            prompt_label=kwargs.get("prompt_label", None),
        )

    try:
@@ -1238,13 +1234,10 @@ def completion(  # type: ignore # noqa: PLR0915
                max_retries=max_retries,
                timeout=timeout,
            )
-            cast(LiteLLMLoggingObj, logging).update_environment_variables(
+            logging.update_environment_variables(
                model=model,
                user=user,
-                optional_params={
-                    **standard_openai_params,
-                    **non_default_params,
-                },  # [IMPORTANT] - using standard_openai_params ensures consistent params logged to langfuse for finetuning / eval datasets.
+                optional_params=optional_params,
                litellm_params=litellm_params,
                custom_llm_provider=custom_llm_provider,
            )

litellm/proxy/_new_secret_config.yaml

Lines changed: 4 additions & 14 deletions
@@ -1,8 +1,8 @@
 model_list:
-  - model_name: "gemini-2.0-flash-gemini"
+  - model_name: "gemini-2.0-flash"
     litellm_params:
-      model: gemini/gemini-2.0-flash
-  - model_name: "gpt-4o-mini-openai"
+      model: gemini/gemini-2.0-flash-live-001
+  - model_name: "gpt-4.1-openai"
     litellm_params:
       model: gpt-4.1-mini-2025-04-14
       api_key: os.environ/OPENAI_API_KEY
@@ -71,16 +71,6 @@ model_list:
       model: mistral/*
       api_key: os.environ/MISTRAL_API_KEY
       access_groups: ["beta-models"]
-  - model_name: my-langfuse-model
-    litellm_params:
-      model: langfuse/gpt-3.5-turbo
-      prompt_id: "jokes"
-      prompt_label: "latest"
-      api_key: os.environ/OPENAI_API_KEY

 litellm_settings:
-  callbacks: ["langfuse"]
-
-general_settings:
-  store_model_in_db: true
-  store_prompts_in_spend_logs: true
+  cache: true
