@@ -181,6 +181,7 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
                     try:
                         self.model_token = models_tokens["ollama"][llm_params["model"]]
                     except KeyError as exc:
+                        print("model not found, using default token size (8192)")
                         self.model_token = 8192
                 else:
                     self.model_token = 8192
@@ -191,25 +192,28 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
         elif "hugging_face" in llm_params["model"]:
             try:
                 self.model_token = models_tokens["hugging_face"][llm_params["model"]]
-            except KeyError as exc:
-                raise KeyError("Model not supported") from exc
+            except KeyError:
+                print("model not found, using default token size (8192)")
+                self.model_token = 8192
             return HuggingFace(llm_params)
         elif "groq" in llm_params["model"]:
             llm_params["model"] = llm_params["model"].split("/")[-1]
 
             try:
                 self.model_token = models_tokens["groq"][llm_params["model"]]
-            except KeyError as exc:
-                raise KeyError("Model not supported") from exc
+            except KeyError:
+                print("model not found, using default token size (8192)")
+                self.model_token = 8192
             return Groq(llm_params)
         elif "bedrock" in llm_params["model"]:
             llm_params["model"] = llm_params["model"].split("/")[-1]
             model_id = llm_params["model"]
             client = llm_params.get('client', None)
             try:
                 self.model_token = models_tokens["bedrock"][llm_params["model"]]
-            except KeyError as exc:
-                raise KeyError("Model not supported") from exc
+            except KeyError:
+                print("model not found, using default token size (8192)")
+                self.model_token = 8192
             return Bedrock({
                 "client": client,
                 "model_id": model_id,
@@ -218,13 +222,18 @@ def _create_llm(self, llm_config: dict, chat=False) -> object:
                 }
             })
         elif "claude-3-" in llm_params["model"]:
-            self.model_token = models_tokens["claude"]["claude3"]
+            try:
+                self.model_token = models_tokens["claude"]["claude3"]
+            except KeyError:
+                print("model not found, using default token size (8192)")
+                self.model_token = 8192
             return Anthropic(llm_params)
         elif "deepseek" in llm_params["model"]:
             try:
                 self.model_token = models_tokens["deepseek"][llm_params["model"]]
-            except KeyError as exc:
-                raise KeyError("Model not supported") from exc
+            except KeyError:
+                print("model not found, using default token size (8192)")
+                self.model_token = 8192
             return DeepSeek(llm_params)
         else:
             raise ValueError(
@@ -312,10 +321,7 @@ def _create_embedder(self, embedder_config: dict) -> object:
                 models_tokens["bedrock"][embedder_config["model"]]
             except KeyError as exc:
                 raise KeyError("Model not supported") from exc
-            return BedrockEmbeddings(client=client, model_id=embedder_config["model"])
-        else:
-            raise ValueError(
-                "Model provided by the configuration not supported")
+        return BedrockEmbeddings(client=client, model_id=embedder_config["model"])
 
     def get_state(self, key=None) -> dict:
         """
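Every hunk in this diff makes the same change: a hard `raise KeyError("Model not supported")` becomes a soft fallback that warns and uses 8192 tokens. Below is a minimal sketch of that lookup-with-default pattern, factored into a single function. The helper name `lookup_model_tokens` and the sample `models_tokens` entries are hypothetical illustrations, not part of this commit.

# Hypothetical helper sketching the fallback pattern introduced above.
# The dictionary contents here are illustrative placeholders only.
DEFAULT_TOKENS = 8192

models_tokens = {
    "groq": {"llama3-8b-8192": 8192},
    "deepseek": {"deepseek-chat": 32768},
}

def lookup_model_tokens(provider: str, model: str) -> int:
    """Return the context size for provider/model, or a warned default."""
    try:
        return models_tokens[provider][model]
    except KeyError:
        # Unknown provider or model: warn and fall back instead of raising,
        # mirroring the behavior each hunk of the commit adopts.
        print("model not found, using default token size (8192)")
        return DEFAULT_TOKENS

# Example: an unlisted model prints the warning and yields 8192.
assert lookup_model_tokens("groq", "unlisted-model") == DEFAULT_TOKENS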