# Silence the report_creator library's chatty INFO-level output.
logging.getLogger("report_creator").setLevel(logging.WARNING)

# Number of algorithms the AutoMLx pipeline tunes by default.
AUTOMLX_N_ALGOS_TUNED = 4

# Score metrics requested from AutoMLx. The pipeline optimizes for the
# FIRST entry; the remaining entries are still computed and reported.
AUTOMLX_DEFAULT_SCORE_METRIC = [
    "neg_sym_mean_abs_percent_error",
    "neg_mean_abs_percent_error",
    "neg_root_mean_squared_error",
]
3436class  AutoMLXOperatorModel (ForecastOperatorBaseModel ):
@@ -45,10 +47,13 @@ def set_kwargs(self):
4547 model_kwargs_cleaned ["n_algos_tuned" ] =  model_kwargs_cleaned .get (
4648 "n_algos_tuned" , AUTOMLX_N_ALGOS_TUNED 
4749 )
48-  model_kwargs_cleaned ["score_metric" ] =  AUTOMLX_METRIC_MAP .get (
49-  self .spec .metric ,
50-  model_kwargs_cleaned .get ("score_metric" , AUTOMLX_DEFAULT_SCORE_METRIC ),
51-  )
50+  metric_to_optimize  =  AUTOMLX_METRIC_MAP .get (self .spec .metric )
51+  model_kwargs_cleaned ["score_metric" ] =  AUTOMLX_DEFAULT_SCORE_METRIC 
52+  # The first score metric in the list will be the one for which the pipeline optimizes 
53+  if  metric_to_optimize  is  not None :
54+  model_kwargs_cleaned ["score_metric" ].remove (metric_to_optimize )
55+  model_kwargs_cleaned ["score_metric" ].insert (0 , metric_to_optimize )
56+ 
5257 model_kwargs_cleaned .pop ("task" , None )
5358 time_budget  =  model_kwargs_cleaned .pop ("time_budget" , - 1 )
5459 model_kwargs_cleaned ["preprocessing" ] =  (
@@ -70,7 +75,7 @@ def preprocess(self, data, series_id): # TODO: re-use self.le for explanations
7075 @runtime_dependency ( 
7176 module = "automlx" , 
7277 err_msg = ( 
73-  "Please run `pip3 install oracle-automlx[forecasting]>=25.1.1 ` "  
78+  "Please run `pip3 install oracle-automlx[forecasting]>=25.3.0 ` "  
7479 "to install the required dependencies for automlx."  
7580 ), 
7681 ) 
@@ -163,7 +168,7 @@ def _build_model(self) -> pd.DataFrame:
163168 self .models [s_id ] =  {}
164169 self .models [s_id ]["model" ] =  model 
165170 self .models [s_id ]["le" ] =  self .le [s_id ]
166-  self .models [s_id ]["score" ] =  self .get_validation_score_and_metric (model )
171+  self .models [s_id ]["score" ] =  self .get_all_metrics (model )
167172
168173 # In case of Naive model, model.forecast function call does not return confidence intervals. 
169174 if  f"{ target }   not  in summary_frame :
@@ -518,26 +523,27 @@ def explain_model(self):
518523 )
519524 logger .debug (f"Full Traceback: { traceback .format_exc ()}  )
520525
521-  def  get_validation_score_and_metric (self , model ):
526+  def  get_all_metrics (self , model ):
522527 trials  =  model .completed_trials_summary_ 
523528 model_params  =  model .selected_model_params_ 
524529 if  len (trials ) >  0 :
525-  score_col  =  [col  for  col  in  trials .columns  if  "Score"  in  col ][0 ]
526-  validation_score  =  trials [trials .Hyperparameters  ==  model_params ][
527-  score_col 
530+  all_metrics  =  trials [trials .Hyperparameters  ==  model_params ][
531+  "All Metrics" 
528532 ].iloc [0 ]
529533 else :
530-  validation_score  =  0 
531-  return  - 1  *  validation_score 
534+  all_metrics  =  {}
535+  reverse_map  =  {v : k  for  k , v  in  AUTOMLX_METRIC_MAP .items ()}
536+  all_metrics  =  {reverse_map [key ]: - 1  *  value  for  key , value  in  all_metrics .items () if  key  in  reverse_map }
537+  return  all_metrics 
532538
533539 def  generate_train_metrics (self ) ->  pd .DataFrame :
534540 """ 
535-  Generate Training Metrics when fitted data is not available.  
541+  Generate Training Metrics for Automlx  
536542 """ 
537543 total_metrics  =  pd .DataFrame ()
538544 for  s_id  in  self .forecast_output .list_series_ids ():
539545 try :
540-  metrics  =  { self .spec . metric . upper ():  self . models [s_id ]["score" ]} 
546+  metrics  =  self .models [s_id ]["score" ]
541547 metrics_df  =  pd .DataFrame .from_dict (
542548 metrics , orient = "index" , columns = [s_id ]
543549 )
0 commit comments