#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 11 11:15:50 2022

@author: tanu
"""
#%% variables
# rs = {'random_state': 42}

# skf_cv = StratifiedKFold(n_splits = 10
# #, shuffle = False, random_state= None)
# , shuffle = True,**rs)
#%% MultClassPipeSKFLoop: function call()
# Run the stratified k-fold pipeline loop over the numerical features
# and pretty-print the nested per-model/per-fold results.
t3_res = MultClassPipeSKFLoop(
    input_df=num_df_wtgt[numerical_FN],
    target=num_df_wtgt['mutation_class'],
    var_type='numerical',
    sel_cv=skf_cv,  # alternative: sel_cv = rskf_cv
)
pp.pprint(t3_res)
# print(t3_res)
|
|
################################################################
# extract items from within a nested dict

#%% Classification Metrics we need to mean()
# classification_metrics = {
#     'F1_score': []
#     ,'MCC': []
#     ,'Precision': []
#     ,'Recall': []
#     ,'Accuracy': []
#     ,'ROC_AUC': []
# }

# "mean() of the current metric across all folds for this model"
# the output containing all the metrics across all folds for this model
|
|
# Per-model raw scores: out[model][metric] -> list of per-fold values.
out = {}

# Per-model summary: just the mean() of each metric (filled by the pass below).
out_means = {}

# Metric names expected in each fold's result dict. Named once here instead
# of repeating a throwaway {'F1_score': [], ...} literal at every use site.
score_metric_names = ['F1_score', 'MCC', 'Precision', 'Recall',
                      'Accuracy', 'ROC_AUC']

# Build up out{} from t3_res, which came from loopity_loop
for model in t3_res:
    # Fresh lists per model: reusing one dict object would alias across models.
    out[model] = {metric: [] for metric in score_metric_names}
    out_means[model] = {}
    print(model)
    for fold in t3_res[model]:
        for metric in score_metric_names:
            # Collect this fold's value for the metric under its model.
            out[model][metric].append(t3_res[model][fold][metric])
|
|
# Now that out{} holds the per-fold scores, average each metric per model.
# Metric names listed once rather than repeating the dict literal inline.
score_metric_names = ['F1_score', 'MCC', 'Precision', 'Recall',
                      'Accuracy', 'ROC_AUC']

for model in out:
    for metric in score_metric_names:
        metric_mean = mean(out[model][metric])
        # Store the mean both alongside the raw per-fold lists and in the
        # summary-only dict used to build the scores DataFrame.
        out[model][metric + '_mean'] = metric_mean
        out_means[model][metric + '_mean'] = metric_mean
|
|
|
|
# Tabulate the per-model metric means; round a copy to 2 d.p. for display.
out_scores = pd.DataFrame(out_means)
out_scores2 = out_scores.round(2)
|