added UQ scripts to do hyperparam tuning for ML models
This commit is contained in:
parent 4dbc90ad44
commit ee163d3978
3 changed files with 634 additions and 0 deletions
140  UQ_RF.py  Normal file

@@ -0,0 +1,140 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 18 06:03:24 2022

@author: tanu
"""
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import (accuracy_score, f1_score, jaccard_score,
                             matthews_corrcoef, precision_score,
                             recall_score, roc_auc_score)
# NOTE: rs, njobs, mcc_score_fn, skf_cv and the data (X, y, X_bts, y_bts)
# are expected to be defined in a shared setup script before this file runs.
#%% RandomForest + hyperparam: BaseEstimator: ClfSwitcher()
class ClfSwitcher(BaseEstimator):
    def __init__(
        self,
        estimator = SGDClassifier(),
    ):
        """
        A custom BaseEstimator that can switch between classifiers.
        :param estimator: sklearn object - the classifier
        """
        self.estimator = estimator

    def fit(self, X, y=None, **kwargs):
        self.estimator.fit(X, y)
        return self

    def predict(self, X, y=None):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        return self.estimator.predict_proba(X)

    def score(self, X, y):
        return self.estimator.score(X, y)

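# Why ClfSwitcher: with the pipeline step named 'clf', GridSearchCV can swap
# the whole estimator via the 'clf__estimator' key. A minimal sketch (this
# demo grid is hypothetical, not part of this commit):
#
#   demo_grid = [{'clf__estimator': [SGDClassifier()]},
#                {'clf__estimator': [RandomForestClassifier()]}]
#   GridSearchCV(pipeline, demo_grid, cv = 3).fit(X, y)
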
parameters = [
    {
        'clf__estimator': [RandomForestClassifier(**rs
                                                  , **njobs
                                                  , bootstrap = True
                                                  , oob_score = True)],
        'clf__estimator__max_depth': [4, 6, 8, 10, 12, 16, 20, None]
        , 'clf__estimator__class_weight': ['balanced', 'balanced_subsample']
        , 'clf__estimator__n_estimators': [10, 25, 50, 100]
        , 'clf__estimator__criterion': ['gini', 'entropy'] #, 'log_loss']
        #, 'clf__estimator__max_features': ['auto', 'sqrt']
        , 'clf__estimator__min_samples_leaf': [1, 2, 3, 4, 5, 10]
        , 'clf__estimator__min_samples_split': [2, 5, 15, 20]
    }
]

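# rs and njobs are dicts unpacked into the estimator call above; they are not
# defined in this file. A plausible sketch of the shared definitions this
# script assumes (the exact values are assumptions):
#
#   rs = {'random_state': 42}
#   njobs = {'n_jobs': 10}
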
# Create pipeline
pipeline = Pipeline([
    ('pre', MinMaxScaler()),
    ('clf', ClfSwitcher()),
])

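# Design note: keeping MinMaxScaler inside the pipeline means the scaler is
# re-fitted on the training folds only during cross-validation, so the test
# folds never leak into the scaling parameters.
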
# Grid search, i.e. hyperparameter tuning, refitting on mcc
gscv_rf = GridSearchCV(pipeline
                       , parameters
                       #, scoring = 'f1', refit = 'f1'
                       , scoring = mcc_score_fn, refit = 'mcc'
                       , cv = skf_cv
                       , **njobs
                       , return_train_score = False
                       , verbose = 3)

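# mcc_score_fn and skf_cv also come from the shared setup. For refit = 'mcc'
# to work, scoring must be a dict keyed by 'mcc'; a plausible sketch of what
# is assumed (the fold count is an assumption):
#
#   from sklearn.metrics import make_scorer
#   from sklearn.model_selection import StratifiedKFold
#   mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}
#   skf_cv = StratifiedKFold(n_splits = 10, shuffle = True, **rs)
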
# Fit
gscv_rf_fit = gscv_rf.fit(X, y)

gscv_rf_fit_be_mod = gscv_rf_fit.best_params_
gscv_rf_fit_be_res = gscv_rf_fit.cv_results_

print('Best model:\n', gscv_rf_fit_be_mod)
print('Best model score:\n', gscv_rf_fit.best_score_, ':', round(gscv_rf_fit.best_score_, 2))

# np.nanmean skips parameter settings whose mean_test_mcc is NaN
print('\nMean test score from fit results:', round(np.nanmean(gscv_rf_fit_be_res['mean_test_mcc']), 2))

######################################
# Blind test
######################################

# See how it does on the BLIND test
#print('\nBlind test score, mcc:', )

test_predict = gscv_rf_fit.predict(X_bts)
print(test_predict)
print(np.array(y_bts))
y_btsf = np.array(y_bts)

print(accuracy_score(y_btsf, test_predict))
print(matthews_corrcoef(y_btsf, test_predict))

# create a dict with all scores
rf_bts_dict = {#'best_model': list(gscv_rf_fit_be_mod.items())
    'bts_fscore'     : None
    , 'bts_mcc'      : None
    , 'bts_precision': None
    , 'bts_recall'   : None
    , 'bts_accuracy' : None
    , 'bts_roc_auc'  : None
    , 'bts_jaccard'  : None}
rf_bts_dict
rf_bts_dict['bts_fscore']    = round(f1_score(y_bts, test_predict), 2)
rf_bts_dict['bts_mcc']       = round(matthews_corrcoef(y_bts, test_predict), 2)
rf_bts_dict['bts_precision'] = round(precision_score(y_bts, test_predict), 2)
rf_bts_dict['bts_recall']    = round(recall_score(y_bts, test_predict), 2)
rf_bts_dict['bts_accuracy']  = round(accuracy_score(y_bts, test_predict), 2)
rf_bts_dict['bts_roc_auc']   = round(roc_auc_score(y_bts, test_predict), 2)
rf_bts_dict['bts_jaccard']   = round(jaccard_score(y_bts, test_predict), 2)
rf_bts_dict

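# Note: roc_auc_score above is computed on hard 0/1 predictions; scoring on
# probabilities is the more usual choice. A sketch, assuming a binary target
# (this line is not part of the original commit):
#
#   bts_probs = gscv_rf_fit.predict_proba(X_bts)[:, 1]
#   round(roc_auc_score(y_btsf, bts_probs), 2)
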
# Create a df from dict with all scores
pd.DataFrame.from_dict(rf_bts_dict, orient = 'index', columns = ['best_model'])

rf_bts_df = pd.DataFrame.from_dict(rf_bts_dict, orient = 'index')
rf_bts_df.columns = ['Random_Forest']
print(rf_bts_df)

# Create df with best model params
model_params = pd.Series(['best_model_params', list(gscv_rf_fit_be_mod.items())])
model_params_df = model_params.to_frame()
model_params_df
model_params_df.columns = ['Random_Forest']
model_params_df.columns

# Combine the df of scores and the best model params
rf_bts_df.columns
rf_output = pd.concat([model_params_df, rf_bts_df], axis = 0)
rf_output

# Format the combined df
# Drop the 'best_model_params' label row (index 0) from rf_output
rf_df = rf_output.drop([0], axis = 0)
rf_df

#FIXME: tidy the index of the formatted df
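# One way to address the FIXME above (a sketch, not part of this commit):
# relabel the surviving integer index so every row has a readable name:
#
#   rf_df_tidy = rf_df.rename(index = {1: 'best_model_params'})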
###############################################################################