#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 18 06:03:24 2022
@author: tanu
"""
#%%
# Reference: https://www.datatechnotes.com/2019/07/classification-example-with.html
import numpy as np
import pandas as pd

from xgboost import XGBClassifier

from sklearn.base import BaseEstimator
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import (accuracy_score, f1_score, jaccard_score,
                             make_scorer, matthews_corrcoef, precision_score,
                             recall_score, roc_auc_score)
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
# colsample_bynode=1, colsample_bytree=1, gamma=0, learning_rate=0.1,
# max_delta_step=0, max_depth=3, min_child_weight=1, missing=None,
# n_estimators=100, n_jobs=1, nthread=None,
# objective='multi:softprob', random_state=0, reg_alpha=0,
# reg_lambda=1, scale_pos_weight=1, seed=None, silent=None,
# subsample=1, verbosity=1)
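
# NOTE: rs, njobs, skf_cv, mcc_score_fn and the data (X, y, X_bts, y_bts) are
# prepared upstream in the project's shared setup script. The definitions
# below are a minimal sketch so this file stands alone; the actual values
# (seed, fold count, job count) are assumptions.
rs = {'random_state': 42}                                   # assumed seed
njobs = {'n_jobs': 10}                                      # assumed parallelism
skf_cv = StratifiedKFold(n_splits=10, shuffle=True, **rs)   # assumed CV scheme
mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}      # key 'mcc' matches refit below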
#%% XGBoost + hyperparams: BaseEstimator: ClfSwitcher()
class ClfSwitcher(BaseEstimator):
    def __init__(self, estimator=SGDClassifier()):
        """
        A custom BaseEstimator that can switch between classifiers.
        :param estimator: sklearn object - the classifier
        """
        self.estimator = estimator

    def fit(self, X, y=None, **kwargs):
        self.estimator.fit(X, y)
        return self

    def predict(self, X, y=None):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        return self.estimator.predict_proba(X)

    def score(self, X, y):
        return self.estimator.score(X, y)
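
# Because the wrapped model is exposed as the 'estimator' parameter, one grid
# search can swap whole classifiers in and out, e.g. (sketch, not used in the
# search below):
# parameters_alt = [
#     {'clf__estimator': [SGDClassifier(**rs)]}
#     , {'clf__estimator': [XGBClassifier(**rs)]}
# ]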
parameters = [
    {
        'clf__estimator': [XGBClassifier(**rs, **njobs, verbosity = 3)]
        , 'clf__estimator__learning_rate': [0.01, 0.05, 0.1, 0.2]
        , 'clf__estimator__max_depth': [4, 6, 8, 10, 12, 16, 20]
        # The two params below are RandomForest-style and do not apply to
        # XGBClassifier, hence commented out:
        #, 'clf__estimator__min_samples_leaf': [4, 8, 12, 16, 20]
        #, 'clf__estimator__max_features': ['auto', 'sqrt']
    }
]
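# Grid size: 4 learning rates x 7 depths = 28 candidates; with k-fold CV the
# search trains 28*k models (280 under the 10-fold skf_cv assumed above).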
# Create pipeline: scale features to [0, 1], then the switchable classifier
pipeline = Pipeline([
    ('pre', MinMaxScaler()),
    ('clf', ClfSwitcher()),
])
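# Note: tree boosters like XGBoost are insensitive to monotonic feature
# scaling, so MinMaxScaler mainly keeps this pipeline's shape consistent with
# the other UQ_* model scripts (an assumption about the surrounding project).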
# Grid search, i.e. hyperparameter tuning, refitting on MCC
gscv_xgb = GridSearchCV(pipeline
                        , parameters
                        #, scoring = 'f1', refit = 'f1'
                        , scoring = mcc_score_fn, refit = 'mcc'
                        , cv = skf_cv
                        , **njobs
                        , return_train_score = False
                        , verbose = 3)
# Fit
gscv_xgb_fit = gscv_xgb.fit(X, y)
gscv_xgb_fit_be_mod = gscv_xgb_fit.best_params_
gscv_xgb_fit_be_res = gscv_xgb_fit.cv_results_
print('Best model:\n', gscv_xgb_fit_be_mod)
print("Best model's score:\n", gscv_xgb_fit.best_score_, ':', round(gscv_xgb_fit.best_score_, 2))
# nan-safe mean over all grid candidates (failed fits yield NaN scores)
print('\nMean test score from fit results:', round(np.nanmean(gscv_xgb_fit_be_res['mean_test_mcc']), 2))
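
# Optional sanity check (sketch): per-candidate MCC as a DataFrame. The
# *_test_mcc columns exist because the scoring dict used the key 'mcc'.
cv_res_df = pd.DataFrame(gscv_xgb_fit_be_res)[
    ['params', 'mean_test_mcc', 'std_test_mcc', 'rank_test_mcc']
].sort_values('rank_test_mcc')
print(cv_res_df.head())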
######################################
# Blind test
######################################
# See how the refitted best model does on the BLIND test set
test_predict = gscv_xgb_fit.predict(X_bts)
print(test_predict)
y_btsf = np.array(y_bts)
print(y_btsf)
print('Blind test accuracy:', accuracy_score(y_btsf, test_predict))
print('Blind test MCC:', matthews_corrcoef(y_btsf, test_predict))
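
# A fuller per-class breakdown of the blind-test predictions (optional):
from sklearn.metrics import classification_report
print(classification_report(y_btsf, test_predict))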
# Create a dict with all blind-test scores
xgb_bts_dict = {#'best_model': list(gscv_xgb_fit_be_mod.items())
    'bts_fscore'    : None
    , 'bts_mcc'       : None
    , 'bts_precision' : None
    , 'bts_recall'    : None
    , 'bts_accuracy'  : None
    , 'bts_roc_auc'   : None
    , 'bts_jaccard'   : None}
xgb_bts_dict
xgb_bts_dict['bts_fscore'] = round(f1_score(y_bts, test_predict),2)
xgb_bts_dict['bts_mcc'] = round(matthews_corrcoef(y_bts, test_predict),2)
xgb_bts_dict['bts_precision'] = round(precision_score(y_bts, test_predict),2)
xgb_bts_dict['bts_recall'] = round(recall_score(y_bts, test_predict),2)
xgb_bts_dict['bts_accuracy'] = round(accuracy_score(y_bts, test_predict),2)
xgb_bts_dict['bts_roc_auc'] = round(roc_auc_score(y_bts, test_predict),2)
xgb_bts_dict['bts_jaccard'] = round(jaccard_score(y_bts, test_predict),2)
xgb_bts_dict
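
# roc_auc_score above is computed on hard label predictions; an AUC from the
# predicted probabilities is usually preferred (sketch, assuming a binary
# target with the positive class in column 1):
bts_probs = gscv_xgb_fit.predict_proba(X_bts)[:, 1]
print('Blind test ROC-AUC from probabilities:', round(roc_auc_score(y_bts, bts_probs), 2))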
# Create a df from the dict with all scores
xgb_bts_df = pd.DataFrame.from_dict(xgb_bts_dict, orient = 'index')
xgb_bts_df.columns = ['XGBoost']
print(xgb_bts_df)
# Create df with best model params: row 0 holds the label, row 1 the params
model_params = pd.Series(['best_model_params', list(gscv_xgb_fit_be_mod.items())])
model_params_df = model_params.to_frame()
model_params_df
model_params_df.columns = ['XGBoost']
model_params_df.columns
# Combine the df of scores and the best model params
xgb_bts_df.columns
xgb_output = pd.concat([model_params_df, xgb_bts_df], axis = 0)
xgb_output
# Format the combined df: drop the 'best_model_params' label row (index 0),
# keeping the params themselves (index 1) and the scores
xgb_df = xgb_output.drop([0], axis = 0)
xgb_df
#FIXME: tidy the index of the formatted df
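# One possible tidy-up (sketch): move the metric names out of the index into
# their own column:
# xgb_df = xgb_df.reset_index().rename(columns = {'index': 'metric'})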
###############################################################################