Added and ran the hyperparameter script for all the different classifiers, but could not successfully run feature selection and hyperparameter tuning together
This commit is contained in:
parent
74af5ef890
commit
37bda41f44
18 changed files with 131 additions and 142 deletions
|
@ -5,7 +5,18 @@ Created on Wed May 18 06:03:24 2022
|
|||
|
||||
@author: tanu
|
||||
"""
|
||||
#%% RandomForest + hyperparam: BaseEstimator: ClfSwitcher()
|
||||
|
||||
#%%
|
||||
#https://www.datatechnotes.com/2019/07/classification-example-with.html
|
||||
# XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
|
||||
# colsample_bynode=1, colsample_bytree=1, gamma=0, learning_rate=0.1,
|
||||
# max_delta_step=0, max_depth=3, min_child_weight=1, missing=None,
|
||||
# n_estimators=100, n_jobs=1, nthread=None,
|
||||
# objective='multi:softprob', random_state=0, reg_alpha=0,
|
||||
# reg_lambda=1, scale_pos_weight=1, seed=None, silent=None,
|
||||
# subsample=1, verbosity=1)
|
||||
|
||||
#%% XGBoost + hyperparam: BaseEstimator: ClfSwitcher()
|
||||
class ClfSwitcher(BaseEstimator):
|
||||
def __init__(
|
||||
self,
|
||||
|
@ -32,12 +43,11 @@ class ClfSwitcher(BaseEstimator):
|
|||
|
||||
parameters = [
|
||||
{
|
||||
'clf__estimator': [XGBClassifier(**rs
|
||||
, **njobs]
|
||||
'clf__estimator': [XGBClassifier(**rs , **njobs, verbose = 3)]
|
||||
, 'clf__estimator__learning_rate': [0.01, 0.05, 0.1, 0.2]
|
||||
, 'clf__estimator__max_depth': [4, 6, 8, 10, 12, 16, 20]
|
||||
, 'clf__estimator__min_samples_leaf': [4, 8, 12, 16, 20]
|
||||
, 'clf__estimator__max_features': ['auto', 'sqrt']
|
||||
#, 'clf__estimator__min_samples_leaf': [4, 8, 12, 16, 20]
|
||||
#, 'clf__estimator__max_features': ['auto', 'sqrt']
|
||||
}
|
||||
]
|
||||
|
||||
|
@ -66,7 +76,7 @@ gscv_xgb_fit_be_res = gscv_xgb_fit.cv_results_
|
|||
print('Best model:\n', gscv_xgb_fit_be_mod)
|
||||
print('Best models score:\n', gscv_xgb_fit.best_score_, ':' , round(gscv_xgb_fit.best_score_, 2))
|
||||
|
||||
print('\nMean test score from fit results:', round(mean(gscv_xgb_fit_be_re['mean_test_mcc']),2))
|
||||
print('\nMean test score from fit results:', round(mean(gscv_xgb_fit_be_res['mean_test_mcc']),2))
|
||||
print('\nMean test score from fit results:', round(np.nanmean(gscv_xgb_fit_be_res['mean_test_mcc']),2))
|
||||
|
||||
######################################
|
||||
|
@ -104,17 +114,15 @@ xgb_bts_dict['bts_jaccard'] = round(jaccard_score(y_bts, test_predict),2)
|
|||
xgb_bts_dict
|
||||
|
||||
# Create a df from dict with all scores
|
||||
pd.DataFrame.from_dict(xgb_bts_dict, orient = 'index', columns = 'best_model')
|
||||
|
||||
xgb_bts_df = pd.DataFrame.from_dict(xgb_bts_dict,orient = 'index')
|
||||
xgb_bts_df.columns = ['Logistic_Regression']
|
||||
xgb_bts_df.columns = ['XGBoost']
|
||||
print(xgb_bts_df)
|
||||
|
||||
# Create df with best model params
|
||||
model_params = pd.Series(['best_model_params', list(gscv_xgb_fit_be_mod.items() )])
|
||||
model_params_df = model_params.to_frame()
|
||||
model_params_df
|
||||
model_params_df.columns = ['Logistic_Regression']
|
||||
model_params_df.columns = ['XGBoost']
|
||||
model_params_df.columns
|
||||
|
||||
# Combine the df of scores and the best model params
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue