Playing with GridSearchCV and a base estimator

This commit is contained in:
Tanushree Tunstall 2022-03-17 18:19:43 +00:00
parent 458a933d73
commit 5138036d8b
2 changed files with 218 additions and 0 deletions

112
hyperparams.py Normal file
View file

@ -0,0 +1,112 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 16:55:06 2022
@author: tanu

Shared scoring / CV configuration for the GridSearchCV experiments below.
"""
# https://stackoverflow.com/questions/57248072/gridsearchcv-gives-different-result

# Custom scorer: Matthews correlation coefficient (better suited than accuracy
# for the imbalanced mutation classes used below).
mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}

# Scoring/refit kwargs, splatted into GridSearchCV(**scoring_refit).
# NOTE(review): the original first set this to {'scoring': 'recall',
# 'refit': 'recall'} and immediately overwrote it; only the MCC variant
# (the one that actually took effect) is kept.
scoring_refit = {'scoring': mcc_score_fn
                 , 'refit': 'mcc'}

# n_jobs kwarg dict (the desktop has 12 cores; leave some headroom).
njobs = {'n_jobs': 10}

# Stratified 10-fold CV shared by the grid searches below.
# NOTE(review): shuffle=True without a random_state makes splits (and hence
# scores) non-reproducible between runs — consider passing random_state.
skf_cv = StratifiedKFold(n_splits=10, shuffle=True)
# Decision-tree grid search: depth / split criterion / feature subsetting /
# leaf-node cap, scored and refit on MCC via **scoring_refit (defined above).
gs_dt = GridSearchCV(estimator=DecisionTreeClassifier(**rs
                                                      #,class_weight = {1:10, 0:1}
                                                      ),
                     param_grid=[{'max_depth': [2, 4, 6, 8, 10]
                                  , 'criterion': ['gini', 'entropy']
                                  , 'max_features': ['auto', None]
                                  , 'max_leaf_nodes': [10, 20, 30, 40]}]
                     , cv=skf_cv
                     , **scoring_refit)

# Fit ONCE. GridSearchCV.fit returns the (same, now fitted) search object, so
# the original's back-to-back gs_dt.fit(...) calls ran the whole grid twice.
gs_dt_fit = gs_dt.fit(num_df_wtgt[numerical_FN]
                      , num_df_wtgt['mutation_class'])
gs_dt_fit_res = gs_dt_fit.cv_results_

print('Best model:\n', gs_dt.best_params_)
print('Best models score:\n', gs_dt.best_score_)
# Cross-check: best_score_ is the mean test MCC of the BEST candidate
# (cv_results_ row best_index_), not the grand mean over all grid points —
# the original compared against mean(mean_test_mcc), which never matches.
print('Check best models score:\n', gs_dt_fit_res['mean_test_mcc'][gs_dt.best_index_])

#%% Check the scores:
# https://stackoverflow.com/questions/44947574/what-is-the-meaning-of-mean-test-score-in-cv-result
# Fold sizes per split, to sanity-check the stratified partitioning.
print([(len(train), len(test)) for train, test in skf_cv.split(num_df_wtgt[numerical_FN]
                                                               , num_df_wtgt['mutation_class'])])
#%%
# Random-forest grid search, made consistent with the decision-tree search:
# same stratified CV object and the same MCC scoring/refit.
gs_rf = GridSearchCV(estimator=RandomForestClassifier(n_jobs=-1, oob_score=True
                                                      , class_weight={1: 10/11, 0: 1/11})
                     , param_grid=[{'max_depth': [4, 6, 8, 10, 12, 16, 20, None]
                                    , 'max_features': ['auto', 'sqrt']
                                    , 'min_samples_leaf': [2, 4, 8]
                                    , 'min_samples_split': [10, 20]}]
                     # Original splatted **cv, but that dict was commented out
                     # above -> NameError; use the shared CV object instead.
                     , cv=skf_cv
                     , **njobs
                     # Original passed `scoring = scoring_fn`, which is never
                     # defined -> NameError; reuse the MCC scoring/refit dict.
                     , **scoring_refit)

# Fit once and keep the result; the original read `res_rf` below without
# ever assigning it (the assignment line was commented out).
gs_rf_fit = gs_rf.fit(X_train, y_train)
res_rf = gs_rf_fit.cv_results_

print('Best model:\n', gs_rf.best_params_)
print('Best models score:\n', gs_rf.best_score_)
# With MCC scoring the cv_results_ key is 'mean_test_mcc' (not
# 'mean_test_score'), and best_score_ is the best candidate's mean.
print('Check best models score:\n', res_rf['mean_test_mcc'][gs_rf.best_index_])
#%%
# Logistic-regression grid search, scored and refit on recall.
# NOTE(review): the original passed an undefined `scoring` variable
# (NameError). refit='recall' requires a scoring collection that contains
# 'recall', so a one-element list is the minimal working choice.
gs_lr = GridSearchCV(estimator=LogisticRegression(multi_class='ovr'
                                                  , random_state=42
                                                  , class_weight={1: 10, 0: 1}
                                                  # The default lbfgs solver rejects the 'l1'
                                                  # penalty in the grid; liblinear supports
                                                  # both 'l1' and 'l2' and handles ovr.
                                                  , solver='liblinear'),
                     param_grid=[{'C': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1],
                                  'penalty': ['l1', 'l2']}],
                     scoring=['recall'],
                     cv=10,
                     refit='recall')

# Gradient-boosting (XGBoost) grid search.
# NOTE(review): 'min_samples_leaf' and 'max_features' are sklearn tree names,
# not native XGBClassifier hyperparameters — confirm these are intended
# (XGBoost uses e.g. min_child_weight / colsample_bytree).
gs_gb = GridSearchCV(estimator=XGBClassifier(n_jobs=-1),
                     param_grid=[{'learning_rate': [0.01, 0.05, 0.1, 0.2],
                                  'max_depth': [4, 6, 8, 10, 12, 16, 20],
                                  'min_samples_leaf': [4, 8, 12, 16, 20],
                                  'max_features': ['auto', 'sqrt']}],
                     scoring=['recall'],
                     cv=10,
                     n_jobs=4,
                     refit='recall')
#%%
#%%
# MLP grid search over hidden-layer sizes.
# Define the search space BEFORE constructing GridSearchCV — the original
# referenced `parameter_space` two lines before assigning it (NameError).
# Entries must be tuples: (1) is just the int 1, which MLPClassifier rejects.
parameter_space = {
    'hidden_layer_sizes': [(1,), (2,), (3,)],
}
gs_mlp = MLPClassifier(max_iter=600)
clf = GridSearchCV(gs_mlp, parameter_space, n_jobs=10, cv=10)
# Must fit before reading best_params_ / cv_results_ (the original left the
# fit commented out, so the prints below raised NotFittedError).
clf.fit(X_train, y_train.values.ravel())

print('Best parameters found:\n', clf.best_params_)

# Report mean +/- 2*std test score for every grid point.
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))