#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 16:55:06 2022
@author: tanu
"""
# https://stackoverflow.com/questions/57248072/gridsearchcv-gives-different-result
import pandas as pd
import numpy as np
from numpy import mean
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import make_scorer, matthews_corrcoef
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# NOTE: X, y, X_train, y_train, num_df_wtgt and numerical_FN come from the data-setup script run before this one
rs = {'random_state': 42} # assumption: rs was used below but never defined in this file

mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}
#scoring_refit = {'scoring': 'recall'
#                 , 'refit': 'recall'}
scoring_refit = {'scoring': mcc_score_fn # NB: the MCC version is the one used below
                 , 'refit': 'mcc'}
#n_jobs = 10 # my desktop has 12 cores
njobs = {'n_jobs': 10}
skf_cv = StratifiedKFold(n_splits = 10, shuffle = True)
#cv = {'cv': 10}
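# Sketch, assuming reproducible runs are wanted: with shuffle = True and no
# random_state the folds differ on every run, so repeated grid searches give
# different results (the issue in the SO thread linked above); seeding the
# splitter fixes that.
#skf_cv = StratifiedKFold(n_splits = 10, shuffle = True, random_state = 42)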
gs_dt = GridSearchCV(estimator = DecisionTreeClassifier(**rs
                                                        #, class_weight = {1:10, 0:1}
                                                        ),
                     param_grid = [{'max_depth': [2, 4, 6, 8, 10]
                                    , 'criterion': ['gini', 'entropy']
                                    , 'max_features': ['auto', None]
                                    , 'max_leaf_nodes': [10, 20, 30, 40]}]
                     #, **cv
                     , cv = skf_cv
                     , **scoring_refit)
                     #, scoring = mcc_score_fn
                     #, refit = 'mcc')
#gs_dt.fit(X_train, y_train)
#gs_dt_fit = gs_dt.fit(X_train, y_train)
gs_dt_fit = gs_dt.fit(num_df_wtgt[numerical_FN] # fit once; gs_dt and gs_dt_fit are the same object
                      , num_df_wtgt['mutation_class'])
gs_dt_fit_res = gs_dt_fit.cv_results_
print('Best model:\n', gs_dt.best_params_)
print("Best model's score:\n", gs_dt.best_score_)
print('Mean test MCC across all grid candidates:\n', mean(gs_dt_fit_res['mean_test_mcc']))
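# A more direct check (sketch): best_score_ is the mean test score of the best
# candidate, which cv_results_ exposes via best_index_, rather than the mean
# over the whole grid printed above.
print('Best candidate mean test MCC:\n'
      , gs_dt_fit_res['mean_test_mcc'][gs_dt_fit.best_index_])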
#%% Check the scores:
# https://stackoverflow.com/questions/44947574/what-is-the-meaning-of-mean-test-score-in-cv-result
print([(len(train), len(test)) for train, test in skf_cv.split(num_df_wtgt[numerical_FN]
                                                               , num_df_wtgt['mutation_class'])])
gs_dt_fit.cv_results_
#%%
gs_rf = GridSearchCV(estimator = RandomForestClassifier(n_jobs = -1, oob_score = True
                                                        , class_weight = {1: 10/11, 0: 1/11})
                     , param_grid = [{'max_depth': [4, 6, 8, 10, 12, 16, 20, None]
                                      , 'max_features': ['auto', 'sqrt']
                                      , 'min_samples_leaf': [2, 4, 8]
                                      , 'min_samples_split': [10, 20]}]
                     , cv = skf_cv # cv = {'cv': 10} was never defined, so use the SKF splitter
                     , **njobs
                     , **scoring_refit # scoring_fn was never defined; use the MCC scorer dict
                     )
gs_rf_fit = gs_rf.fit(X_train, y_train)
res_rf = gs_rf_fit.cv_results_
print('Best model:\n', gs_rf.best_params_)
print("Best model's score:\n", gs_rf.best_score_)
print('Mean test MCC across all grid candidates:\n', mean(res_rf['mean_test_mcc']))
#%%
gs_lr = GridSearchCV(estimator = LogisticRegression(multi_class = 'ovr', random_state = 42
                                                    , class_weight = {1:10, 0:1}
                                                    , solver = 'liblinear') # liblinear supports both l1 and l2
                     , param_grid = [{'C': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
                                      , 'penalty': ['l1', 'l2']}]
                     , scoring = 'recall' # 'scoring' was an undefined name here
                     , cv = 10
                     , refit = 'recall')
# NB: min_samples_leaf and max_features are GradientBoostingClassifier
# parameters, not XGBClassifier ones, so the sklearn estimator is used here
gs_gb = GridSearchCV(estimator = GradientBoostingClassifier(**rs)
                     , param_grid = [{'learning_rate': [0.01, 0.05, 0.1, 0.2]
                                      , 'max_depth': [4, 6, 8, 10, 12, 16, 20]
                                      , 'min_samples_leaf': [4, 8, 12, 16, 20]
                                      , 'max_features': ['auto', 'sqrt']}]
                     , scoring = 'recall' # 'scoring' was an undefined name here
                     , cv = 10
                     , n_jobs = 4
                     , refit = 'recall')
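# Usage sketch (assumes X_train/y_train from the data-setup script): gs_lr and
# gs_gb are only defined above, never fitted; they follow the same
# fit-and-report pattern as gs_dt. Left commented out as the grids are large.
#for gs in [gs_lr, gs_gb]:
#    gs.fit(X_train, y_train)
#    print('Best model:\n', gs.best_params_)
#    print("Best model's score:\n", gs.best_score_)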
#%%
parameter_space = {
    'hidden_layer_sizes': [(1,), (2,), (3,)] # NB: (1) is just the int 1; layer sizes must be tuples
    }
gs_mlp = MLPClassifier(max_iter = 600)
clf = GridSearchCV(gs_mlp, parameter_space, n_jobs = 10, cv = 10) # parameter_space must be defined by now
clf.fit(X_train, y_train.values.ravel()) # must be fitted before best_params_ is available
print('Best parameters found:\n', clf.best_params_)
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean_score, std, params in zip(means, stds, clf.cv_results_['params']): # avoid shadowing mean()
    print("%0.3f (+/-%0.03f) for %r" % (mean_score, std * 2, params))
########################################################################
#%%: Hyperparams with SKF and trying different scoring functions
# https://stackoverflow.com/questions/57248072/gridsearchcv-gives-different-result
# https://stackoverflow.com/questions/44947574/what-is-the-meaning-of-mean-test-score-in-cv-result
#https://stackoverflow.com/questions/47257952/how-to-get-average-score-of-k-fold-cross-validation-with-sklearn
# If you only want accuracy, then you can simply use cross_val_score():
# kf = KFold(n_splits = 10)
# clf_tree = DecisionTreeClassifier()
# scores = cross_val_score(clf_tree, X, y, cv = kf)
# avg_score = np.mean(scores)
# print(avg_score)
# Here cross_val_score takes your original X and y (without splitting into
# train and test). It splits them into train and test itself, fits the model
# on the train data and scores on the test data, and returns those scores.
# So with 10 folds, 10 scores come back in the scores variable; you can then
# just take their average.
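# Multi-metric analogue (sketch, assuming X and y exist): cross_validate is
# the cross_val_score counterpart that accepts several scorers at once, e.g.
# recall plus the MCC scorer defined above.
# from sklearn.model_selection import cross_validate
# cv_out = cross_validate(DecisionTreeClassifier(**rs), X, y, cv = skf_cv
#                         , scoring = {'recall': 'recall', **mcc_score_fn})
# print(mean(cv_out['test_recall']), mean(cv_out['test_mcc']))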
mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}
scoring_refit_recall = {'scoring': 'recall'
                        , 'refit': 'recall'}
scoring_refit_precision = {'scoring': 'precision' # was assigned to scoring_refit_recall, silently overwriting it
                           , 'refit': 'precision'}
scoring_refit_mcc = {'scoring': mcc_score_fn
                     , 'refit': 'mcc'}
#n_jobs = 10 # my desktop has 12 cores
#cv = {'cv': 10}
njobs = {'n_jobs': 10}
skf_cv = StratifiedKFold(n_splits = 10, shuffle = True)
#%% GSCV: RandomForest
gs_rf = GridSearchCV(estimator = RandomForestClassifier(n_jobs = -1, oob_score = True
                                                        #, class_weight = {1: 10/11, 0: 1/11}
                                                        )
                     , param_grid = [{'max_depth': [4, 6, 8, 10, None]
                                      , 'max_features': ['auto', 'sqrt']
                                      , 'min_samples_leaf': [2, 4, 8]
                                      , 'min_samples_split': [10, 20]}]
                     , cv = skf_cv
                     , **njobs
                     , **scoring_refit_recall
                     #, **scoring_refit_mcc
                     #, scoring = scoring_fn, refit = False
                     )
#gs_rf.fit(X_train, y_train)
#gs_rf_fit = gs_rf.fit(X_train, y_train)
gs_rf_fit = gs_rf.fit(X, y) # fit once; gs_rf and gs_rf_fit are the same object
gs_rf_res = gs_rf_fit.cv_results_
print('Best model:\n', gs_rf.best_params_)
print("Best model's score:\n", gs_rf.best_score_)
print('Mean test score across all grid candidates:\n', mean(gs_rf_res['mean_test_score']))
#%% Proof of concept: manual inspection to see how the best score is calculated!
# SATISFIED!
# Best model example: recall, best model's score: 0.8059288537549408
# {'max_depth': 4, 'max_features': 'sqrt', 'min_samples_leaf': 2, 'min_samples_split': 10}
# Best model example: mcc, best model's score: 0.42504894661702863
# {'max_depth': 4, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 20}
# Best model example: precision, best model's score: 0.7144745254745255
# {'max_depth': 6, 'max_features': 'sqrt', 'min_samples_leaf': 8, 'min_samples_split': 10}
best_model = [{'max_depth': 6, 'max_features': 'sqrt', 'min_samples_leaf': 8, 'min_samples_split': 10 }]
gs_results_df = pd.DataFrame(gs_rf_res)
gs_results_df.shape
gs_best_df = gs_results_df.loc[gs_results_df['params'].isin(best_model)]
gs_best_df.shape
gs_best_df_test = gs_best_df.filter(like = 'test_', axis = 1)
gs_best_df_test.shape
gs_best_df_test_recall = gs_best_df_test.filter(like = '_score', axis = 1)
gs_best_df_test_recall.shape
f = gs_best_df_test_recall.filter(like='split', axis = 1)
f.shape
#gs_best_df_test_mcc = gs_best_df_test.filter(like = '_mcc', axis = 1)
#f = gs_best_df_test_mcc.filter(like='split', axis = 1)
f.mean(axis = 1) # average across the 10 split columns
# recall: 0.801186 vs 0.8059288537549408
# mcc: 0.425049 vs 0.42504894661702863
# precision: 0.714475 vs 0.7144745254745255
#%%
#%% Check the scores:
print([(len(train), len(test)) for train, test in skf_cv.split(X, y)])
gs_rf_fit.cv_results_
# is it the weighted average!? see the check below
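# Answer sketch: in recent scikit-learn versions mean_test_score is the plain
# (unweighted) mean of the per-split scores (the old iid weighting was removed).
# Quick check against the best candidate, reusing the fitted gs_rf from above:
best_ix = gs_rf_fit.best_index_
split_scores = [gs_rf_res['split%d_test_score' % i][best_ix]
                for i in range(skf_cv.get_n_splits())]
print('Unweighted mean of split scores:', mean(split_scores))
print('best_score_:', gs_rf_fit.best_score_)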
#%%