added all classification algorithms params for gridsearch

This commit is contained in:
Tanushree Tunstall 2022-03-21 13:51:20 +00:00
parent d012542435
commit 0c4f1e1e5f
8 changed files with 503 additions and 110 deletions


@@ -1,106 +1,158 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 20 13:02:54 2022
@author: tanu
"""
# https://stackoverflow.com/questions/57248072/gridsearchcv-gives-different-result
# https://stackoverflow.com/questions/44947574/what-is-the-meaning-of-mean-test-score-in-cv-result
#https://stackoverflow.com/questions/47257952/how-to-get-average-score-of-k-fold-cross-validation-with-sklearn
# https://machinelearningmastery.com/hyperparameters-for-classification-machine-learning-algorithms/
#%% LogisticRegression
# example of grid searching key hyperparameters for logistic regression
from sklearn.datasets import make_blobs
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
# define dataset
X, y = make_blobs(n_samples=1000, centers=2, n_features=100, cluster_std=20)
# define models and parameters
model = LogisticRegression()
solvers = ['newton-cg', 'lbfgs', 'liblinear']
penalty = ['l2']
c_values = [100, 10, 1.0, 0.1, 0.01]
# define grid search
grid = dict(solver=solvers,penalty=penalty,C=c_values)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = grid_search.fit(X, y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
#%% RidgeClassifier
from sklearn.datasets import make_blobs
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import RidgeClassifier
# define dataset
X, y = make_blobs(n_samples=1000, centers=2, n_features=100, cluster_std=20)
# define models and parameters
model = RidgeClassifier()
alpha = [0.9, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.1, 1.0]
# define grid search
grid = dict(alpha=alpha)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = grid_search.fit(X, y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# NOTES:
# alpha: if all alphas return the same mean, which one do you choose?
# Python seems to choose the first one.
# https://stats.stackexchange.com/questions/166950/alpha-parameter-in-ridge-regression-is-high
# The L2 norm term in ridge regression is weighted by the regularization parameter
# alpha. So the alpha parameter need not be small; but the larger the alpha, the
# more constrained (less flexible) the fit becomes.
#https://stackoverflow.com/questions/47257952/how-to-get-average-score-of-k-fold-cross-validation-with-sklearn
# If you only want accuracy, then you can simply use cross_val_score():
# from sklearn.model_selection import KFold, cross_val_score
# from sklearn.tree import DecisionTreeClassifier
# import numpy as np
# kf = KFold(n_splits=10)
# clf_tree = DecisionTreeClassifier()
# scores = cross_val_score(clf_tree, X, y, cv=kf)
# avg_score = np.mean(scores)
# print(avg_score)
# Here cross_val_score takes your original X and y (without splitting into train and
# test): it splits them itself, fits the model on the train folds and scores on the
# test fold, and returns those scores. With 10 folds, 10 scores are returned in the
# scores variable, and you can then simply take their average.
# If the alpha value is 0, the model is just an Ordinary Least Squares regression.
# The larger the alpha, the stronger the smoothness constraint; the smaller the
# alpha, the larger the magnitude of the coefficients.
from sklearn.metrics import make_scorer, matthews_corrcoef
mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}
# Could be that the model does not fit very well. With a very large alpha,
# the algorithm more or less ignores the IVs and fits a mean. -- Placidia
# @Placidia, yes I would completely agree with your comment. I was just trying to
# explain the significance of alpha as a parameter (as asked in the question) in
# Ridge Regression, and how its change would affect the fit and the coefficients.
# Thank you for including the point in the comment.
# ** READ: https://machinelearningcompass.com/machine_learning_models/ridge_regression/
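# Quick illustrative sketch (my addition, not part of the grid search above):
# on the same blobs data, a larger alpha shrinks the RidgeClassifier coefficients
# towards zero (a more constrained, smoother fit), while a small alpha lets them
# grow towards the unregularised least-squares solution.
import numpy as np
for a in [0.01, 1, 100, 10000]:
    coef_norm = np.linalg.norm(RidgeClassifier(alpha=a).fit(X, y).coef_)
    print('alpha=%g -> ||coef|| = %.4f' % (a, coef_norm))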
#%% KNeighborsClassifier
from sklearn.datasets import make_blobs
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
# define dataset
X, y = make_blobs(n_samples=1000, centers=2, n_features=100, cluster_std=20)
# define models and parameters
model = KNeighborsClassifier()
n_neighbors = range(1, 21, 2)
weights = ['uniform', 'distance']
metric = ['euclidean', 'manhattan', 'minkowski']
#p = [1,2]
# define grid search
grid = dict(n_neighbors=n_neighbors,weights=weights,metric=metric)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = grid_search.fit(X, y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# NOTES:
# https://vitalflux.com/k-nearest-neighbors-explained-with-python-examples/
# https://vitalflux.com/overfitting-underfitting-concepts-interview-questions/
# Larger value of K ==> model may underfit
# Smaller value of K ==> the model may overfit.
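# Small illustration of the note above (my addition, not part of the grid search):
# compare train accuracy vs cross-validated accuracy for a very small and a fairly
# large k on the same blobs data. k=1 memorises the training set (overfitting risk),
# while a large k gives a smoother, more constrained fit (underfitting risk).
from sklearn.model_selection import cross_val_score
import numpy as np
for k in [1, 19]:
    knn = KNeighborsClassifier(n_neighbors=k).fit(X, y)
    cv_acc = np.mean(cross_val_score(KNeighborsClassifier(n_neighbors=k), X, y, cv=5))
    print('k=%d: train acc=%.3f, 5-fold CV acc=%.3f' % (k, knn.score(X, y), cv_acc))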
#%% Support Vector Machine (SVM)
# example of grid searching key hyperparameters for SVC
from sklearn.datasets import make_blobs
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
# define dataset
X, y = make_blobs(n_samples=1000, centers=2, n_features=100, cluster_std=20)
# define model and parameters
model = SVC()
kernel = ['poly', 'rbf', 'sigmoid']
C = [50, 10, 1.0, 0.1, 0.01]
gamma = ['scale']
# define grid search
grid = dict(kernel=kernel,C=C,gamma=gamma)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_result = grid_search.fit(X, y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
# NOTES:
# https://stats.stackexchange.com/questions/31066/what-is-the-influence-of-c-in-svms-with-linear-kernel
# SVM terms: hyperplane, C and soft margins
# the hyperplane is chosen to maximise the minimum distance (margin) of the support vectors from it
# High C ==> increase overfitting
# Low C ==> increase underfitting
scoring_refit_recall = {'scoring': 'recall'
,'refit': 'recall'}
# But if C is a regularization parameter, why does a high C increase
# overfitting, when generally speaking regularization is done to
# mitigate overfitting, i.e., by creating a more general model?
# C is a regularisation parameter, but it is essentially attached to
# the data misfit term (the sum of the slack variables) rather than
# the regularisation term (the margin bit), so a larger value of C
# means less regularisation, rather than more. Alternatively you can
# view the usual representation of the regularisation parameter
# as 1/C.
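# Rough sketch of the point above (illustration only, names like svc_lin are mine):
# C scales the misfit/slack term, so a larger C means *less* regularisation.
# With a linear kernel, the weight vector typically grows (narrower margin) and
# training accuracy creeps up as C increases.
import numpy as np
for c in [0.01, 1, 100]:
    svc_lin = SVC(kernel='linear', C=c).fit(X, y)
    print('C=%g: ||w||=%.4f, train acc=%.3f'
          % (c, np.linalg.norm(svc_lin.coef_), svc_lin.score(X, y)))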
scoring_refit_precision = {'scoring': 'precision'
                           ,'refit': 'precision'}
scoring_refit_mcc = {'scoring': mcc_score_fn
,'refit': 'mcc'}
#n_jobs = 10 # my desktop has 12 cores
#cv = {'cv': 10}#%%
import numpy as np
from sklearn.model_selection import StratifiedKFold
njobs = {'n_jobs': 10}
skf_cv = StratifiedKFold(n_splits = 10, shuffle = True)
#%% GSCV: RandomForest
from sklearn.ensemble import RandomForestClassifier
gs_rf = GridSearchCV(estimator=RandomForestClassifier(n_jobs=-1, oob_score = True
#,class_weight = {1: 10/11, 0: 1/11}
)
, param_grid=[{'max_depth': [4, 6, 8, 10, None]
, 'max_features': ['auto', 'sqrt']
, 'min_samples_leaf': [2, 4, 8]
, 'min_samples_split': [10, 20]}]
, cv = skf_cv
, **njobs
, **scoring_refit_recall
#, **scoring_refit_mcc
#, scoring = scoring_fn, refit = False
)
#gs_rf.fit(X_train, y_train)
#gs_rf_fit = gs_rf.fit(X_train, y_train)
gs_rf_fit = gs_rf.fit(X, y)
gs_rf_res = gs_rf_fit.cv_results_
print('Best model:\n', gs_rf.best_params_)
print("Best model's score:\n", gs_rf.best_score_)
print("Check mean of models' mean_test_score:\n", np.mean(gs_rf_res['mean_test_score']))
#%% Proof of concept: manual inspection to see how the best score is calculated!
# SATISFIED!
# Best model example: recall, Best model's score: 0.8059288537549408
# {'max_depth': 4, 'max_features': 'sqrt', 'min_samples_leaf': 2, 'min_samples_split': 10}
# Best model example: mcc, Best model's score: 0.42504894661702863
# {'max_depth': 4, 'max_features': 'auto', 'min_samples_leaf': 4, 'min_samples_split': 20}
# Best model example: precision, Best model's score: 0.7144745254745255
# {'max_depth': 6, 'max_features': 'sqrt', 'min_samples_leaf': 8, 'min_samples_split': 10}
best_model = [{'max_depth': 6, 'max_features': 'sqrt', 'min_samples_leaf': 8, 'min_samples_split': 10 }]
import pandas as pd
gs_results_df = pd.DataFrame(gs_rf_res)
gs_results_df.shape
gs_best_df = gs_results_df.loc[gs_results_df['params'].isin(best_model)]
gs_best_df.shape
gs_best_df_test = gs_best_df.filter(like = 'test_', axis = 1)
gs_best_df_test.shape
gs_best_df_test_recall = gs_best_df_test.filter(like = '_score', axis = 1)
gs_best_df_test_recall.shape
f = gs_best_df_test_recall.filter(like='split', axis = 1)
f.shape
#gs_best_df_test_mcc = gs_best_df_test.filter(like = '_mcc', axis = 1)
#f = gs_best_df_test_mcc.filter(like='split', axis = 1)
f.iloc[:,[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]].mean(axis = 1)
# recall: 0.801186 vs 0.8059288537549408
# mcc: 0.425049 vs 0.42504894661702863
# precision: 0.714475 vs 0.7144745254745255
#%%
#%% Check the scores:
print([(len(train), len(test)) for train, test in skf_cv.split(X, y)])
gs_rf_fit.cv_results_
# Is it a weighted average? See the quick check below.
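# Quick check (my addition): in recent scikit-learn versions mean_test_score is
# simply the unweighted mean of the per-split test scores, which is what the
# manual inspection above recovers.
import numpy as np
split_cols = [c for c in gs_results_df.columns
              if c.startswith('split') and c.endswith('_test_score')]
manual_mean = gs_results_df[split_cols].mean(axis=1)
print(np.allclose(manual_mean, gs_results_df['mean_test_score']))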
#%%
# C is a regularization parameter that controls the trade-off between achieving a
# low training error and a low testing error, i.e. the ability to generalize the
# classifier to unseen data.
# The C parameter also controls how many outliers are tolerated (see the small
# check below):
# low C  ==> we allow more outliers (softer margin)
# high C ==> we allow fewer outliers (harder margin)
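# Illustration of the outlier point above (my addition, not part of the grid search):
# with a low C the soft margin tolerates many points inside or on the wrong side of
# the margin, so lots of training points end up as support vectors; with a high C
# the fit tries harder to classify every training point and typically uses fewer.
for c in [0.01, 100]:
    svc_c = SVC(kernel='rbf', C=c, gamma='scale').fit(X, y)
    print('C=%g -> support vectors per class: %s' % (c, svc_c.n_support_))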