remove redundant stuff

This commit is contained in:
Tanushree Tunstall 2022-05-22 23:31:58 +01:00
parent 80e6b3af96
commit c4142f441d
3 changed files with 0 additions and 723 deletions

@@ -1,154 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 05:59:12 2022
@author: tanu
"""
#%% Logistic Regression + hyperparams + feature selection (RFECV)
model_lr = LogisticRegression(**rs)
model_rfecv = RFECV(estimator = model_lr
, cv = rskf_cv
#, cv = 10
, scoring = 'matthews_corrcoef'
)
param_grid2 = [
{
#'clf__estimator': [LogisticRegression(**rs)],
#'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'C': np.logspace(0, 4, 10),
'penalty': ['none', 'l1', 'l2'], # 'elasticnet' also needs an l1_ratio; see the extra sub-grid below
'max_iter': list(range(100,800,100)),
'solver': ['saga']
},
{
#'clf__estimator': [LogisticRegression(**rs)],
#'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'C': np.logspace(0, 4, 10),
'penalty': ['l2', 'none'],
'max_iter': list(range(100,800,100)),
'solver': ['newton-cg', 'lbfgs', 'sag']
},
{
#'clf__estimator': [LogisticRegression(**rs)],
#'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'C': np.logspace(0, 4, 10),
'penalty': ['l1', 'l2'],
'max_iter': list(range(100,800,100)),
'solver': ['liblinear']
}
]
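# 'elasticnet' additionally requires an l1_ratio, otherwise LogisticRegression
# errors out during the search. A hedged extra sub-grid (the l1_ratio values
# are illustrative assumptions, not from the original script):
param_grid2.append({
'C': np.logspace(0, 4, 10),
'penalty': ['elasticnet'],
'l1_ratio': [0.25, 0.5, 0.75],
'max_iter': list(range(100,800,100)),
'solver': ['saga']
})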
#-------------------------------------------------------------------------------
# Grid search CV + FS
gscv_lr = GridSearchCV(model_lr
, param_grid2
, scoring = mcc_score_fn, refit = 'mcc'
, cv = skf_cv
, return_train_score = False
, verbose = 3
, **njobs)
#------------------------------------------------------------------------------
# Create pipeline
pipeline = Pipeline([('pre', MinMaxScaler())
#, ('feature_selection', sfs_selector)
, ('feature_selection', model_rfecv )
, ('clf', gscv_lr)])
# Fit
lr_fs_fit = pipeline.fit(X,y)
# The grid search is the 'clf' step of the pipeline, so the best model and
# CV results live there (a plain Pipeline has no best_params_ of its own)
lr_fs_fit_be_mod = lr_fs_fit.named_steps['clf'].best_params_
lr_fs_fit_be_res = lr_fs_fit.named_steps['clf'].cv_results_
print('Best model:\n', lr_fs_fit_be_mod)
print('Best model score:\n', lr_fs_fit.named_steps['clf'].best_score_
      , ':', round(lr_fs_fit.named_steps['clf'].best_score_, 2))
######################################
# Blind test
######################################
# See how it does on the BLIND test
test_predict = lr_fs_fit.predict(X_bts)
print(test_predict)
print(np.array(y_bts))
y_btsf = np.array(y_bts)
print(accuracy_score(y_bts, test_predict))
print(matthews_corrcoef(y_bts, test_predict))
# create a dict with all scores
lr_bts_dict = {#'best_model': list(gscv_lr_fit_be_mod.items())
'bts_fscore':None
, 'bts_mcc':None
, 'bts_precision':None
, 'bts_recall':None
, 'bts_accuracy':None
, 'bts_roc_auc':None
, 'bts_jaccard':None }
lr_bts_dict
lr_bts_dict['bts_fscore'] = round(f1_score(y_bts, test_predict),2)
lr_bts_dict['bts_mcc'] = round(matthews_corrcoef(y_bts, test_predict),2)
lr_bts_dict['bts_precision'] = round(precision_score(y_bts, test_predict),2)
lr_bts_dict['bts_recall'] = round(recall_score(y_bts, test_predict),2)
lr_bts_dict['bts_accuracy'] = round(accuracy_score(y_bts, test_predict),2)
lr_bts_dict['bts_roc_auc'] = round(roc_auc_score(y_bts, test_predict),2)
lr_bts_dict['bts_jaccard'] = round(jaccard_score(y_bts, test_predict),2)
lr_bts_dict
# Create a df from dict with all scores
lr_bts_df = pd.DataFrame.from_dict(lr_bts_dict,orient = 'index')
lr_bts_df.columns = ['Logistic_Regression']
print(lr_bts_df)
# d2 = {'best_model_params': lis(gscv_lr_fit_be_mod.items() )}
# d2
# def Merge(dict1, dict2):
# res = {**dict1, **dict2}
# return res
# d3 = Merge(d2, lr_bts_dict)
# d3
# Create df with best model params
model_params = pd.Series(['best_model_params', list(lr_fs_fit_be_mod.items() )])
model_params_df = model_params.to_frame()
model_params_df
model_params_df.columns = ['Logistic_Regression']
model_params_df.columns
# Combine the df of scores and the best model params
lr_bts_df.columns
lr_output = pd.concat([model_params_df, lr_bts_df], axis = 0)
lr_output
# Format the combined df
# Drop the best_model_params row from lr_output
lr_df = lr_output.drop([0], axis = 0)
lr_df
#FIXME: tidy the index of the formatted df
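# A hedged sketch (not in the original script) of pulling the RFECV-selected
# features out of the fitted pipeline; assumes X is a pandas DataFrame:
fs_step = lr_fs_fit.named_steps['feature_selection']
sel_features = X.columns[fs_step.get_support()]
print('\nNo. of features selected by RFECV:', fs_step.n_features_
      , '\nThese are:', list(sel_features))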
###############################################################################

@@ -1,253 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 05:59:12 2022
@author: tanu
"""
#%% Logistic Regression + hyperparams + feature selection (RFECV)
# model_lr = LogisticRegression(**rs)
# model_rfecv = RFECV(estimator = model_lr
# , cv = skf_cv
# #, cv = 10
# , min_features_to_select = 1 # default
# , scoring = 'matthews_corrcoef'
# )
# param_grid2 = [
# {
# #'clf': [LogisticRegression(**rs)],
# #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
# 'C': np.logspace(0, 4, 10),
# 'penalty': ['none', 'l1', 'l2', 'elasticnet'],
# 'max_iter': list(range(100,800,100)),
# 'solver': ['saga']
# },
# {
# #'clf': [LogisticRegression(**rs)],
# #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
# 'C': np.logspace(0, 4, 10),
# 'penalty': ['l2', 'none'],
# 'max_iter': list(range(100,800,100)),
# 'solver': ['newton-cg', 'lbfgs', 'sag']
# },
# {
# #'clf': [LogisticRegression(**rs)],
# #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
# 'C': np.logspace(0, 4, 10),
# 'penalty': ['l1', 'l2'],
# 'max_iter': list(range(100,800,100)),
# 'solver': ['liblinear']
# }
# ]
# #-------------------------------------------------------------------------------
# # Grid search CV + FS
# gscv_lr = GridSearchCV(estimator = model_lr
# , param_grid = param_grid2
# , scoring = mcc_score_fn, refit = 'mcc'
# , cv = skf_cv
# , return_train_score = False
# , verbose = 3
# , **njobs)
#------------------------------------------------------------------------------
################
# NOTE: if the grid search goes *inside* the pipeline (as above), the
# best model cannot be pulled out of the fitted pipeline directly
################
# Create pipeline
# pipeline = Pipeline([('pre', MinMaxScaler())
# #, ('fs', sfs_selector)
# , ('fs', model_rfecv )
# , ('clf', gscv_lr)])
# # Fit # dont assign fit
# #lr_fs_fit = pipeline.fit(X,y)
# pipeline.fit(X,y)
# pipeline.best_params_
# #https://github.com/scikit-learn/scikit-learn/issues/7536
# n_fs = gscv_lr.best_estimator_.n_features_in_
# n_fs
# sel_features = X.columns[pipeline.named_steps['fs'].get_support()]
# print('\nNo. of features selected with RFECV for model'
# , pipeline.named_steps['clf'].estimator
# , ':', n_fs
# , '\nThese are:', sel_features
# )
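# Why the approach above is awkward: a fitted Pipeline has no best_params_;
# the search results sit on the nested GridSearchCV step instead. A hedged
# sketch, assuming the commented-out pipeline above had been fitted:
#   pipeline.named_steps['clf'].best_params_   # works: the inner GridSearchCV
#   pipeline.best_params_                      # AttributeError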
##############################################################
# THIS ONE
#########
# Make Pipeline go into GS with FS
#########
# step 1: specify model
#modLR = LogisticRegression(**rs)
# step 2: specify fs
#model_rfecv = RFECV(estimator = model_lr
# , cv = skf_cv
#, min_features_to_select = 1 # default
#, scoring = 'matthews_corrcoef'
#)
# step 3: specify param grid as dict
param_grid2 = [
# NB: as a separate sub-grid, these fs settings are searched on their
# own and are not combined with the clf sub-grids below
{'fs__min_features_to_select': [1]
, 'fs__cv': [skf_cv]
},
# {
# #'clf': [LogisticRegression(**rs)],
# 'clf__C': np.logspace(0, 4, 10),
# 'clf__penalty': ['none', 'l1', 'l2', 'elasticnet'],
# 'clf__max_iter': list(range(100,800,100)),
# 'clf__solver': ['saga']
# },
# {
# #'clf': [LogisticRegression(**rs)],
# 'clf__C': np.logspace(0, 4, 10),
# 'clf__penalty': ['l2', 'none'],
# 'clf__max_iter': list(range(100,800,100)),
# 'clf__solver': ['newton-cg', 'lbfgs', 'sag']
# },
# {
# #'clf': [LogisticRegression(**rs)],
# 'clf__C': np.logspace(0, 4, 10),
# 'clf__penalty': ['l1', 'l2'],
# 'clf__max_iter': list(range(100,800,100)),
# 'clf__solver': ['liblinear']
# }
{ #'clf': [LogisticRegression(**rs)],
'clf__C': np.logspace(0, 4, 10),
'clf__penalty': ['l2'],
'clf__max_iter': [100],
'clf__solver': ['liblinear']
},
{ #'clf': [LogisticRegression(**rs)],
'clf__C': np.logspace(0, 4, 10),
'clf__penalty': ['l2'],
'clf__max_iter':[100],
'clf__solver': ['saga']
}
]
# step 4: create pipeline
pipeline = Pipeline([
('pre', MinMaxScaler())
#, ('fs', model_rfecv)
, ('fs', RFECV(LogisticRegression(**rs), scoring = 'matthews_corrcoef'))
, ('clf', LogisticRegression(**rs))])
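# The 'fs__'/'clf__' prefixes in param_grid2 map onto the step names above,
# e.g. 'clf__C' tunes C of the LogisticRegression in the 'clf' step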
# step 5: Perform Gridsearch CV
gs_final = GridSearchCV(pipeline
, param_grid2
, cv = skf_cv
, scoring = mcc_score_fn, refit = 'mcc'
, verbose = 1
, return_train_score = False
, **njobs)
#fit
gs_final.fit(X,y)
gs_final.best_params_
gs_final.best_score_
gs_final.best_estimator_
# assign the fit
#gsfit = gs_final.fit(X,y)
#gsfit.best_estimator_
#gsfit.best_params_
#gsfit.best_score_
test_predict = gs_final.predict(X_bts)
print(test_predict)
print('\nMCC on Blind test:' , round(matthews_corrcoef(y_bts, test_predict),2))
print('\nAccuracy on Blind test:', round(accuracy_score(y_bts, test_predict),2))
# Now get the features out
all_features = gs_final.feature_names_in_
#all_features = gsfit.feature_names_in_
sel_features = X.columns[gs_final.best_estimator_.named_steps['fs'].get_support()]
n_sf = gs_final.best_estimator_.named_steps['fs'].n_features_
# get model name
model_name = gs_final.best_estimator_.named_steps['clf']
b_model_params = gs_final.best_params_
print('\n========================================'
, '\nRunning model:'
, '\nModel name:', model_name
, '\n==============================================='
, '\nRunning feature selection with RFECV for model'
, '\nTotal no. of features in model:', len(all_features)
, '\nThese are:\n', all_features, '\n\n'
, '\nNo of features for best model: ', n_sf
, '\nThese are:', sel_features, '\n\n'
, '\nBest Model hyperparams:', b_model_params
)
######################################
# Blind test
######################################
# See how it does on the BLIND test
#test_predict = gscv_lr_fit.predict(X_bts)
test_predict = gs_final.predict(X_bts)
print(test_predict)
print(accuracy_score(y_bts, test_predict))
print(matthews_corrcoef(y_bts, test_predict))
# create a dict with all scores
lr_bts_dict = {#'best_model': list(gscv_lr_fit_be_mod.items())
'bts_fscore':None
, 'bts_mcc':None
, 'bts_precision':None
, 'bts_recall':None
, 'bts_accuracy':None
, 'bts_roc_auc':None
, 'bts_jaccard':None }
lr_bts_dict
lr_bts_dict['bts_fscore'] = round(f1_score(y_bts, test_predict),2)
lr_bts_dict['bts_mcc'] = round(matthews_corrcoef(y_bts, test_predict),2)
lr_bts_dict['bts_precision'] = round(precision_score(y_bts, test_predict),2)
lr_bts_dict['bts_recall'] = round(recall_score(y_bts, test_predict),2)
lr_bts_dict['bts_accuracy'] = round(accuracy_score(y_bts, test_predict),2)
lr_bts_dict['bts_roc_auc'] = round(roc_auc_score(y_bts, test_predict),2)
lr_bts_dict['bts_jaccard'] = round(jaccard_score(y_bts, test_predict),2)
lr_bts_dict
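# A hedged sketch, mirroring the earlier script, of collecting these scores
# into a df for the combined output:
lr_bts_df = pd.DataFrame.from_dict(lr_bts_dict, orient = 'index')
lr_bts_df.columns = ['Logistic_Regression']
print(lr_bts_df)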

@@ -1,316 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 20 00:36:17 2022
@author: tanu
"""
# pnca [ numerical ONLY + NO oversampling]
# LR: hyperparm
{'clf__estimator': LogisticRegression(penalty='l1', random_state=42, solver='saga'),
'clf__estimator__C': 1.0,
'clf__estimator__max_iter': 100,
'clf__estimator__penalty': 'l1',
'clf__estimator__solver': 'saga'}
Logistic_Regression
bts_fscore 0.70
bts_mcc 0.29
bts_precision 0.57
bts_recall 0.92
bts_accuracy 0.61
bts_roc_auc 0.61
bts_jaccard 0.54
# LR: FS + hyperparam
{'bts_fscore': 0.71,
'bts_mcc': 0.34,
'bts_precision': 0.61,
'bts_recall': 0.87,
'bts_accuracy': 0.65,
'bts_roc_auc': 0.65,
'bts_jaccard': 0.55}
#######################################################################
# RF: hyperparam [~45]
Best model:
{'clf__estimator': RandomForestClassifier(class_weight='balanced', max_depth=4, max_features=None,
min_samples_leaf=2, min_samples_split=15,
n_estimators=10, n_jobs=10, oob_score=True,
random_state=42), 'clf__estimator__class_weight': 'balanced', 'clf__estimator__criterion': 'gini', 'clf__estimator__max_depth': 4, 'clf__estimator__max_features': None, 'clf__estimator__min_samples_leaf': 2, 'clf__estimator__min_samples_split': 15, 'clf__estimator__n_estimators': 10}
Best models score:
0.3329374281771619 : 0.33
RF
bts_fscore 0.69
bts_mcc 0.37
bts_precision 0.67
bts_recall 0.72
bts_accuracy 0.68
bts_roc_auc 0.68
bts_jaccard 0.53
#######################################################################
# ABC: hyperparam
{'clf__estimator': AdaBoostClassifier(n_estimators=2, random_state=42),
'clf__estimator__n_estimators': 2}
ABC
1 [(clf__estimator, AdaBoostClassifier(n_estimat...
bts_fscore 0.71
bts_mcc 0.36
bts_precision 0.63
bts_recall 0.83
bts_accuracy 0.67
bts_roc_auc 0.67
bts_jaccard 0.56
#######################################################################
# BC: hyperparam
{'clf__estimator': BaggingClassifier(n_estimators=200, n_jobs=10, oob_score=True, random_state=42),
'clf__estimator__n_estimators': 200}
BC
0 best_model_params
1 [(clf__estimator, BaggingClassifier(n_estimato...
bts_fscore 0.72
bts_mcc 0.37
bts_precision 0.64
bts_recall 0.82
bts_accuracy 0.68
bts_roc_auc 0.68
bts_jaccard 0.56
#######################################################################
# BNB: hyperparam
{'clf__estimator': BernoulliNB(alpha=1, binarize=None),
'clf__estimator__alpha': 1,
'clf__estimator__binarize': None,
'clf__estimator__class_prior': None,
'clf__estimator__fit_prior': True}
BNB
1 [(clf__estimator, BernoulliNB(alpha=1, binariz...
bts_fscore 0.72
bts_mcc 0.35
bts_precision 0.6
bts_recall 0.92
bts_accuracy 0.65
bts_roc_auc 0.65
bts_jaccard 0.56
#######################################################################
# DT: hyperparam
{'clf__estimator': DecisionTreeClassifier(class_weight='balanced', criterion='entropy',
max_depth=2, random_state=42),
'clf__estimator__class_weight': 'balanced',
'clf__estimator__criterion': 'entropy',
'clf__estimator__max_depth': 2,
'clf__estimator__max_features': None,
'clf__estimator__min_samples_leaf': 1,
'clf__estimator__min_samples_split': 2}
DT
1 [(clf__estimator, DecisionTreeClassifier(class...
bts_fscore 0.72
bts_mcc 0.42
bts_precision 0.69
bts_recall 0.76
bts_accuracy 0.71
bts_roc_auc 0.71
bts_jaccard 0.57
#######################################################################
# GBC: hyperparam
{'clf__estimator': GradientBoostingClassifier(learning_rate=0.01, max_depth=7, random_state=42,
subsample=0.5),
'clf__estimator__learning_rate': 0.01,
'clf__estimator__max_depth': 7,
'clf__estimator__n_estimators': 100,
'clf__estimator__subsample': 0.5}
GBC
1 [(clf__estimator, GradientBoostingClassifier(l...
bts_fscore 0.71
bts_mcc 0.33
bts_precision 0.6
bts_recall 0.88
bts_accuracy 0.64
bts_roc_auc 0.65
bts_jaccard 0.55
#######################################################################
# GNB: hyperparam
{'clf__estimator': GaussianNB(var_smoothing=0.006579332246575682),
'clf__estimator__priors': None,
'clf__estimator__var_smoothing': 0.006579332246575682}
GNB
1 [(clf__estimator, GaussianNB(var_smoothing=0.0...
bts_fscore 0.72
bts_mcc 0.46
bts_precision 0.73
bts_recall 0.71
bts_accuracy 0.73
bts_roc_auc 0.73
bts_jaccard 0.57
#######################################################################
# GPC: hyperparam
{'clf__estimator': GaussianProcessClassifier(kernel=1**2 * Matern(length_scale=1, nu=1.5),
random_state=42),
'clf__estimator__kernel': 1**2 * Matern(length_scale=1, nu=1.5)}
ConvergenceWarning: The optimal value found for dimension 0 of parameter k2__alpha is close to the specified upper bound 100000.0. Increasing the bound and calling fit again may find a better value.
warnings.warn(
GPC
1 [(clf__estimator, GaussianProcessClassifier(ke...
bts_fscore 0.73
bts_mcc 0.38
bts_precision 0.6
bts_recall 0.92
bts_accuracy 0.66
bts_roc_auc 0.66
bts_jaccard 0.58
#######################################################################
# KNN: hyperparam
Best model:
{'clf__estimator': KNeighborsClassifier(metric='euclidean', n_jobs=10, n_neighbors=11,
weights='distance'), 'clf__estimator__metric': 'euclidean', 'clf__estimator__n_neighbors': 11, 'clf__estimator__weights': 'distance'}
KNN
1 [(clf__estimator, KNeighborsClassifier(metric=...
bts_fscore 0.69
bts_mcc 0.26
bts_precision 0.58
bts_recall 0.85
bts_accuracy 0.62
bts_roc_auc 0.62
bts_jaccard 0.52
Best model:
{'clf__estimator': KNeighborsClassifier(metric='euclidean', n_jobs=10, n_neighbors=29), 'clf__estimator__metric': 'euclidean', 'clf__estimator__n_neighbors': 29, 'clf__estimator__weights': 'uniform'}
KNN
1 [(clf__estimator, KNeighborsClassifier(metric=...
bts_fscore 0.73
bts_mcc 0.37
bts_precision 0.6
bts_recall 0.92
bts_accuracy 0.65
bts_roc_auc 0.65
bts_jaccard 0.57
#######################################################################
# MLP: hyperparam
#constant lr, tried others as well, but comes back with constant
{'clf__estimator': MLPClassifier(hidden_layer_sizes=3, max_iter=500, random_state=42,
solver='lbfgs'),
'clf__estimator__hidden_layer_sizes': 3,
'clf__estimator__learning_rate': 'constant',
'clf__estimator__solver': 'lbfgs'}
MLP
1 [(clf__estimator, MLPClassifier(hidden_layer_s...
bts_fscore 0.71
bts_mcc 0.34
bts_precision 0.61
bts_recall 0.86
bts_accuracy 0.65
bts_roc_auc 0.65
bts_jaccard 0.55
#######################################################################
# QDA: hyperparam
Best model:
{'clf__estimator': QuadraticDiscriminantAnalysis()}
QDA
1 [(clf__estimator, QuadraticDiscriminantAnalysi...
bts_fscore 0.66
bts_mcc 0.33
bts_precision 0.67
bts_recall 0.65
bts_accuracy 0.67
bts_roc_auc 0.67
bts_jaccard 0.49
#######################################################################
# RC: hyperparam
Best model:
{'clf__estimator': RidgeClassifier(alpha=0.8, random_state=42)
, 'clf__estimator__alpha': 0.8}
Ridge Classifier
1 [(clf__estimator, RidgeClassifier(alpha=0.8, r...
bts_fscore 0.71
bts_mcc 0.31
bts_precision 0.59
bts_recall 0.88
bts_accuracy 0.64
bts_roc_auc 0.64
bts_jaccard 0.55
#######################################################################
# SVC: hyperparam
Best model:
{'clf__estimator': SVC(C=10, kernel='linear', random_state=42), 'clf__estimator__C': 10, 'clf__estimator__gamma': 'scale', 'clf__estimator__kernel': 'linear'}
SVC
1 [(clf__estimator, SVC(C=10, kernel='linear', r...
bts_fscore 0.71
bts_mcc 0.31
bts_precision 0.57
bts_recall 0.93
bts_accuracy 0.62
bts_roc_auc 0.62
bts_jaccard 0.55
Best model:
{'clf__estimator': SVC(C=10, gamma='auto', random_state=42), 'clf__estimator__C': 10, 'clf__estimator__gamma': 'auto', 'clf__estimator__kernel': 'rbf'}
Best models score:
SVC
1 [(clf__estimator, SVC(C=10, gamma='auto', rand...
bts_fscore 0.71
bts_mcc 0.32
bts_precision 0.58
bts_recall 0.93
bts_accuracy 0.63
bts_roc_auc 0.63
bts_jaccard 0.56
Best model:
{'clf__estimator': SVC(C=50, gamma='auto', kernel='sigmoid', random_state=42), 'clf__estimator__C': 50, 'clf__estimator__gamma': 'auto', 'clf__estimator__kernel': 'sigmoid'}
SVC
1 [(clf__estimator, SVC(C=50, gamma='auto', kern...
bts_fscore 0.72
bts_mcc 0.33
bts_precision 0.58
bts_recall 0.93
bts_accuracy 0.63
bts_roc_auc 0.63
bts_jaccard 0.56
#######################################################################
# XGB: hyperparam
Best model:
{'clf__estimator': XGBClassifier(base_score=None, booster=None, colsample_bylevel=None,
colsample_bynode=None, colsample_bytree=None,
enable_categorical=False, gamma=None, gpu_id=None,
importance_type=None, interaction_constraints=None,
learning_rate=0.01, max_delta_step=None, max_depth=6,
max_features='auto', min_child_weight=None, min_samples_leaf=4,
missing=nan, monotone_constraints=None, n_estimators=100,
n_jobs=10, num_parallel_tree=None, predictor=None,
random_state=42, reg_alpha=None, reg_lambda=None,
scale_pos_weight=None, subsample=None, tree_method=None,
validate_parameters=None, verbosity=None), 'clf__estimator__learning_rate': 0.01, 'clf__estimator__max_depth': 6, 'clf__estimator__max_features': 'auto', 'clf__estimator__min_samples_leaf': 4}
XGBoost
0 best_model_params
1 [(clf__estimator, XGBClassifier(base_score=Non...
bts_fscore 0.68
bts_mcc 0.31
bts_precision 0.63
bts_recall 0.73
bts_accuracy 0.65
bts_roc_auc 0.65
bts_jaccard 0.51
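# A hedged sketch (not part of the original log) of collecting the per-model
# blind-test scores above into one comparison table; the values are copied
# from the log, truncated here to three models for brevity:
import pandas as pd

bts_scores = {'Logistic_Regression': {'bts_fscore': 0.71, 'bts_mcc': 0.34}
              , 'DT':  {'bts_fscore': 0.72, 'bts_mcc': 0.42}
              , 'GNB': {'bts_fscore': 0.72, 'bts_mcc': 0.46}}
comparison_df = pd.DataFrame.from_dict(bts_scores, orient = 'index')
print(comparison_df.sort_values('bts_mcc', ascending = False))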