copy of ML dir to an FS-only version
parent 52cc16f3fa
commit 80e6b3af96

23 changed files with 3115 additions and 243 deletions
UQ_LR_FS_p2.py (165 changes)

@@ -12,6 +12,8 @@ Created on Tue Mar 15 11:09:50 2022
@author: tanu
"""

# similar to _p1 but with ClfSwitcher

#%% Import libraries, data, and scoring func: UQ_pnca_ML.py
rs = {'random_state': 42}
njobs = {'n_jobs': 10}
@@ -21,25 +23,17 @@ class ClfSwitcher(BaseEstimator):
    def __init__(
        self,
        estimator = SGDClassifier(),
        #feature = RFECV(SGDClassifier())
    ):
        """
        A Custom BaseEstimator that can switch between classifiers.

        :param estimator: sklearn object - The classifier
        """
        self.estimator = estimator
        #self.feature = feature

    def fit(self, X, y=None, **kwargs):
        self.estimator.fit(X, y)
        #self.feature.fit(X, y)
        return self

    # def transform(self, X, y=None):
    #     #self.estimator.transform(X, y)
    #     self.feature.transform(X)
    #     return self

    def predict(self, X, y=None):
        return self.estimator.predict(X)
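# Illustrative usage sketch (editor addition, not part of this commit):
# ClfSwitcher inherits get_params()/set_params() from BaseEstimator, which
# is what lets GridSearchCV swap the wrapped classifier via 'estimator':
#
#   sw = ClfSwitcher(estimator = SGDClassifier(**rs))
#   sw.set_params(estimator = LogisticRegression(**rs))
#   sw.fit(X, y)             # delegates to LogisticRegression.fit
#   y_pred = sw.predict(X)   # delegates to LogisticRegression.predict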
@@ -52,35 +46,49 @@ class ClfSwitcher(BaseEstimator):
#%%
parameters = [

    # {'fs__feature__min_features_to_select': [1]
    # , 'fs__feature__scoring': ['matthews_corrcoef']
    # , 'fs__feature__cv': [skf_cv]},

    {'fs__min_features_to_select': [1]
     #, 'fs__scoring': ['matthews_corrcoef']
     , 'fs__cv': [skf_cv]},

    {
        'clf__estimator': [LogisticRegression(**rs)],
        #'clf__estimator__C': np.logspace(0, 4, 10),
        'clf__estimator__penalty': ['none', 'l1', 'l2', 'elasticnet'],
        'clf__estimator__max_iter': list(range(100, 800, 100)),
        'clf__estimator__solver': ['saga']
    }#,
    # {
    #     'clf__estimator': [MODEL2(**rs)],
    #     'clf__estimator': [LogisticRegression(**rs)],
    #     'clf__estimator__C': np.logspace(0, 4, 10),
    #     'clf__estimator__penalty': ['none', 'l1', 'l2', 'elasticnet'],
    #     'clf__estimator__max_iter': list(range(100, 800, 100)),
    #     'clf__estimator__solver': ['saga']
    # },
    # {
    #     'clf__estimator': [LogisticRegression(**rs)],
    #     'clf__estimator__C': np.logspace(0, 4, 10),
    #     'clf__estimator__penalty': ['l2', 'none'],
    #     'clf__estimator__max_iter': list(range(100, 800, 100)),
    #     'clf__estimator__solver': ['newton-cg', 'lbfgs', 'sag']
    # },
    # {
    #     'clf__estimator': [LogisticRegression(**rs)],
    #     'clf__estimator__C': np.logspace(0, 4, 10),
    #     'clf__estimator__penalty': ['l1', 'l2'],
    #     'clf__estimator__max_iter': list(range(100, 800, 100)),
    #     'clf__estimator__solver': ['liblinear']
    # }

    # earlier 'classifier'-keyed grid (matches the previous 'classifier'
    # pipeline step; commented so the list parses):
    # {'fs__min_features_to_select': [1, 2]},
    # {'classifier': [LogisticRegression()],
    #  #'classifier__C': np.logspace(0, 4, 10),
    #  'classifier__C': [2, 2.8],
    #  'classifier__max_iter': [100],
    #  'classifier__penalty': ['l1', 'l2'],
    #  'classifier__solver': ['saga']
    # }
]
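# NOTE (editor sketch): GridSearchCV treats a *list* of dicts as independent
# grids, so the 'fs__*' settings above are never crossed with the
# 'clf__estimator*' settings; tuning them jointly would need one dict, e.g.
# (illustrative only):
#
#   parameters_joint = [{'fs__min_features_to_select': [1],
#                        'fs__cv': [skf_cv],
#                        'clf__estimator': [LogisticRegression(**rs)],
#                        'clf__estimator__solver': ['saga']}]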
#%% Create pipeline
pipeline = Pipeline([
    ('pre', MinMaxScaler())
    , ('fs', RFECV(LogisticRegression(**rs), scoring = 'matthews_corrcoef')) # can't be my mcc_fn
    # , ('fs', ClfSwitcher())
    , ('clf', ClfSwitcher())

    # earlier variant using a 'classifier' step name:
    # ('pre', MinMaxScaler())
    # , ('fs', RFECV(LogisticRegression(**rs), scoring = 'matthews_corrcoef')) # can't be my mcc_fn
    # #, ('clf', ClfSwitcher())
    # , ('classifier', ClfSwitcher())
])
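# NOTE (editor): grid keys must match the Pipeline step names, so the
# 'clf__estimator__*' keys above require the ('clf', ClfSwitcher()) step,
# while 'classifier__*' keys would require ('classifier', ClfSwitcher()).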
#%%

@@ -95,81 +103,66 @@ gscv_lr = GridSearchCV(pipeline
# Fit
gscv_lr.fit(X, y)
gscv_lr.best_estimator_
gscv_lr.best_params_
gscv_lr.best_score_

# Blind test
test_predict = gscv_lr.predict(X_bts)
print(test_predict)
print('\nMCC on Blind test:' , round(matthews_corrcoef(y_bts, test_predict), 2))
print('\nAccuracy on Blind test:', round(accuracy_score(y_bts, test_predict), 2))
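# NOTE (editor): gscv_lr.predict uses the refit best_estimator_, i.e. the
# whole pipeline (MinMaxScaler + RFECV + best classifier) re-trained on
# X, y, so X_bts is scaled and feature-subset automatically.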
####
gscv_lr_fit = gscv_lr.fit(X, y)
gscv_lr_fit_be_mod = gscv_lr_fit.best_params_
gscv_lr_fit_be_res = gscv_lr_fit.cv_results_
gscv_lr_fit.best_score_

#%% Grid search i.e. hyperparameter tuning and refitting on MCC
param_grid2 = [

    {'fs__min_features_to_select': [1]
     , 'fs__cv': [skf_cv]
    },

    {
        #'clf__estimator': [LogisticRegression(**rs)],
        'clf__C': np.logspace(0, 4, 10),
        'clf__penalty': ['l2'],
        'clf__max_iter': list(range(100, 200, 100)),
        #'clf__solver': ['newton-cg', 'lbfgs', 'sag']
        'clf__solver': ['sag']
    },

    {
        #'clf__estimator': [LogisticRegression(**rs)],
        'clf__C': np.logspace(0, 4, 10),
        'clf__penalty': ['l1', 'l2'],
        'clf__max_iter': list(range(100, 200, 100)),
        'clf__solver': ['liblinear']
    }
]
# step 4: create pipeline
pipeline = Pipeline([
    ('pre', MinMaxScaler())
    #, ('fs', model_rfecv)
    , ('fs', RFECV(LogisticRegression(**rs), scoring = 'matthews_corrcoef'))
    , ('clf', LogisticRegression(**rs))])
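# NOTE (editor): here the 'clf' step is LogisticRegression itself rather
# than ClfSwitcher, so param_grid2 addresses its parameters directly
# ('clf__C'), not through the switcher ('clf__estimator__C') as above.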
# step 5: perform grid search CV
gs_final = GridSearchCV(pipeline
                        , param_grid2
                        , cv = skf_cv
                        , scoring = mcc_score_fn, refit = 'mcc'
                        , verbose = 1
                        , return_train_score = False
                        , **njobs)
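# Editor assumption (illustrative): refit = 'mcc' requires mcc_score_fn to
# be a dict of scorers keyed by 'mcc', presumably defined in UQ_pnca_ML.py
# along the lines of:
#
#   from sklearn.metrics import make_scorer, matthews_corrcoef
#   mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}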
#%% Fit
# 'mod_fs' presumably refers to the grid-search object above (gs_final)
mod_fs_fit = mod_fs.fit(X, y)
mod_fs_fbm = mod_fs_fit.best_params_
mod_fs_fbmr = mod_fs_fit.cv_results_
mod_fs_fbs = mod_fs_fit.best_score_
print('Best model:\n', mod_fs_fbm)
print('Best model score:\n', mod_fs_fbs, ':', round(mod_fs_fbs, 2))
print('Best model:\n', gscv_lr_fit_be_mod)
print('Best model score:\n', gscv_lr_fit.best_score_, ':'
      , round(gscv_lr_fit.best_score_, 2))

#print('\nMean test score from fit results:', round(mean(mod_fs_fbmr['mean_test_mcc']),2))
print('\nMean test score from fit results:', round(np.nanmean(mod_fs_fbmr['mean_test_mcc']), 2))
print('\nMean test score from fit results:'
      , round(np.nanmean(gscv_lr_fit_be_res['mean_test_mcc']), 2))
#%% print selected features
# Now get the features out
all_features = gscv_lr.feature_names_in_
#all_features = gsfit.feature_names_in_

sel_features = X.columns[gscv_lr.best_estimator_.named_steps['fs'].get_support()]
n_sf = gscv_lr.best_estimator_.named_steps['fs'].n_features_

# get model name
model_name = gscv_lr.best_estimator_.named_steps['clf']
b_model_params = gscv_lr.best_params_
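# NOTE (editor): get_support() on the fitted RFECV step returns a boolean
# mask over the columns that reach 'fs', and n_features_ is the number
# selected; indexing X.columns with the mask assumes X is a pandas DataFrame.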
print('\n========================================'
      , '\nRunning model:'
      , '\nModel name:', model_name
      , '\n==============================================='
      , '\nRunning feature selection with RFECV for model'
      , '\nTotal no. of features in model:', len(all_features)
      , '\nThese are:\n', all_features, '\n\n'
      , '\nNo. of features in best model:', n_sf
      , '\nThese are:', sel_features, '\n\n'
      , '\nBest model hyperparams:', b_model_params
      )
###############################################################################
#%% Blind test
######################################
# Blind test
######################################
#test_predict = mod_fs_fit.predict(X_bts)   # earlier variant
test_predict = gscv_lr.predict(X_bts)
print(test_predict)
print('\nMCC on Blind test:' , round(matthews_corrcoef(y_bts, test_predict), 2))
print('\nAccuracy on Blind test:', round(accuracy_score(y_bts, test_predict), 2))