Added UQ_LR_FS.py script for LR feature selection. So far this is manual; cannot get it to be part of the pipeline.
parent 37bda41f44
commit c1d27f5a92
1 changed file with 241 additions and 0 deletions
241  uq_ml_models/UQ_LR_FS.py  Normal file
@@ -0,0 +1,241 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 05:59:12 2022

@author: tanu
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 11:09:50 2022

@author: tanu
"""
#%% Import libs
import numpy as np
import pandas as pd

from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFECV
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import (accuracy_score, f1_score, jaccard_score, make_scorer,
                             matthews_corrcoef, precision_score, recall_score, roc_auc_score)

# NOTE: X, y, X_bts, y_bts, blind_test_df, skf_cv and rskf_cv are expected to be
# provided by the data-loading/setup script that is run before this one.

rs = {'random_state': 42}
njobs = {'n_jobs': 10}

scoring_fn = {'accuracy'   : make_scorer(accuracy_score)
              , 'fscore'   : make_scorer(f1_score)
              , 'mcc'      : make_scorer(matthews_corrcoef)
              , 'precision': make_scorer(precision_score)
              , 'recall'   : make_scorer(recall_score)
              , 'roc_auc'  : make_scorer(roc_auc_score)
              , 'jaccard'  : make_scorer(jaccard_score)
              }

mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}
jacc_score_fn = {'jcc': make_scorer(jaccard_score)}
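
# A minimal usage sketch (not run here) of how the multi-metric scoring_fn dict
# can be consumed, assuming X, y and skf_cv are defined as noted above:
# from sklearn.model_selection import cross_validate
# cv_scores = cross_validate(LogisticRegression(**rs), X, y
#                            , cv = skf_cv
#                            , scoring = scoring_fn
#                            , **njobs)
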
#%% Get data
y.to_frame().value_counts().plot(kind = 'bar')
blind_test_df['dst_mode'].to_frame().value_counts().plot(kind = 'bar')

# %% Logistic Regression + FS + hyperparameter
# https://www.tomasbeuzen.com/post/scikit-learn-gridsearch-pipelines/
# from sklearn.feature_selection import SelectKBest, mutual_info_classif

# # Create pipeline
# pipe = Pipeline([
#     ('pre', MinMaxScaler())
#     , ('fs', RFECV(LogisticRegression(**rs), cv = skf_cv, scoring = 'matthews_corrcoef', **njobs, verbose = 3))
#     #, ('fs', SelectKBest(mutual_info_classif, k=5))
#     , ('clf', LogisticRegression(**rs))
# ])

# # Create search space
# param_grid = [{'fs__step': [1]},
#               {
#                'clf': [LogisticRegression(**rs)],
#                #'clf__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
#                'clf__C': np.logspace(0, 4, 10),
#                'clf__penalty': ['none', 'l1', 'l2', 'elasticnet'],
#                'clf__max_iter': list(range(100, 800, 100)),
#                'clf__solver': ['saga']
#               },
#               {
#                'clf': [LogisticRegression(**rs)],
#                #'clf__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
#                'clf__C': np.logspace(0, 4, 10),
#                'clf__penalty': ['l2', 'none'],
#                'clf__max_iter': list(range(100, 800, 100)),
#                'clf__solver': ['newton-cg', 'lbfgs', 'sag']
#               },
#               {
#                'clf': [LogisticRegression(**rs)],
#                #'clf__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
#                'clf__C': np.logspace(0, 4, 10),
#                'clf__penalty': ['l1', 'l2'],
#                'clf__max_iter': list(range(100, 800, 100)),
#                'clf__solver': ['liblinear']
#               }]

# # Run Grid search
# gscv_fs_lr = GridSearchCV(pipe
#                           , param_grid
#                           , cv = skf_cv
#                           , scoring = mcc_score_fn, refit = 'mcc'
#                           , verbose = 3)

# gscv_fs_lr_fit = gscv_fs_lr.fit(X, y)
# gscv_fs_lr_fit_be_mod = gscv_fs_lr_fit.best_params_
# gscv_fs_lr_fit_be_res = gscv_fs_lr_fit.cv_results_

# print('Best model:\n', gscv_fs_lr_fit_be_mod)
# print('Best models score:\n', gscv_fs_lr_fit.best_score_, ':', round(gscv_fs_lr_fit.best_score_, 2))

# #print('\nMean test score from fit results:', round(mean(gscv_fs_lr_fit_be_res['mean_test_mcc']),2))
# print('\nMean test score from fit results:', round(np.nanmean(gscv_fs_lr_fit_be_res['mean_test_mcc']),2))

##############################################################################
#MANUAL

#%% Logistic Regression + hyperparam + FS: BaseEstimator: ClfSwitcher()
model_lr = LogisticRegression(**rs)
model_rfecv = RFECV(estimator = model_lr
                    , cv = rskf_cv
                    #, cv = 10
                    , scoring = 'matthews_corrcoef'
                    )

# model_rfecv = SequentialFeatureSelector(estimator = model_lr
#                                         , n_features_to_select = 'auto'
#                                         , tol = None
#                                         # , cv = 10
#                                         , cv = rskf_cv
#                                         # , direction ='backward'
#                                         , direction ='forward'
#                                         , **njobs)

param_grid2 = [
    {
     #'clf__estimator': [LogisticRegression(**rs)],
     #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
     'C': np.logspace(0, 4, 10),
     'penalty': ['none', 'l1', 'l2', 'elasticnet'],
     'max_iter': list(range(100, 800, 100)),
     'solver': ['saga']
    },
    {
     #'clf__estimator': [LogisticRegression(**rs)],
     #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
     'C': np.logspace(0, 4, 10),
     'penalty': ['l2', 'none'],
     'max_iter': list(range(100, 800, 100)),
     'solver': ['newton-cg', 'lbfgs', 'sag']
    },
    {
     #'clf__estimator': [LogisticRegression(**rs)],
     #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
     'C': np.logspace(0, 4, 10),
     'penalty': ['l1', 'l2'],
     'max_iter': list(range(100, 800, 100)),
     'solver': ['liblinear']
    }
]
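
# Note: each sub-grid above pairs penalties with the solvers that support them in
# scikit-learn's LogisticRegression: 'saga' handles l1, l2, elasticnet and none;
# 'newton-cg', 'lbfgs' and 'sag' only l2/none; 'liblinear' only l1 and l2.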

#-------------------------------------------------------------------------------
# Grid search CV + FS
gscv_lr = GridSearchCV(model_lr
                       , param_grid2
                       , scoring = mcc_score_fn, refit = 'mcc'
                       , cv = skf_cv
                       , return_train_score = False
                       , verbose = 3
                       , **njobs)

#------------------------------------------------------------------------------
# Create pipeline
pipeline = Pipeline([('pre', MinMaxScaler())
                     #, ('feature_selection', sfs_selector)
                     , ('feature_selection', model_rfecv)
                     , ('clf', gscv_lr)])
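
# Fitting this pipeline first scales X, then lets RFECV pick a feature subset, and
# finally runs the grid search on the reduced features. Because GridSearchCV sits
# inside the pipeline as the 'clf' step, its attributes (best_params_, best_score_,
# cv_results_) have to be read from that step rather than from the Pipeline itself.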

# Fit
lr_fs_fit = pipeline.fit(X, y)
lr_fs_fit_be_mod = lr_fs_fit.named_steps['clf'].best_params_
lr_fs_fit_be_res = lr_fs_fit.named_steps['clf'].cv_results_

print('Best model:\n', lr_fs_fit_be_mod)
print('Best models score:\n', lr_fs_fit.named_steps['clf'].best_score_, ':', round(lr_fs_fit.named_steps['clf'].best_score_, 2))
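
# A small sketch (assuming X is a pandas DataFrame) to inspect which features the
# RFECV step kept inside the fitted pipeline:
# fs_step = lr_fs_fit.named_steps['feature_selection']
# print('No. of features selected:', fs_step.n_features_)
# print('Selected features:\n', list(X.columns[fs_step.support_]))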

pipeline.predict(X_bts)
lr_fs_fit.predict(X_bts)

test_predict = pipeline.predict(X_bts)
print(test_predict)
print(np.array(y_bts))
#y_btsf = np.array(y_bts)

print(accuracy_score(y_bts, test_predict))
print(matthews_corrcoef(y_bts, test_predict))

######################################
# Blind test
######################################
# See how it does on the BLIND test
#print('\nBlind test score, mcc:', ))

test_predict = lr_fs_fit.predict(X_bts)
print(test_predict)
print(np.array(y_bts))
y_btsf = np.array(y_bts)

print(accuracy_score(y_bts, test_predict))
print(matthews_corrcoef(y_bts, test_predict))

# create a dict with all scores
lr_bts_dict = {#'best_model': list(gscv_lr_fit_be_mod.items())
               'bts_fscore'     : None
               , 'bts_mcc'      : None
               , 'bts_precision': None
               , 'bts_recall'   : None
               , 'bts_accuracy' : None
               , 'bts_roc_auc'  : None
               , 'bts_jaccard'  : None}
lr_bts_dict

lr_bts_dict['bts_fscore']    = round(f1_score(y_bts, test_predict), 2)
lr_bts_dict['bts_mcc']       = round(matthews_corrcoef(y_bts, test_predict), 2)
lr_bts_dict['bts_precision'] = round(precision_score(y_bts, test_predict), 2)
lr_bts_dict['bts_recall']    = round(recall_score(y_bts, test_predict), 2)
lr_bts_dict['bts_accuracy']  = round(accuracy_score(y_bts, test_predict), 2)
lr_bts_dict['bts_roc_auc']   = round(roc_auc_score(y_bts, test_predict), 2)
lr_bts_dict['bts_jaccard']   = round(jaccard_score(y_bts, test_predict), 2)
lr_bts_dict

# Create a df from dict with all scores
lr_bts_df = pd.DataFrame.from_dict(lr_bts_dict, orient = 'index')
lr_bts_df.columns = ['Logistic_Regression']
print(lr_bts_df)

# d2 = {'best_model_params': list(gscv_lr_fit_be_mod.items())}
# d2
# def Merge(dict1, dict2):
#     res = {**dict1, **dict2}
#     return res
# d3 = Merge(d2, lr_bts_dict)
# d3

# Create df with best model params
model_params = pd.Series(['best_model_params', list(lr_fs_fit_be_mod.items())])
model_params_df = model_params.to_frame()
model_params_df
model_params_df.columns = ['Logistic_Regression']
model_params_df.columns

# Combine the df of scores and the best model params
lr_bts_df.columns
lr_output = pd.concat([model_params_df, lr_bts_df], axis = 0)
lr_output

# Format the combined df
# Drop the best_model_params row from lr_output
lr_df = lr_output.drop([0], axis = 0)
lr_df

#FIXME: tidy the index of the formatted df
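# One possible tidy-up for the FIXME (an untested sketch): expose the score labels as a
# proper column instead of leaving them in the index:
# lr_df = lr_df.reset_index().rename(columns = {'index': 'score_type'})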
###############################################################################