#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 16 05:59:12 2022

@author: tanu
"""
import numpy as np
import pandas as pd

from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFECV
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import (accuracy_score, matthews_corrcoef, f1_score,
                             precision_score, recall_score, roc_auc_score,
                             jaccard_score)

# NOTE: rs, njobs, rskf_cv, skf_cv, mcc_score_fn, X, y, X_bts and y_bts are
# assumed to be defined by the project's shared setup (random state, n_jobs,
# CV splitters, MCC scorer dict, and the training/blind-test data).

#%% Logistic Regression + hyperparam + FS: BaseEstimator: ClfSwitcher()
model_lr = LogisticRegression(**rs)
model_rfecv = RFECV(estimator = model_lr
                    , cv = rskf_cv
                    #, cv = 10
                    , scoring = 'matthews_corrcoef'
                    )

# Hyperparameter grid: penalties are grouped by the solvers that support them.
# NOTE: 'elasticnet' with 'saga' also needs an l1_ratio value to fit cleanly.
param_grid2 = [
    {
        #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
        'C': np.logspace(0, 4, 10),
        'penalty': ['none', 'l1', 'l2', 'elasticnet'],
        'max_iter': list(range(100, 800, 100)),
        'solver': ['saga']
    },
    {
        #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
        'C': np.logspace(0, 4, 10),
        'penalty': ['l2', 'none'],
        'max_iter': list(range(100, 800, 100)),
        'solver': ['newton-cg', 'lbfgs', 'sag']
    },
    {
        #'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
        'C': np.logspace(0, 4, 10),
        'penalty': ['l1', 'l2'],
        'max_iter': list(range(100, 800, 100)),
        'solver': ['liblinear']
    }
]

#-------------------------------------------------------------------------------
# Grid search CV + FS
gscv_lr = GridSearchCV(model_lr
                       , param_grid2
                       , scoring = mcc_score_fn, refit = 'mcc'
                       , cv = skf_cv
                       , return_train_score = False
                       , verbose = 3
                       , **njobs)

#------------------------------------------------------------------------------
# Create pipeline: scaling -> RFECV feature selection -> grid-searched LR
pipeline = Pipeline([('pre', MinMaxScaler())
                     #, ('feature_selection', sfs_selector)
                     , ('feature_selection', model_rfecv)
                     , ('clf', gscv_lr)])

# Fit
lr_fs_fit = pipeline.fit(X, y)

# The fitted GridSearchCV lives in the 'clf' step of the pipeline
lr_fs_fit_be_mod = lr_fs_fit.named_steps['clf'].best_params_
lr_fs_fit_be_res = lr_fs_fit.named_steps['clf'].cv_results_

print('Best model:\n', lr_fs_fit_be_mod)
print('Best model score:\n', lr_fs_fit.named_steps['clf'].best_score_, ':'
      , round(lr_fs_fit.named_steps['clf'].best_score_, 2))

# Sanity check: pipeline and lr_fs_fit are the same fitted object (fit() returns self)
test_predict = pipeline.predict(X_bts)
print(test_predict)
print(np.array(y_bts))

print(accuracy_score(y_bts, test_predict))
print(matthews_corrcoef(y_bts, test_predict))

######################################
# Blind test
######################################
# See how it does on the BLIND test set
test_predict = lr_fs_fit.predict(X_bts)
print(test_predict)
print(np.array(y_bts))
y_btsf = np.array(y_bts)

print(accuracy_score(y_bts, test_predict))
print(matthews_corrcoef(y_bts, test_predict))

# Create a dict with all blind-test scores
lr_bts_dict = {'bts_fscore': None
               , 'bts_mcc': None
               , 'bts_precision': None
               , 'bts_recall': None
               , 'bts_accuracy': None
               , 'bts_roc_auc': None
               , 'bts_jaccard': None}
lr_bts_dict

lr_bts_dict['bts_fscore']    = round(f1_score(y_bts, test_predict), 2)
lr_bts_dict['bts_mcc']       = round(matthews_corrcoef(y_bts, test_predict), 2)
lr_bts_dict['bts_precision'] = round(precision_score(y_bts, test_predict), 2)
lr_bts_dict['bts_recall']    = round(recall_score(y_bts, test_predict), 2)
lr_bts_dict['bts_accuracy']  = round(accuracy_score(y_bts, test_predict), 2)
# NOTE: roc_auc_score here is computed on hard class labels, not probabilities
lr_bts_dict['bts_roc_auc']   = round(roc_auc_score(y_bts, test_predict), 2)
lr_bts_dict['bts_jaccard']   = round(jaccard_score(y_bts, test_predict), 2)
lr_bts_dict

# Create a df from the dict with all scores
lr_bts_df = pd.DataFrame.from_dict(lr_bts_dict, orient = 'index')
lr_bts_df.columns = ['Logistic_Regression']
print(lr_bts_df)
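# A minimal reusable sketch of the blind-test scoring above, not part of the
# original pipeline: it collects the same seven metrics for any fitted binary
# classifier and blind-test split. The helper name get_bts_scores is an
# assumption for illustration only.
def get_bts_scores(fitted_model, X_blind, y_blind):
    """Return the blind-test metric dict used above for a fitted classifier."""
    y_pred = fitted_model.predict(X_blind)
    return {'bts_fscore'   : round(f1_score(y_blind, y_pred), 2),
            'bts_mcc'      : round(matthews_corrcoef(y_blind, y_pred), 2),
            'bts_precision': round(precision_score(y_blind, y_pred), 2),
            'bts_recall'   : round(recall_score(y_blind, y_pred), 2),
            'bts_accuracy' : round(accuracy_score(y_blind, y_pred), 2),
            'bts_roc_auc'  : round(roc_auc_score(y_blind, y_pred), 2),
            'bts_jaccard'  : round(jaccard_score(y_blind, y_pred), 2)}
# e.g. lr_bts_dict could equivalently be built as: get_bts_scores(lr_fs_fit, X_bts, y_bts)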
# Commented-out alternative: merge best params and scores into one dict
# d2 = {'best_model_params': list(lr_fs_fit_be_mod.items())}
# d2
# def Merge(dict1, dict2):
#     res = {**dict1, **dict2}
#     return res
# d3 = Merge(d2, lr_bts_dict)
# d3

# Create df with best model params
model_params = pd.Series(['best_model_params', list(lr_fs_fit_be_mod.items())])
model_params_df = model_params.to_frame()
model_params_df
model_params_df.columns = ['Logistic_Regression']
model_params_df.columns

# Combine the df of scores and the best model params
lr_bts_df.columns
lr_output = pd.concat([model_params_df, lr_bts_df], axis = 0)
lr_output

# Format the combined df
# Drop the 'best_model_params' label row (index 0) from lr_output
lr_df = lr_output.drop([0], axis = 0)
lr_df
# FIXME: tidy the index of the formatted df (one possible sketch below)
###############################################################################
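# One possible way to address the FIXME above, as a sketch only: after the drop,
# lr_df keeps the leftover numeric label 1 for the row holding the best-model
# params, alongside the metric-name index. Renaming that label gives a uniform
# string index. The name lr_df_tidy is an assumption, not a project convention.
lr_df_tidy = lr_df.rename(index = {1: 'best_model_params'})
print(lr_df_tidy)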