#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 18 06:03:24 2022

@author: tanu
"""
#%% GaussianNB + hyperparams: BaseEstimator: ClfSwitcher()


class ClfSwitcher(BaseEstimator):
    """A custom BaseEstimator that can switch between classifiers.

    The wrapped estimator is itself a tunable hyperparameter
    ('clf__estimator'), so GridSearchCV can swap whole classifiers
    in and out of a Pipeline.
    """

    def __init__(self, estimator=SGDClassifier()):
        """
        :param estimator: sklearn object - the classifier to delegate to
        """
        self.estimator = estimator

    def fit(self, X, y=None, **kwargs):
        # Delegate fitting; return self per the sklearn estimator API.
        self.estimator.fit(X, y)
        return self

    def predict(self, X, y=None):
        return self.estimator.predict(X)

    def predict_proba(self, X):
        return self.estimator.predict_proba(X)

    def score(self, X, y):
        return self.estimator.score(X, y)


# Hyperparameter grid: GaussianNB with a log-spaced sweep over var_smoothing.
parameters = [
    {
        'clf__estimator': [GaussianNB()],
        'clf__estimator__priors': [None],
        'clf__estimator__var_smoothing': np.logspace(0, -9, num=100),
    }
]

# Create pipeline: scale features to [0, 1], then the switchable classifier.
pipeline = Pipeline([
    ('pre', MinMaxScaler()),
    ('clf', ClfSwitcher()),
])

# Grid search i.e hyperparameter tuning and refitting on mcc
gscv_gnb = GridSearchCV(pipeline,
                        parameters,
                        #, scoring = 'f1', refit = 'f1'
                        scoring=mcc_score_fn, refit='mcc',
                        cv=skf_cv,
                        **njobs,
                        return_train_score=False,
                        verbose=3)

# Fit on the training data.
gscv_gnb_fit = gscv_gnb.fit(X, y)

gscv_gnb_fit_be_mod = gscv_gnb_fit.best_params_
gscv_gnb_fit_be_res = gscv_gnb_fit.cv_results_
print('Best model:\n', gscv_gnb_fit_be_mod)
print('Best models score:\n', gscv_gnb_fit.best_score_, ':',
      round(gscv_gnb_fit.best_score_, 2))
# FIX: the original printed this twice, once via a bare `mean(...)` that is
# never defined in this file (NameError at runtime). Keep the NaN-aware
# numpy version only — CV folds can yield NaN MCC scores.
print('\nMean test score from fit results:',
      round(np.nanmean(gscv_gnb_fit_be_res['mean_test_mcc']), 2))

######################################
# Blind test
######################################
# See how the refit best model does on the BLIND (held-out) test set.
test_predict = gscv_gnb_fit.predict(X_bts)
print(test_predict)
print(np.array(y_bts))
y_btsf = np.array(y_bts)
print(accuracy_score(y_btsf, test_predict))
print(matthews_corrcoef(y_btsf, test_predict))
# Create a dict with all blind-test scores for the best GNB model.
# (NOTE: this line was corrupted in the original — the leading '# create'
# comment had been split apart, leaving bare text; restored here.)
gnb_bts_dict = {
    'bts_fscore':    None,
    'bts_mcc':       None,
    'bts_precision': None,
    'bts_recall':    None,
    'bts_accuracy':  None,
    'bts_roc_auc':   None,
    'bts_jaccard':   None,
}

gnb_bts_dict['bts_fscore']    = round(f1_score(y_bts, test_predict), 2)
gnb_bts_dict['bts_mcc']       = round(matthews_corrcoef(y_bts, test_predict), 2)
gnb_bts_dict['bts_precision'] = round(precision_score(y_bts, test_predict), 2)
gnb_bts_dict['bts_recall']    = round(recall_score(y_bts, test_predict), 2)
gnb_bts_dict['bts_accuracy']  = round(accuracy_score(y_bts, test_predict), 2)
gnb_bts_dict['bts_roc_auc']   = round(roc_auc_score(y_bts, test_predict), 2)
gnb_bts_dict['bts_jaccard']   = round(jaccard_score(y_bts, test_predict), 2)

# Create a df from dict with all scores
gnb_bts_df = pd.DataFrame.from_dict(gnb_bts_dict, orient='index')
gnb_bts_df.columns = ['GNB']
print(gnb_bts_df)

# Create df with best model params
# (row 0 = the label string, row 1 = the list of best params)
model_params = pd.Series(['best_model_params',
                          list(gscv_gnb_fit_be_mod.items())])
model_params_df = model_params.to_frame()
model_params_df.columns = ['GNB']

# Combine the df of scores and the best model params
gnb_output = pd.concat([model_params_df, gnb_bts_df], axis=0)

# Format the combined df:
# drop the redundant 'best_model_params' label row (integer index 0), then
# rename the surviving params row (integer index 1) so the index is tidy —
# this resolves the original FIXME about tidying the formatted df's index.
gnb_df = gnb_output.drop([0], axis=0)
gnb_df = gnb_df.rename(index={1: 'best_model_params'})
gnb_df
###############################################################################