#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 13:41:54 2022
@author: tanu
"""
import os, sys
import pandas as pd
import numpy as np
import pprint as pp
from copy import deepcopy
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neural_network import MLPClassifier
from xgboost import XGBClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.compose import make_column_transformer
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
from sklearn.metrics import roc_auc_score, roc_curve, f1_score, matthews_corrcoef
from sklearn.metrics import jaccard_score
from sklearn.metrics import make_scorer
from sklearn.metrics import classification_report
from sklearn.metrics import average_precision_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
import itertools
import seaborn as sns
import matplotlib.pyplot as plt
# sanity check: library versions (numpy and pandas already imported above)
print(np.__version__)
print(pd.__version__)
from statistics import mean, stdev, median, mode
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import SMOTE
# NOTE: imblearn's Pipeline shadows the sklearn Pipeline imported above;
# imblearn's version is needed when a resampler (e.g. SMOTE) is a pipeline step
from imblearn.pipeline import Pipeline
#from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from imblearn.combine import SMOTEENN
from imblearn.under_sampling import EditedNearestNeighbours
from sklearn.model_selection import GridSearchCV
from sklearn.base import BaseEstimator
scoring_fn = ({'accuracy' : make_scorer(accuracy_score)
, 'fscore' : make_scorer(f1_score)
, 'mcc' : make_scorer(matthews_corrcoef)
, 'precision' : make_scorer(precision_score)
, 'recall' : make_scorer(recall_score)
, 'roc_auc' : make_scorer(roc_auc_score) # NOTE: scored on hard class predictions, not probabilities
, 'jcc' : make_scorer(jaccard_score)
})
rs = {'random_state': 42}
njobs = {'n_jobs': 10}
skf_cv = StratifiedKFold(n_splits = 10
                         #, shuffle = False, random_state = None
                         , shuffle = True, **rs)
rskf_cv = RepeatedStratifiedKFold(n_splits = 10
                                  , n_repeats = 3
                                  #, shuffle = False, random_state = None
                                  #, shuffle = True
                                  , **rs)
#my_mcc = make_scorer({'mcc':make_scorer(matthews_corrcoef})
mcc_score_fn = {'mcc': make_scorer(matthews_corrcoef)}
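# (Hedged sketch, not part of the original workflow) these scorer dicts and CV
# splitters are typically passed to cross_validate / GridSearchCV; the model and
# parameter grid below are illustrative choices only:
#gscv = GridSearchCV(LogisticRegression(**rs)
#                    , param_grid = {'C': [0.01, 0.1, 1, 10]}
#                    , cv = skf_cv
#                    , scoring = mcc_score_fn
#                    , refit = 'mcc'
#                    , **njobs)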
#%%
homedir = os.path.expanduser("~")
os.chdir(homedir + "/git/ML_AI_training/")
# my function
from MultClassPipe import MultClassPipeline
from MultClassPipe2 import MultClassPipeline2
from loopity_loop import MultClassPipeSKFLoop
from MultClassPipe3 import MultClassPipeSKFCV
# NOTE: 'gene' and 'drug' must be defined before this point (e.g. uncomment the
# lines below); they are used to build the input paths and feature column sets
#gene = 'pncA'
#drug = 'pyrazinamide'
#==============
# directories
#==============
datadir = homedir + '/git/Data/'
indir = datadir + drug + '/input/'
outdir = datadir + drug + '/output/'
#=======
# input
#=======
infile_ml1 = outdir + gene.lower() + '_merged_df3.csv'
#infile_ml2 = outdir + gene.lower() + '_merged_df2.csv'
my_df = pd.read_csv(infile_ml1)
my_df.dtypes
my_df_cols = my_df.columns
# NOTE: gene names kept lowercase so the 'gene.lower() in ...' checks below match
geneL_basic = ['pnca']
geneL_na = ['gid']
geneL_na_ppi2 = ['rpob']
geneL_ppi2 = ['alr', 'embb', 'katg']
#%% get cols
mycols = my_df.columns
# change 'active_aa_pos' from numeric to categorical (object) type
num_type = ['int64', 'float64']
cat_type = ['object', 'bool']
if my_df['active_aa_pos'].dtype in num_type:
    my_df['active_aa_pos'] = my_df['active_aa_pos'].astype(object)
my_df['active_aa_pos'].dtype
# FIXME: if this is not structural, remove from source..
# Drop NA where numerical cols have them
if gene.lower() in geneL_na_ppi2:
    # D1148: get rid of (missing mcsm_na_affinity)
    na_index = my_df['mutationinformation'].index[my_df['mcsm_na_affinity'].apply(np.isnan)]
    my_df = my_df.drop(index=na_index)
# FIXME: either impute or remove!
# for embb (L114M, F115L, V123L, V125I, V131M) delete for now
if gene.lower() in ['embb']:
    na_index = my_df['mutationinformation'].index[my_df['ligand_distance'].apply(np.isnan)]
    my_df = my_df.drop(index=na_index)
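# (Hedged sketch) per the FIXME above, imputation is the alternative to dropping;
# e.g. a median imputer on the affected column (SimpleImputer is not imported above):
#from sklearn.impute import SimpleImputer
#my_df[['ligand_distance']] = SimpleImputer(strategy = 'median').fit_transform(my_df[['ligand_distance']])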
#%%============================================================================
# Target1: mutation_info_labels, convert to
dm_om_map = {'DM': 1, 'OM': 0} # pnca, OM is minority, other genes: DM is minority
my_df['mutation_class'] = my_df['mutation_info_labels'].map(dm_om_map)
my_df['mutation_class'].value_counts()
my_df['mutation_info_labels']. value_counts()
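# (Hedged sketch) any label not in dm_om_map becomes NaN after .map(); a check
# like the commented line below could catch that, assuming the column only ever
# contains 'DM'/'OM':
#assert my_df['mutation_class'].notna().all(), 'unmapped mutation_info_labels found'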
#%%
# GET X
common_cols_stabiltyN = ['ligand_distance'
, 'ligand_affinity_change'
, 'duet_stability_change'
, 'ddg_foldx'
, 'deepddg'
, 'ddg_dynamut2']
# Build stability columns ~ gene
if gene.lower() in geneL_basic:
    x_stabilityN = common_cols_stabiltyN
if gene.lower() in geneL_ppi2:
    x_stabilityN = common_cols_stabiltyN + ['mcsm_ppi2_affinity'
                                            , 'interface_dist']
if gene.lower() in geneL_na:
    x_stabilityN = common_cols_stabiltyN + ['mcsm_na_affinity']
if gene.lower() in geneL_na_ppi2:
    x_stabilityN = common_cols_stabiltyN + ['mcsm_na_affinity'] + ['mcsm_ppi2_affinity', 'interface_dist']
X_strFN = ['asa'
, 'rsa'
, 'kd_values'
, 'rd_values']
X_evolFN = ['consurf_score'
, 'snap2_score'
, 'snap2_accuracy_pc']
# X_genomicFN = ['af'
# , 'or_mychisq'
# , 'or_logistic'
# , 'or_fisher'
# , 'pval_fisher']
#%% Construct numerical and categorical column names
numerical_FN = x_stabilityN + X_strFN + X_evolFN
# separate ones for foldx?
categorical_FN = ['ss_class'
, 'wt_prop_water'
# , 'lineage_labels' # misleading if using merged_df3
, 'mut_prop_water'
, 'wt_prop_polarity'
, 'mut_prop_polarity'
, 'wt_calcprop'
, 'mut_calcprop'
, 'active_aa_pos']
#%% extracting dfs based on numerical, categorical column names
#----------------------------------
# WITHOUT the target var included
#----------------------------------
num_df = my_df[numerical_FN]
num_df.shape
cat_df = my_df[categorical_FN]
cat_df.shape
all_df = my_df[numerical_FN + categorical_FN]
all_df.shape
#------------------------------
# WITH the target var included:
#'wtgt': with target
#------------------------------
num_df_wtgt = my_df[numerical_FN + ['mutation_class']]
num_df_wtgt.shape
cat_df_wtgt = my_df[categorical_FN + ['mutation_class']]
cat_df_wtgt.shape
all_df_wtgt = my_df[numerical_FN + categorical_FN + ['mutation_class']]
all_df_wtgt.shape
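# (Hedged sketch, not part of the original workflow) the numerical_FN /
# categorical_FN lists above map naturally onto a ColumnTransformer built with
# the preprocessors imported at the top; the names below are illustrative only:
#preprocessor = make_column_transformer(
#    (MinMaxScaler(), numerical_FN)                                # scale numeric features
#    , (OneHotEncoder(handle_unknown = 'ignore'), categorical_FN)  # encode categoricals
#    , remainder = 'drop')
#X_enc = preprocessor.fit_transform(all_df)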
#%% Train-test split (the scoring functions and CV splitters are defined above)
X = num_df_wtgt[numerical_FN]
y = num_df_wtgt['mutation_class']
X_train, X_test, y_train, y_test = train_test_split(X
                                                    , y
                                                    , test_size = 0.33
                                                    , random_state = 2
                                                    , shuffle = True
                                                    , stratify = y)
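# (Hedged sketch) the CV splitter and scorer dict defined above would typically
# be combined with a model on the training split as below; LogisticRegression
# and the scaler are illustrative choices, not necessarily the models used downstream:
#lr_pipe = Pipeline([('scaler', MinMaxScaler()), ('clf', LogisticRegression(**rs))])
#cv_out = cross_validate(lr_pipe
#                        , X_train, y_train
#                        , cv = skf_cv
#                        , scoring = scoring_fn
#                        , **njobs)
#pp.pprint({k: np.mean(v) for k, v in cv_out.items()})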