multiple changes

Tanushree Tunstall 2022-07-02 10:25:42 +01:00
parent 2fda32901b
commit dccd3c8eb2
7 changed files with 82 additions and 122 deletions

View file

@@ -80,7 +80,7 @@ homedir = os.path.expanduser("~")
 sys.path.append(homedir + '/git/LSHTM_analysis/scripts/ml/ml_functions')
 sys.path
 ###############################################################################
-outdir = homedir + '/git/LSHTM_ML/output/combined/
+outdir = homedir + '/git/LSHTM_ML/output/combined/'
 #====================
 # Import ML functions

View file

@@ -15,13 +15,15 @@ homedir = os.path.expanduser("~")
 sys.path.append(homedir + '/git/LSHTM_analysis/scripts/ml/ml_functions')
 sys.path
 ###############################################################################
-outdir = homedir + '/git/LSHTM_ML/output/combined/
+outdir = homedir + '/git/LSHTM_ML/output/combined/'
 #====================
 # Import ML functions
 #====================
 #from MultClfs import *
-from MultClfs_logo_skf import *
+#from MultClfs_logo_skf import *
+from MultClfs_logo_skf_split import *
 from GetMLData import *
 from SplitTTS import *
@@ -29,73 +31,59 @@ from SplitTTS import *
 from ml_data_combined import *
 ###############################################################################
-#ml_genes = ["pncA", "embB", "katG", "rpoB", "gid"]
-###############################################################################
-ml_gene_drugD = {'pncA' : 'pyrazinamide'
-, 'embB' : 'ethambutol'
-, 'katG' : 'isoniazid'
-, 'rpoB' : 'rifampicin'
-, 'gid' : 'streptomycin'
-}
-gene_dataD={}
+print('\nUsing data with 5 genes:', len(cm_input_df5))
 split_types = ['70_30', '80_20', 'sl']
 split_data_types = ['actual', 'complete']
-for gene, drug in ml_gene_drugD.items():
-print ('\nGene:', gene
-, '\nDrug:', drug)
-gene_low = gene.lower()
-gene_dataD[gene_low] = getmldata(gene, drug
-, data_combined_model = False # this means it doesn't include 'gene_name' as a feauture as a single gene-target shouldn't have it.
-, use_or = False
-, omit_all_genomic_features = False
-, write_maskfile = False
-, write_outfile = False)
-for split_type in split_types:
-for data_type in split_data_types:
-out_filename = outdir + gene.lower()+ '_' + split_type + '_' + data_type + '.csv'
-tempD=split_tts(gene_dataD[gene_low]
-, data_type = data_type
-, split_type = split_type
-, oversampling = True
-, dst_colname = 'dst'
-, target_colname = 'dst_mode'
-, include_gene_name = True
-)
-paramD = {
-'baseline_paramD': { 'input_df' : tempD['X']
-, 'target' : tempD['y']
-, 'var_type' : 'mixed'
-, 'resampling_type': 'none'}
-, 'smnc_paramD': { 'input_df' : tempD['X_smnc']
-, 'target' : tempD['y_smnc']
-, 'var_type' : 'mixed'
-, 'resampling_type' : 'smnc'}
-, 'ros_paramD': { 'input_df' : tempD['X_ros']
-, 'target' : tempD['y_ros']
-, 'var_type' : 'mixed'
-, 'resampling_type' : 'ros'}
-, 'rus_paramD' : { 'input_df' : tempD['X_rus']
-, 'target' : tempD['y_rus']
-, 'var_type' : 'mixed'
-, 'resampling_type' : 'rus'}
-, 'rouC_paramD' : { 'input_df' : tempD['X_rouC']
-, 'target' : tempD['y_rouC']
-, 'var_type' : 'mixed'
-, 'resampling_type': 'rouC'}
-}
-mmDD = {}
-for k, v in paramD.items():
-scoresD = MultModelsCl_logo_skf(**paramD[k]
-XXXXXXXXXXXXXXXXXXXXXXX
-mmDD[k] = scoresD
+for split_type in split_types:
+for data_type in split_data_types:
+out_filename = outdir + 'cm_' + split_type + '_' + data_type + '.csv'
+print(out_filename)
+tempD = split_tts(cm_input_df5
+, data_type = data_type
+, split_type = split_type
+, oversampling = True
+, dst_colname = 'dst'
+, target_colname = 'dst_mode'
+, include_gene_name = True
+)
+paramD = {
+'baseline_paramD': { 'input_df' : tempD['X']
+, 'target' : tempD['y']
+, 'var_type' : 'mixed'
+, 'resampling_type' : 'none'}
+, 'smnc_paramD' : { 'input_df' : tempD['X_smnc']
+, 'target' : tempD['y_smnc']
+, 'var_type' : 'mixed'
+, 'resampling_type' : 'smnc'}
+, 'ros_paramD' : { 'input_df' : tempD['X_ros']
+, 'target' : tempD['y_ros']
+, 'var_type' : 'mixed'
+, 'resampling_type' : 'ros'}
+, 'rus_paramD' : { 'input_df' : tempD['X_rus']
+, 'target' : tempD['y_rus']
+, 'var_type' : 'mixed'
+, 'resampling_type' : 'rus'}
+, 'rouC_paramD' : { 'input_df' : tempD['X_rouC']
+, 'target' : tempD['y_rouC']
+, 'var_type' : 'mixed'
+, 'resampling_type' : 'rouC'}
+}
+mmDD = {}
+for k, v in paramD.items():
+scoresD = MultModelsCl_logo_skf(**paramD[k]
+XXXXXXXXXXXXXXXXXXXXXXX
+mmDD[k] = scoresD
 # Extracting the dfs from within the dict and concatenating to output as one df
 for k, v in mmDD.items():
 out_wf= pd.concat(mmDD, ignore_index = True)
 out_wf_f = out_wf.sort_values(by = ['resampling', 'source_data', 'MCC'], ascending = [True, True, False], inplace = False)
 out_wf_f.to_csv(('/home/tanu/git/Data/ml_combined/genes/'+out_filename), index = False)
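For reference, the aggregation step above concatenates the per-resampler score frames held in the mmDD dict into one DataFrame before sorting and writing. A minimal sketch of that pandas idiom, using made-up columns and scores rather than the project's real output:

import pandas as pd

# Hypothetical per-resampler score frames standing in for the values of mmDD.
mmDD = {
    'baseline_paramD': pd.DataFrame({'resampling': ['none'], 'MCC': [0.61]}),
    'smnc_paramD':     pd.DataFrame({'resampling': ['smnc'], 'MCC': [0.58]}),
}
# ignore_index=True stacks the rows and discards the dict keys in favour of a fresh integer index.
out_wf = pd.concat(mmDD, ignore_index = True)
print(out_wf)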

View file

@@ -344,12 +344,7 @@ def MultModelsCl(input_df, target, skf_cv
 mm_skf_scoresD[model_name]['bts_roc_auc'] = round(roc_auc_score(blind_test_target, bts_predict),2)
 mm_skf_scoresD[model_name]['bts_jcc'] = round(jaccard_score(blind_test_target, bts_predict),2)
 #mm_skf_scoresD[model_name]['diff_mcc'] = train_test_diff_MCC
-#ADD: target numbers for bts
-yc2 = Counter(blind_test_target)
-yc2_ratio = yc2[0]/yc2[1]
-mm_skf_scoresD[model_name]['n_test_size'] = len(blind_test_df)
-mm_skf_scoresD[model_name]['n_testY_ratio']= round(yc2_ratio,2)
 #return(mm_skf_scoresD)
 #============================
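The block removed here (and from the two sibling functions in the files below) recorded the blind-test set size and its class balance. A minimal sketch of what those lines computed, assuming a binary 0/1 target and using hypothetical labels:

from collections import Counter

blind_test_target = [0, 0, 1, 0, 1, 1, 0]     # hypothetical blind-test labels
yc2 = Counter(blind_test_target)              # per-class counts, e.g. Counter({0: 4, 1: 3})
yc2_ratio = yc2[0] / yc2[1]                   # class-0 to class-1 ratio, stored as 'n_testY_ratio'
n_test_size = len(blind_test_target)          # stored as 'n_test_size' (from len(blind_test_df) in the original)
print(n_test_size, round(yc2_ratio, 2))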

View file

@@ -357,12 +357,6 @@ def MultModelsCl_logo(input_df
 mm_skf_scoresD[model_name]['bts_roc_auc'] = round(roc_auc_score(blind_test_target, bts_predict),2)
 mm_skf_scoresD[model_name]['bts_jcc'] = round(jaccard_score(blind_test_target, bts_predict),2)
 #mm_skf_scoresD[model_name]['diff_mcc'] = train_test_diff_MCC
-#ADD: target numbers for bts
-yc2 = Counter(blind_test_target)
-yc2_ratio = yc2[0]/yc2[1]
-mm_skf_scoresD[model_name]['n_test_size'] = len(blind_test_df)
-mm_skf_scoresD[model_name]['n_testY_ratio']= round(yc2_ratio,2)
 #return(mm_skf_scoresD)
 #============================

View file

@@ -369,12 +369,7 @@ def MultModelsCl_logo_skf(input_df
 mm_skf_scoresD[model_name]['bts_roc_auc'] = round(roc_auc_score(blind_test_target, bts_predict),2)
 mm_skf_scoresD[model_name]['bts_jcc'] = round(jaccard_score(blind_test_target, bts_predict),2)
 #mm_skf_scoresD[model_name]['diff_mcc'] = train_test_diff_MCC
-#ADD: target numbers for bts
-yc2 = Counter(blind_test_target)
-yc2_ratio = yc2[0]/yc2[1]
-mm_skf_scoresD[model_name]['n_test_size'] = len(blind_test_df)
-mm_skf_scoresD[model_name]['n_testY_ratio']= round(yc2_ratio,2)
 #return(mm_skf_scoresD)
 #============================

View file

@@ -67,4 +67,8 @@ if 'gene_name' in colnames_combined_df:
 print("\nGene name included")
 else:
 ('\nGene name NOT included')
+omit_gene_alr = ['alr']
+cm_input_df5 = combined_df[~combined_df['gene_name'].isin(omit_gene_alr)]
 ##############################################################################
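The added lines build cm_input_df5, the five-gene frame consumed by the combined-model script above, by dropping alr rows with a negated isin mask. A minimal sketch of the same pandas idiom on a toy frame (gene and mutation values made up for illustration):

import pandas as pd

combined_df = pd.DataFrame({'gene_name': ['alr', 'pncA', 'embB'],
                            'mutation':  ['M1T', 'H57D', 'M306V']})
omit_gene_alr = ['alr']
# Keep only rows whose gene_name is NOT in the omit list.
cm_input_df5 = combined_df[~combined_df['gene_name'].isin(omit_gene_alr)]
print(cm_input_df5)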

View file

@@ -51,17 +51,17 @@ split_data_types = ['actual', 'complete']
 fs_models = [('AdaBoost Classifier' , AdaBoostClassifier(**rs) )
 , ('Decision Tree' , DecisionTreeClassifier(**rs) )
-, ('Extra Tree' , ExtraTreeClassifier(**rs) )
-, ('Extra Trees' , ExtraTreesClassifier(**rs) )
-, ('Gradient Boosting' , GradientBoostingClassifier(**rs) )
-, ('LDA' , LinearDiscriminantAnalysis() )
-, ('Logistic Regression' , LogisticRegression(**rs) )
-, ('Logistic RegressionCV' , LogisticRegressionCV(cv = 3, **rs))
-, ('Passive Aggresive' , PassiveAggressiveClassifier(**rs, **njobs) )
-, ('Random Forest' , RandomForestClassifier(**rs, n_estimators = 1000 ) )
-, ('Ridge Classifier' , RidgeClassifier(**rs) )
-, ('Ridge ClassifierCV' , RidgeClassifierCV(cv = 3) )
-, ('Stochastic GDescent' , SGDClassifier(**rs, **njobs) )
+#, ('Extra Tree' , ExtraTreeClassifier(**rs) )
+#, ('Extra Trees' , ExtraTreesClassifier(**rs) )
+#, ('Gradient Boosting' , GradientBoostingClassifier(**rs) )
+#, ('LDA' , LinearDiscriminantAnalysis() )
+#, ('Logistic Regression' , LogisticRegression(**rs) )
+#, ('Logistic RegressionCV' , LogisticRegressionCV(cv = 3, **rs))
+#, ('Passive Aggresive' , PassiveAggressiveClassifier(**rs, **njobs) )
+#, ('Random Forest' , RandomForestClassifier(**rs, n_estimators = 1000 ) )
+#, ('Ridge Classifier' , RidgeClassifier(**rs) )
+#, ('Ridge ClassifierCV' , RidgeClassifierCV(cv = 3) )
+#, ('Stochastic GDescent' , SGDClassifier(**rs, **njobs) )
 ]
 for gene, drug in ml_gene_drugD.items():
@@ -78,7 +78,7 @@ for gene, drug in ml_gene_drugD.items():
 for split_type in split_types:
 for data_type in split_data_types:
 # unused per-split outfile
-out_filename = outdir + gene.lower() + '_'+split_type+'_' + data_type + '.json'
+#out_filename = outdir + gene.lower() + '_'+split_type+'_' + data_type + '.json'
 tempD=split_tts(gene_dataD[gene_low]
 , data_type = data_type
 , split_type = split_type
@@ -122,41 +122,25 @@ for gene, drug in ml_gene_drugD.items():
 , '\nModel func:' , model_fn)
 #, '\nList of models:', models)
 index = index+1
-out_fsD[model_name] = {}
-# current_model = {}
+#out_fsD[model_name] = {}
+current_model = {}
 for k, v in paramD.items():
-# out_filename = (gene.lower() + '_' + split_type + '_' + data_type + '_' + k + '.json')
+out_filename = (gene.lower() + '_' + split_type + '_' + data_type + '_' + model_name + '_' + k + '.json')
 fsD_params=paramD[k]
-# print("XXXXXX THIS: ", len(fsD_params['input_df']) )
-# print("XXXXXX THIS: ", out_filename )
-# current_model[k] = fsgs_rfecv(
-out_fsD[model_name][k] = fsgs_rfecv(
+#out_fsD[model_name][k] = fsgs_rfecv(
+thingg = foo(
+)
+current_model[k] = fsgs_rfecv(
 **fsD_params
 , param_gridLd = [{'fs__min_features_to_select': [1]}]
 , blind_test_df = tempD['X_bts']
 , blind_test_target = tempD['y_bts']
 , estimator = model_fn
 , use_fs = False # uses estimator as the RFECV parameter for fs. Set to TRUE if you want to supply custom_fs as shown below
+# NOTE: IS THIS CORRECT?!?
 , custom_fs = RFECV(DecisionTreeClassifier(**rs), cv = skf_cv, scoring = 'matthews_corrcoef')
 , cv_method = skf_cv
 )
-# write per-resampler outfile here
-# with open(out_filename, 'w') as f:
-# f.write(json.dumps(current_model
-# , cls = NpEncoder
-# )
-# write per-split outfile here
-with open(out_filename, 'w') as f:
-f.write(json.dumps(out_fsD
-#, cls = NpEncoder
-))
-#%%############################################################################
-# # Read output json
-# testF = outdir + 'pnca_70_30_actual.json'
-# testF = outdir + 'pnca_70_30_complete.json'
-# with open(testF, 'r') as f:
-# data = json.load(f)
+with open(out_filename, 'w') as f:
+f.write(json.dumps(current_model)
+)
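For context on the custom_fs argument (and the NOTE added above questioning it): wrapping a DecisionTreeClassifier in RFECV with Matthews-correlation scoring is standard scikit-learn usage. A minimal, self-contained sketch with synthetic data and assumed parameter values, not the project's fsgs_rfecv wrapper:

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples = 200, n_features = 20, n_informative = 5, random_state = 42)

skf_cv = StratifiedKFold(n_splits = 5, shuffle = True, random_state = 42)
custom_fs = RFECV(DecisionTreeClassifier(random_state = 42)
                  , cv = skf_cv
                  , scoring = 'matthews_corrcoef')
custom_fs.fit(X, y)
print(custom_fs.n_features_)   # number of features retained by recursive elimination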