#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Tue Aug 6 12:56:03 2019

@author: tanu
'''
#=======================================================================
# Task: combining all dfs to a single one
# Input: 8 dfs
# 1) <gene>.lower() + '_complex_mcsm_norm.csv'
# 2) <gene>.lower() + '_foldx.csv'
# 3) <gene>.lower() + '_dssp.csv'
# 4) <gene>.lower() + '_kd.csv'
# 5) <gene>.lower() + '_rd.csv'
# 6) 'ns' + <gene>.lower() + '_snp_info.csv'
# 7) <gene>.lower() + '_af_or.csv'
# 8) <gene>.lower() + '_af_or_kinship.csv'

# combining order
# Merge1 = 1 + 2
# Merge2 = 3 + 4
# Merge3 = Merge2 + 5
# Merge4 = Merge1 + Merge3
# Merge5 = 6 + 7
# Merge6 = Merge5 + 8
# Merge7 = Merge4 + Merge6

# Output: single csv of all 8 dfs combined

# useful link
# https://stackoverflow.com/questions/23668427/pandas-three-way-joining-multiple-dataframes-on-columns
#=======================================================================
#%% load packages
import sys, os
import pandas as pd
from pandas import DataFrame
import numpy as np
import argparse

#=======================================================================
#%% specify input and curr dir
homedir = os.path.expanduser('~')

# set working dir
os.getcwd()
os.chdir(homedir + '/git/LSHTM_analysis/scripts')
os.getcwd()

# FIXME: local imports
#from combining import combine_dfs_with_checks
from combining_FIXME import detect_common_cols
from reference_dict import oneletter_aa_dict # CHECK DIR STRUC THERE!

#=======================================================================
#%% command line args
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--drug', help = 'drug name', default = 'pyrazinamide')
arg_parser.add_argument('-g', '--gene', help = 'gene name', default = 'pncA') # case sensitive
arg_parser.add_argument('--datadir', help = 'Data Directory. By default, it assumes homedir + git/Data')
arg_parser.add_argument('-i', '--input_dir', help = 'Input dir containing pdb files. By default, it assumes homedir + input')
arg_parser.add_argument('-o', '--output_dir', help = 'Output dir for results. By default, it assumes homedir + output')
arg_parser.add_argument('--debug', action = 'store_true', help = 'Debug Mode')
args = arg_parser.parse_args()

#=======================================================================
#%% variable assignment: input and output
drug = args.drug
gene = args.gene

# BUGFIX: gene_match was originally computed BEFORE 'gene' was assigned from
# args, which raised NameError at runtime; it must be derived after.
gene_match = gene + '_p.'

datadir = args.datadir
indir = args.input_dir
outdir = args.output_dir

#%%=======================================================================
#==============
# directories
#==============
if not datadir:
    datadir = homedir + '/' + 'git/Data'

if not indir:
    indir = datadir + '/' + drug + '/input'

if not outdir:
    outdir = datadir + '/' + drug + '/output'

#=======
# input
#=======
in_filename_mcsm = gene.lower() + '_complex_mcsm_norm.csv'
in_filename_foldx = gene.lower() + '_foldx.csv'
in_filename_dssp = gene.lower() + '_dssp.csv'
in_filename_kd = gene.lower() + '_kd.csv'
in_filename_rd = gene.lower() + '_rd.csv'
in_filename_snpinfo = 'ns' + gene.lower() + '_snp_info_f.csv'
in_filename_afor = gene.lower() + '_af_or.csv'
in_filename_afor_kin = gene.lower() + '_af_or_kinship.csv'

# NOTE(review): all input files are read from *outdir*, not indir -- they look
# like outputs of earlier pipeline steps; confirm this is intended.
infile_mcsm = outdir + '/' + in_filename_mcsm
infile_foldx = outdir + '/' + in_filename_foldx
infile_dssp = outdir + '/' + in_filename_dssp
infile_kd = outdir + '/' + in_filename_kd
infile_rd = outdir + '/' + in_filename_rd
infile_snpinfo = outdir + '/' + in_filename_snpinfo
infile_afor = outdir + '/' + in_filename_afor
infile_afor_kin = outdir + '/' + in_filename_afor_kin

print('\nInput path:', indir
      , '\nOutput path:', outdir, '\n'
      , '\nInput filename mcsm:', infile_mcsm
      , '\nInput filename foldx:', infile_foldx, '\n'
      , '\nInput filename dssp:', infile_dssp
      , '\nInput filename kd:', infile_kd
      , '\nInput filename rd', infile_rd, '\n'
      , '\nInput filename snp info:', infile_snpinfo, '\n'
      , '\nInput filename af or:', infile_afor
      , '\nInput filename afor kinship:', infile_afor_kin
      , '\n============================================================')

#=======
# output
#=======
out_filename_comb = gene.lower() + '_all_params.csv'
outfile_comb = outdir + '/' + out_filename_comb
print('Output filename:', outfile_comb
      , '\n===================================================================')

# join types used throughout the merges below
o_join = 'outer'
l_join = 'left'
r_join = 'right'
i_join = 'inner'

# end of variable assignment for input and output files
#%%============================================================================
print('==================================='
      , '\nFirst merge: mcsm + foldx'
      , '\n===================================')

mcsm_df = pd.read_csv(infile_mcsm, sep = ',')
foldx_df = pd.read_csv(infile_foldx, sep = ',')

#mcsm_foldx_dfs = combine_dfs_with_checks(mcsm_df, foldx_df, my_join = o_join)
merging_cols_m1 = detect_common_cols(mcsm_df, foldx_df)
mcsm_foldx_dfs = pd.merge(mcsm_df, foldx_df, on = merging_cols_m1, how = o_join)
ncols_m1 = len(mcsm_foldx_dfs.columns)  # used by the fourth-merge dim check

print('\n\nResult of first merge:', mcsm_foldx_dfs.shape
      , '\n===================================================================')

# sanity peek: key-column lengths vs merged length (results not captured)
mcsm_foldx_dfs[merging_cols_m1].apply(len)
mcsm_foldx_dfs[merging_cols_m1].apply(len) == len(mcsm_foldx_dfs)

#%%============================================================================
print('==================================='
      , '\nSecond merge: dssp + kd'
      , '\n===================================')

dssp_df = pd.read_csv(infile_dssp, sep = ',')
kd_df = pd.read_csv(infile_kd, sep = ',')
rd_df = pd.read_csv(infile_rd, sep = ',')  # consumed by the third merge

#dssp_kd_dfs = combine_dfs_with_checks(dssp_df, kd_df, my_join = o_join)
merging_cols_m2 = detect_common_cols(dssp_df, kd_df)
dssp_kd_dfs = pd.merge(dssp_df, kd_df, on = merging_cols_m2, how = o_join)

print('\n\nResult of second merge:', dssp_kd_dfs.shape
      , '\n===================================================================')
#%%============================================================================
print('==================================='
      , '\nThird merge: second merge + rd_df'
      , '\ndssp_kd_dfs + rd_df'
      , '\n===================================')

#dssp_kd_rd_dfs = combine_dfs_with_checks(dssp_kd_dfs, rd_df, my_join = o_join)
# BUGFIX: common cols were originally detected between dssp_df and kd_df
# (copy-paste from the second merge), but the merge below is
# dssp_kd_dfs + rd_df -- detect the keys on the frames actually merged.
merging_cols_m3 = detect_common_cols(dssp_kd_dfs, rd_df)
dssp_kd_rd_dfs = pd.merge(dssp_kd_dfs, rd_df, on = merging_cols_m3, how = o_join)
ncols_m3 = len(dssp_kd_rd_dfs.columns)  # used by the fourth-merge dim check

print('\n\nResult of Third merge:', dssp_kd_rd_dfs.shape
      , '\n===================================================================')

# sanity peek: key-column lengths vs merged length (results not captured)
dssp_kd_rd_dfs[merging_cols_m3].apply(len)
dssp_kd_rd_dfs[merging_cols_m3].apply(len) == len(dssp_kd_rd_dfs)

#%%============================================================================
print('======================================='
      , '\nFourth merge: First merge + Third merge'
      , '\nmcsm_foldx_dfs + dssp_kd_rd_dfs'
      , '\n=======================================')

#combined_df = combine_dfs_with_checks(mcsm_foldx_dfs, dssp_kd_rd_dfs, my_join = i_join)
merging_cols_m4 = detect_common_cols(mcsm_foldx_dfs, dssp_kd_rd_dfs)
combined_df = pd.merge(mcsm_foldx_dfs, dssp_kd_rd_dfs, on = merging_cols_m4, how = i_join)

# expected width: cols of both inputs minus the shared key columns
combined_df_expected_cols = ncols_m1 + ncols_m3 - len(merging_cols_m4)

if len(combined_df) == len(mcsm_df) and len(combined_df.columns) == combined_df_expected_cols:
    print('PASS: successfully combined 5 dfs'
          , '\nNo. of rows combined_df:', len(combined_df)
          , '\nNo. of cols combined_df:', len(combined_df.columns))
else:
    sys.exit('FAIL: check individual df merges')

print('\nResult of Fourth merge:', combined_df.shape
      , '\n===================================================================')

# sanity peek: key-column lengths vs merged length (results not captured)
combined_df[merging_cols_m4].apply(len)
combined_df[merging_cols_m4].apply(len) == len(combined_df)

#%%============================================================================
# OR merges: TEDIOUSSSS!!!!
#%%
print('==================================='
      , '\nFifth merge: afor_df + afor_kin_df'
      , '\n===================================')

# OR combining
afor_df = pd.read_csv(infile_afor, sep = ',')
afor_kin_df = pd.read_csv(infile_afor_kin, sep = ',')
afor_kin_df.columns = afor_kin_df.columns.str.lower()

merging_cols_m5 = detect_common_cols(afor_df, afor_kin_df)

print('Dim of afor_df:', afor_df.shape
      , '\nDim of afor_kin_df:', afor_kin_df.shape)

# finding if ALL afor_kin_df muts are present in afor_df
# i.e all kinship muts should be PRESENT in mycalcs_present
if len(afor_kin_df[afor_kin_df['mutation'].isin(afor_df['mutation'])]) == afor_kin_df.shape[0]:
    print('PASS: ALL or_kinship muts are present in my or list')
else:
    nf_muts = len(afor_kin_df[~afor_kin_df['mutation'].isin(afor_df['mutation'])])
    nf_muts_df = afor_kin_df[~afor_kin_df['mutation'].isin(afor_df['mutation'])]
    print('FAIL:', nf_muts, 'muts present in afor_kin_df NOT present in my or list'
          , '\nsee "nf_muts_df" created containing not found(nf) muts')
    sys.exit()

# Now checking how many afor_df muts are NOT present in afor_kin_df
common_muts = len(afor_df[afor_df['mutation'].isin(afor_kin_df['mutation'])])
# NOTE(review): 'extra' = kin rows minus matching afor rows; verify this
# arithmetic -- it looks inverted if afor_df is the larger set.
extra_muts_myor = afor_kin_df.shape[0] - common_muts

print('=========================================='
      , '\nmy or calcs has', extra_muts_myor, 'extra mutations'
      , '\n==========================================')

print('Expected cals for merging with outer_join...')
expected_rows = afor_df.shape[0] + extra_muts_myor
expected_cols = afor_df.shape[1] + afor_kin_df.shape[1] - len(merging_cols_m5)

ors_df = pd.merge(afor_df, afor_kin_df, on = merging_cols_m5, how = o_join)

if ors_df.shape[0] == expected_rows and ors_df.shape[1] == expected_cols:
    print('PASS: OR dfs successfully combined! PHEWWWW!')
else:
    print('FAIL: could not combine OR dfs'
          , '\nCheck expected rows and cols calculation and join type')

print('Dim of merged ors_df:', ors_df.shape)

# sanity peek: key-column lengths vs merged length (results not captured)
ors_df[merging_cols_m5].apply(len)
ors_df[merging_cols_m5].apply(len) == len(ors_df)

#%%============================================================================
# formatting ors_df
ors_df.columns

# Dropping unncessary columns: already removed in ealier preprocessing
cols_to_drop = ['n_miss']
print('Dropping', len(cols_to_drop), 'columns:\n', cols_to_drop)
ors_df.drop(cols_to_drop, axis = 1, inplace = True)

print('Reordering', ors_df.shape[1], 'columns'
      , '\n===============================================')

cols = ors_df.columns
# target column order; deprecated names (chr_num_allele, gene_number,
# wt_3let/mt_3let, symbol, n_miss, ...) intentionally omitted
column_order = ['mutation'
                , 'mutationinformation'
                , 'wild_type'
                , 'position'
                , 'mutant_type'
                , 'ref_allele'
                , 'alt_allele'
                , 'mut_info_f1'
                , 'mut_info_f2'
                , 'mut_type'
                , 'gene_id'
                , 'gene_name'
                , 'chromosome_number'
                , 'af'
                , 'af_kin'
                , 'est_chisq'
                , 'or_mychisq'
                , 'or_fisher'
                , 'or_logistic'
                , 'or_kin'
                , 'pval_chisq'
                , 'pval_fisher'
                , 'pval_logistic'
                , 'pwald_kin'
                , 'ci_low_fisher'
                , 'ci_hi_fisher'
                , 'ci_low_logistic'
                , 'ci_hi_logistic'
                , 'beta_logistic'
                , 'beta_kin'
                , 'se_logistic'
                , 'se_kin'
                , 'zval_logistic'
                , 'logl_h1_kin'
                , 'l_remle_kin'
                ]

if ((len(column_order) == ors_df.shape[1]) and
        (DataFrame(column_order).isin(ors_df.columns).all().all())):
    print('PASS: Column order generated for all:', len(column_order), 'columns'
          , '\nColumn names match, safe to reorder columns'
          , '\nApplying column order to df...')
    ors_df_ordered = ors_df[column_order]
else:
    print('FAIL: Mismatch in no. of cols to reorder'
          , '\nNo. of cols in df to reorder:', ors_df.shape[1]
          , '\nNo. of cols order generated for:', len(column_order))
    sys.exit()

print('\nResult of Sixth merge:', ors_df_ordered.shape
      , '\n===================================================================')

#%%============================================================================
print('==================================='
      , '\nSixth merge: Fourth + Fifth merge'
      , '\ncombined_df + ors_df_ordered'
      , '\n===================================')

#combined_df_all = combine_dfs_with_checks(combined_df, ors_df_ordered, my_join = i_join)
merging_cols_m6 = detect_common_cols(combined_df, ors_df_ordered)

print('Dim of df1:', combined_df.shape
      , '\nDim of df2:', ors_df_ordered.shape
      , '\nNo. of merging_cols:', len(merging_cols_m6))

# BUGFIX: the labels say "NOT in", but the original summed .isin() without
# negation, so the printed counts contradicted the labels; negate here.
print('Checking mutations in the two dfs:'
      , '\nmuts in df1 but NOT in df2:'
      , (~combined_df['mutationinformation'].isin(ors_df_ordered['mutationinformation'])).sum()
      , '\nmuts in df2 but NOT in df1:'
      , (~ors_df_ordered['mutationinformation'].isin(combined_df['mutationinformation'])).sum())

# BUGFIX: the merge originally used ors_df even though the banner above and
# the expected-dim check below refer to ors_df_ordered (same columns,
# reordered); merge with ors_df_ordered as announced.
combined_df_all = pd.merge(combined_df, ors_df_ordered, on = merging_cols_m6, how = l_join)
combined_df_all.shape

# populate mut_info_f1
# NOTE(review): this overwrites the WHOLE column (not just NaNs) with
# '<pos><wt>><pos><mt>' -- confirm that is the intended format.
combined_df_all['mut_info_f1'].isna().sum()
combined_df_all['mut_info_f1'] = combined_df_all['position'].astype(str) + combined_df_all['wild_type'] + '>' + combined_df_all['position'].astype(str) + combined_df_all['mutant_type']
combined_df_all['mut_info_f1'].isna().sum()

# populate mut_type
combined_df_all['mut_type'].isna().sum()
mut_type_word = 'missense'  # FIXME, should be derived
combined_df_all['mut_type'].fillna(mut_type_word, inplace = True)
combined_df_all['mut_type'].isna().sum()

# populate gene_id
combined_df_all['gene_id'].isna().sum()
gene_id_word = 'Rv2043c'  # FIXME, should be derived
combined_df_all['gene_id'].fillna(gene_id_word, inplace = True)
combined_df_all['gene_id'].isna().sum()

# populate gene_name
combined_df_all['gene_name'].isna().sum()
combined_df_all['gene_name'].value_counts()
combined_df_all['gene_name'].fillna(gene, inplace = True)
combined_df_all['gene_name'].isna().sum()

# FIXME: DIM
# only with left join!
outdf_expected_rows = len(combined_df)
outdf_expected_cols = len(combined_df.columns) + len(ors_df_ordered.columns) - len(merging_cols_m6)

#if combined_df_all.shape[1] == outdf_expected_cols and combined_df_all.shape[0] == outdf_expected_rows:
if combined_df_all.shape[1] == outdf_expected_cols and combined_df_all['mutationinformation'].nunique() == outdf_expected_rows:
    print('PASS: Df dimension match'
          , '\nDim of combined_df_all with join type:', l_join
          , '\n', combined_df_all.shape
          , '\n===============================================================')
else:
    # same negation fix as the diagnostic print above
    print('FAIL: Df dimension mismatch'
          , 'Cannot generate expected dim. See details of merge performed'
          , '\ndf1 dim:', combined_df.shape
          , '\ndf2 dim:', ors_df.shape
          , '\nGot:', combined_df_all.shape
          , '\nmuts in df1 but NOT in df2:'
          , (~combined_df['mutationinformation'].isin(ors_df['mutationinformation'])).sum()
          , '\nmuts in df2 but NOT in df1:'
          , (~ors_df['mutationinformation'].isin(combined_df['mutationinformation'])).sum())
    sys.exit()

#%% IMPORTANT: check if mutation related info is all populated after this merge
# FIXME: should get fixed with JP's resolved dataset!?
check_nan = combined_df_all.isna().sum(axis = 0)

# relevant mut cols
check_mut_cols = merging_cols_m5 + merging_cols_m6
count_na_mut_cols = combined_df_all[check_mut_cols].isna().sum().reset_index().rename(columns = {'index': 'col_name', 0: 'na_count'})

# BUGFIX: .sum() returns a scalar, so the original '(... > 0).any()' raised
# AttributeError (bool has no .any()); compare the scalar directly.
if count_na_mut_cols['na_count'].sum() > 0:
    # FIXME: static override, generate 'mutation' from variable
    na_muts_n = combined_df_all['mutation'].isna().sum()
    baz = combined_df_all[combined_df_all['mutation'].isna()]
    baz = baz[check_mut_cols]
    print(na_muts_n, 'mutations have missing \'mutation\' info.'
          , '\nFetching these from reference dict...')

    # one-letter -> lowercase three-letter amino-acid code lookup
    lookup_dict = dict()
    for k, v in oneletter_aa_dict.items():
        lookup_dict[k] = v['three_letter_code_lower']
    print(lookup_dict)

    wt_3let = combined_df_all['wild_type'].map(lookup_dict).str.capitalize()
    pos = combined_df_all['position'].astype(str)
    mt_3let = combined_df_all['mutant_type'].map(lookup_dict).str.capitalize()

    # GENERALIZED: originally hard-coded 'pnca_p.'; gene_match.lower() yields
    # the identical prefix for the default gene (pncA) and works for others.
    baz['mutation'] = gene_match.lower() + wt_3let + pos + mt_3let

print(combined_df_all['mutation'])

# populate mut_info_f2
# NOTE(review): combined_df_all['mutation'] has not been updated from baz, so
# rows back-filled above still carry NaN here -- confirm intent.
combined_df_all['mut_info_f2'] = combined_df_all['mutation'].str.replace(gene_match.lower(), 'p.', regex = True)

#%% merge
# NOTE(review): 'baz' is only defined when missing mutations were found above,
# and 'mut_info_f2' was never added to it -- this section likely needs rework.
#merging_cols_m7 = detect_common_cols(combined_df_all, baz)
baz2 = baz[['mutationinformation', 'mut_info_f2']]
baz2 = baz2.drop_duplicates()
merging_cols_m7 = detect_common_cols(combined_df_all, baz2)
combined_df_all2 = pd.merge(combined_df_all, baz2
                            #, on = merging_cols_m7
                            , on = 'mutationinformation'
                            , how = o_join)

#%%============================================================================
output_cols = combined_df_all.columns
print('Output cols:', output_cols)

#%%============================================================================
# write csv
print('Writing file: combined output of all params needed for plotting and ML')
# NOTE(review): combined_df_all2 (with the baz2 merge) is never written --
# confirm combined_df_all is really the intended output.
combined_df_all.to_csv(outfile_comb, index = False)

print('\nFinished writing file:'
      , '\nNo. of rows:', combined_df_all.shape[0]
      , '\nNo. of cols:', combined_df_all.shape[1])
#=======================================================================
#%% incase you FIX the the function: combine_dfs_with_checks
#def main():
#    print('Reading input files:')

#if __name__ == '__main__':
#    main()
#=======================================================================
#%% end of script