From 9534fc57d49220b6fdaccb10a11a4a9f41b205df Mon Sep 17 00:00:00 2001
From: Tanushree Tunstall
Date: Mon, 21 Jun 2021 11:53:56 +0100
Subject: [PATCH] added deepddg data to combining_dfs.py

---
 scripts/combining_dfs.py | 61 ++++++++++++++++++++++++++++++++++------
 1 file changed, 53 insertions(+), 8 deletions(-)

diff --git a/scripts/combining_dfs.py b/scripts/combining_dfs.py
index ee6214a..ccb3ff5 100755
--- a/scripts/combining_dfs.py
+++ b/scripts/combining_dfs.py
@@ -131,7 +131,7 @@ in_filename_foldx = gene.lower() + '_foldx.csv'
 in_filename_dssp = gene.lower() + '_dssp.csv'
 in_filename_kd = gene.lower() + '_kd.csv'
 in_filename_rd = gene.lower() + '_rd.csv'
-#in_filename_deepddg = gene.lower() + '_complex_ddg_results.txt' # change to decent filename and put it in the correct dir
+in_filename_deepddg = gene.lower() + '_complex_ddg_results.txt' # TODO: rename to a clearer filename and move it to the correct dir
 in_filename_snpinfo = 'ns' + gene.lower() + '_snp_info_f.csv' # gwas f info
 in_filename_afor = gene.lower() + '_af_or.csv'
 
@@ -143,7 +143,7 @@ infile_foldx = outdir + in_filename_foldx
 infile_dssp = outdir + in_filename_dssp
 infile_kd = outdir + in_filename_kd
 infile_rd = outdir + in_filename_rd
-#infile_deepddg = outdir + in_filename_deepddg
+infile_deepddg = outdir + 'deep_ddg/' + in_filename_deepddg
 infile_snpinfo = outdir + '/' + in_filename_snpinfo
 infile_afor = outdir + '/' + in_filename_afor
 
@@ -203,6 +203,37 @@ print('\n\nResult of first merge:', mcsm_foldx_dfs.shape
       , '\n===================================================================')
 mcsm_foldx_dfs[merging_cols_m1].apply(len)
 mcsm_foldx_dfs[merging_cols_m1].apply(len) == len(mcsm_foldx_dfs)
+#%%
+print('==================================='
+      , '\nMerging deepddg: mcsm_foldx_dfs + deepddg'
+      , '\n===================================')
+
+deepddg_df = pd.read_csv(infile_deepddg, sep = ' ')
+deepddg_df.columns
+
+deepddg_df.rename(columns = {'#chain' : 'chain_id'
+                             , 'WT' : 'wild_type_deepddg'
+                             , 'ResID' : 'position'
+                             , 'Mut' : 'mutant_type_deepddg'}
+                  , inplace = True)
+
+deepddg_df['mutationinformation'] = deepddg_df['wild_type_deepddg'] + deepddg_df['position'].map(str) + deepddg_df['mutant_type_deepddg']
+
+# add deepddg outcome column: ddg < 0 --> Destabilising, ddg >= 0 --> Stabilising
+deepddg_df['deepddg_outcome'] = np.where(deepddg_df['deepddg'] < 0, 'Destabilising', 'Stabilising')
+deepddg_df['deepddg_outcome'].value_counts()
+
+# drop extra columns to allow clean merging
+deepddg_short_df = deepddg_df.drop(['chain_id', 'wild_type_deepddg', 'position', 'mutant_type_deepddg'], axis = 1)
+
+# rearrange columns
+deepddg_short_df.columns
+deepddg_short_df = deepddg_short_df[['mutationinformation', 'deepddg', 'deepddg_outcome']]
+
+mcsm_foldx_deepddg_dfs = pd.merge(mcsm_foldx_dfs, deepddg_short_df, on = 'mutationinformation', how = l_join)
+mcsm_foldx_deepddg_dfs['deepddg_outcome'].value_counts()
+
+ncols_deepddg_merge = len(mcsm_foldx_deepddg_dfs.columns)
 #%%============================================================================
 print('==================================='
       , '\nSecond merge: dssp + kd'
       , '\n===================================')
@@ -240,10 +271,15 @@ print('======================================='
       , '\nmcsm_foldx_dfs + dssp_kd_rd_dfs'
       , '\n=======================================')
 #combined_df = combine_dfs_with_checks(mcsm_foldx_dfs, dssp_kd_rd_dfs, my_join = i_join)
-merging_cols_m4 = detect_common_cols(mcsm_foldx_dfs, dssp_kd_rd_dfs)
-combined_df = pd.merge(mcsm_foldx_dfs, dssp_kd_rd_dfs, on = merging_cols_m4, how = i_join)
+#merging_cols_m4 = detect_common_cols(mcsm_foldx_dfs, dssp_kd_rd_dfs)
+#combined_df = pd.merge(mcsm_foldx_dfs, dssp_kd_rd_dfs, on = merging_cols_m4, how = i_join)
+#combined_df_expected_cols = ncols_m1 + ncols_m3 - len(merging_cols_m4)
 
-combined_df_expected_cols = ncols_m1 + ncols_m3 - len(merging_cols_m4)
+# with deepddg values
+merging_cols_m4 = detect_common_cols(mcsm_foldx_deepddg_dfs, dssp_kd_rd_dfs)
+combined_df = pd.merge(mcsm_foldx_deepddg_dfs, dssp_kd_rd_dfs, on = merging_cols_m4, how = i_join)
+
+combined_df_expected_cols = ncols_deepddg_merge + ncols_m3 - len(merging_cols_m4)
 
 if len(combined_df) == len(mcsm_df) and len(combined_df.columns) == combined_df_expected_cols:
     print('PASS: successfully combined 5 dfs'
@@ -256,15 +292,24 @@ print('\nResult of Fourth merge:', combined_df.shape
       , '\n===================================================================')
 combined_df[merging_cols_m4].apply(len)
 combined_df[merging_cols_m4].apply(len) == len(combined_df)
-
 #%%============================================================================
+# Format the combined df columns
+combined_df_colnames = combined_df.columns
 
-#deepddg_df = pd.read_csv(infile_deepddg, sep = ' ')
+# check redundant columns
+combined_df['chain'].equals(combined_df['chain_id'])
+combined_df['wild_type'].equals(combined_df['wild_type_kd']) # has nan
+combined_df['wild_type'].equals(combined_df['wild_type_dssp'])
+# sanity check
+foo = combined_df[['wild_type', 'wild_type_kd', 'wt_3letter_caps', 'wt_aa_3lower', 'mut_aa_3lower']]
+# Drop redundant cols
+cols_to_drop = ['chain_id', 'wild_type_kd', 'wild_type_dssp', 'wt_3letter_caps']
+combined_df_clean = combined_df.drop(cols_to_drop, axis = 1)
+del(foo)
 #%%============================================================================
 # Output columns
-
 out_filename_stab_struc = gene.lower() + '_comb_stab_struc_params.csv'
 outfile_stab_struc = outdir + '/' + out_filename_stab_struc
 print('Output filename:', outfile_stab_struc
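
Note on the PASS/FAIL check above: after an inner merge on the shared columns, the merged dataframe should have ncols_left + ncols_right - len(common_cols) columns, which is what combined_df_expected_cols computes. A minimal toy sketch of that bookkeeping (not part of the patch; the dataframes, column names and values below are made up for illustration):

import pandas as pd

# toy frames sharing only the 'mutationinformation' key column (hypothetical data)
left = pd.DataFrame({'mutationinformation': ['A1B', 'C2D'],
                     'deepddg': [-0.5, 0.3],
                     'deepddg_outcome': ['Destabilising', 'Stabilising']})
right = pd.DataFrame({'mutationinformation': ['A1B', 'C2D'],
                      'kd_values': [1.2, 0.8]})

common_cols = list(set(left.columns) & set(right.columns))  # ['mutationinformation']
merged = pd.merge(left, right, on = common_cols, how = 'inner')

# column-count bookkeeping used by the PASS check: 3 + 2 - 1 = 4
expected_cols = len(left.columns) + len(right.columns) - len(common_cols)
assert len(merged.columns) == expected_cols
assert len(merged) == len(left)  # row count preserved when every key matches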