Combined and output all ORs

Tanushree Tunstall 2020-06-23 17:34:54 +01:00
parent d8b272b0ae
commit a9498f8e08
2 changed files with 185 additions and 144 deletions


@@ -34,18 +34,19 @@ from reference_dict import my_aa_dict # CHECK DIR STRUC THERE!
#=======================================================================
#%% command line args
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--drug', help = 'drug name', default = None)
arg_parser.add_argument('-g', '--gene', help = 'gene name', default = None) # case sensitive
arg_parser.add_argument('-d', '--drug', help = 'drug name', default = 'pyrazinamide')
arg_parser.add_argument('-g', '--gene', help = 'gene name', default = 'pncA') # case sensitive
args = arg_parser.parse_args()
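# minimal usage sketch (the script filename below is a placeholder, not from this commit):
#   python combine_script.py -d pyrazinamide -g pncA
# with the new defaults above, running with no flags gives the same drug/gene.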
#=======================================================================
#%% variable assignment: input and output
drug = 'pyrazinamide'
gene = 'pncA'
gene_match = gene + '_p.'
#drug = 'pyrazinamide'
#gene = 'pncA'
#gene_match = gene + '_p.'
# cmd variables
#drug = args.drug
#gene = args.gene
drug = args.drug
gene = args.gene
gene_match = gene + '_p.'
#==========
# dir
@@ -57,9 +58,7 @@ outdir = datadir + '/' + drug + '/' + 'output'
# input
#=======
in_filename_afor = gene.lower() + '_af_or.csv'
# FIXME
in_filename_afor_kin = gene.lower() + '_af_or_kinship.csv'
# needs to contain OR. it only has beta!
infile1 = outdir + '/' + in_filename_afor
infile2 = outdir + '/' + in_filename_afor_kin
@@ -78,7 +77,7 @@ print('Output file:', outfile
, '\n===================================================================')
del(in_filename_afor, in_filename_afor_kin, outfile, datadir, outdir)
del(in_filename_afor, in_filename_afor_kin, datadir, outdir)
#%% end of variable assignment for input and output files
#=======================================================================
#%% format mutations
@@ -169,12 +168,14 @@ print(ndiff1)
ndiff2 = afor_kin_df_nrows - afor_kin_df['mutationinformation'].isin(afor_df['mutationinformation']).sum()
print(ndiff2)
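#%% minimal sketch (toy series, not the real dfs; pd comes from the imports at the top):
# isin().sum() counts keys also present in the other frame, so nrows minus that sum
# is the number of mutations missing from it (the ndiff values computed above)
toy_kin = pd.Series(['pncA_p.A1B', 'pncA_p.C2D'])
toy_af = pd.Series(['pncA_p.A1B', 'pncA_p.C2D', 'pncA_p.E3F'])
print(len(toy_kin) - toy_kin.isin(toy_af).sum()) # 0: toy_kin is a complete subset of toy_af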
#%% combining dfs
# Define join type
#my_join = 'inner'
#my_join = 'right'
##my_join = 'left'
#my_join = 'left'
my_join = 'outer'
fail = False
# sanity check: how many muts from afor_kin_df are in afor_df. should be a complete subset
if ndiff2 == 0:
print('PASS: all muts in afor_kin_df are present in afor_df'
@@ -182,64 +183,31 @@ if ndiff2 == 0:
combined_df = pd.merge(afor_df, afor_kin_df, on = merging_cols, how = my_join)
if my_join == 'outer':
if my_join in ('outer', 'left'):
print('combining with:', my_join)
expected_rows = afor_df_nrows + ndiff1
expected_cols = (afor_df_ncols + afor_kin_df_ncols) - ncommon_cols
if len(combined_df) == expected_rows and len(combined_df.columns) == expected_cols:
print('PASS: successfully combined dfs with:', my_join, 'join')
else:
print('FAIL: ', my_join, 'join')
print('\nExpected no. of rows:', expected_rows
, '\nGot:', len(combined_df)
, '\nExpected no. of cols:', expected_cols
, '\nGot:', len(combined_df.columns))
elif my_join == 'inner':
if my_join in ('inner', 'right'):
print('combining with:', my_join)
expected_rows = afor_kin_df_nrows
expected_cols = (afor_df_ncols + afor_kin_df_ncols) - ncommon_cols
if len(combined_df) == expected_rows and len(combined_df.columns) == expected_cols:
print('PASS: successfully combined dfs with:', my_join, 'join')
else:
print('FAIL: ', my_join, 'join')
print('\nExpected no. of rows:', expected_rows
, '\nGot:', len(combined_df)
, '\nExpected no. of cols:', expected_cols
, '\nGot:', len(combined_df.columns))
elif my_join == 'left':
expected_rows = afor_df_nrows + ndiff1
expected_cols = (afor_df_ncols + afor_kin_df_ncols) - ncommon_cols
if len(combined_df) == expected_rows and len(combined_df.columns) == expected_cols:
print('PASS: successfully combined dfs with:', my_join, 'join')
else:
print('FAIL: ', my_join, 'join')
print('\nExpected no. of rows:', expected_rows
, '\nGot:', len(combined_df)
, '\nExpected no. of cols:', expected_cols
, '\nGot:', len(combined_df.columns))
elif my_join == 'right':
expected_rows = afor_kin_df_nrows
expected_cols = (afor_df_ncols + afor_kin_df_ncols) - ncommon_cols
if len(combined_df) == expected_rows and len(combined_df.columns) == expected_cols:
print('PASS: successfully combined dfs with:', my_join, 'join')
else:
print('FAIL: ', my_join, 'join')
print('\nExpected no. of rows:', expected_rows
, '\nGot:', len(combined_df)
, '\nExpected no. of cols:', expected_cols
, '\nGot:', len(combined_df.columns))
expected_cols = afor_df_ncols + afor_kin_df_ncols - ncommon_cols
if len(combined_df) == expected_rows and len(combined_df.columns) == expected_cols:
print('PASS: successfully combined dfs with:', my_join, 'join')
else:
print('FAIL: failed to combine dfs, expected rows and cols not matched')
print('FAIL: combined_df\'s expected rows and cols not matched')
fail = True # BAD practice! just a placeholder to avoid code duplication
print('\nExpected no. of rows:', expected_rows
, '\nGot:', len(combined_df)
, '\nExpected no. of cols:', expected_cols
, '\nGot:', len(combined_df.columns))
if fail:
sys.exit('ERROR: combined_df may be incorrectly combined')
else:
print('FAIL: numbers mismatch, mutations present in afor_kin_df but not in afor_df')
sys.exit('ERROR: Not all mutations in the kinship_df are present in the df with other ORs')
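#%% minimal sketch (toy frames, not the real data) of the expected-size arithmetic above:
# outer/left keeps every row of the left df plus the right-only rows (ndiff1); inner/right
# keeps only rows present in the right df; width is always ncols_left + ncols_right - ncommon
left_toy = pd.DataFrame({'mut': ['A', 'B', 'C'], 'or_logistic': [1.2, 0.8, 2.0]})
right_toy = pd.DataFrame({'mut': ['A', 'B'], 'or_kin': [1.1, 0.9]})
outer_toy = pd.merge(left_toy, right_toy, on = 'mut', how = 'outer')
print(outer_toy.shape) # (3, 3): 3 left rows + 0 right-only rows, 2 + 2 - 1 cols
inner_toy = pd.merge(left_toy, right_toy, on = 'mut', how = 'inner')
print(inner_toy.shape) # (2, 3): only the 2 muts present in both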
#%% check duplicate cols: ones containing suffix '_x' or '_y'
# should only be position
@@ -256,78 +224,73 @@ combined_or_df['position'].head()
# recheck
foo = combined_or_df.filter(regex = r'.*_x|_y', axis = 1)
print(foo.columns) # should only be position
print(foo.columns) # should be empty now
combined_or_df['af'].head()
combined_or_df.rename(columns = {'af': 'af_kin'}, inplace = True)
combined_or_df['af_kin']
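#%% minimal sketch (toy frames): pandas suffixes shared non-key columns with _x/_y on merge,
# which is what the regex filter above checks for; here it should come back empty
toy_a = pd.DataFrame({'mut': ['A'], 'position': [11]})
toy_b = pd.DataFrame({'mut': ['A'], 'position': [11]})
print(pd.merge(toy_a, toy_b, on = 'mut').columns) # ['mut', 'position_x', 'position_y']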
#%% calculate OR for kinship
combined_or_df['or_kin'] = np.exp(combined_or_df['beta'])
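#%% minimal sketch: the kinship output reports beta rather than an OR (see the input-file
# note above), so exponentiating the coefficient recovers an odds ratio
print(np.exp([0, 0.6931, -0.6931])) # ~[1.0, 2.0, 0.5]; np comes from the imports at the top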
#%% rearranging columns
print('Dim of df pre-formatting:', combined_or_df.shape)
# drop duplicate columns
#if combined_or_df['alternate_allele'].equals(combined_or_df['alt_allele0']):
# combined_or_df.drop('alternate_allele', axis = 1, inplace = True)
combined_or_df2 = combined_or_df.T.drop_duplicates().T # changes dtypes in cols
dup_cols = set(combined_or_df.columns).difference(combined_or_df2.columns)
#tot_diff is equal to n_diff
# drop some not required cols
combined_or_df.drop(list(dup_cols), axis = 1, inplace = True)
print(combined_or_df.columns)
combined_or_df.drop(['chromosome_text', 'chr', 'symbol', '_merge', ], axis = 1, inplace = True)
combined_or_df.rename(columns = {'ref_allele1': 'reference_allele'}, inplace = True)
print(combined_or_df.columns)
#%% reorder columns
#https://stackoverflow.com/questions/13148429/how-to-change-the-order-of-dataframe-columns
# check af: curiosity
# setting column's order
output_df = combined_or_df[['mutation', 'wild_type', 'position', 'mutant_type', 'mutationinformation'
, 'chr_num_allele', 'ref_allele', 'alt_allele'
, 'mut_info', 'mut_type', 'gene_id', 'gene_number', 'mut_region'
, 'reference_allele', 'alternate_allele', 'chromosome_number'
, 'afs', 'af_kin', 'ors_logistic', 'ors_chi_cus', 'or_kin', 'ors_fisher'
, 'pvals_logistic', 'pvals_fisher', 'p_wald', 'ci_lb_fisher', 'ci_ub_fisher'
, 'beta', 'se', 'logl_H1', 'l_remle','stat_chi', 'pvals_chi', 'n_diff' , 'n_miss']]
output_df = combined_or_df[['mutation',
'mutationinformation',
'wild_type',
'position',
'mutant_type',
'chr_num_allele',
'ref_allele',
'alt_allele',
'mut_info',
'mut_type',
'gene_id',
'gene_number',
'mut_region',
'reference_allele',
'alternate_allele',
'chromosome_number',
'af',
'af_kin',
'or_kin',
'or_logistic',
'or_mychisq',
'est_chisq',
'or_fisher',
'ci_low_logistic',
'ci_hi_logistic',
'ci_low_fisher',
'ci_hi_fisher',
'pwald_kin',
'pval_logistic',
'pval_fisher',
'pval_chisq',
'beta_logistic',
'beta_kin',
'se_logistic',
'se_kin',
'zval_logistic',
'logl_H1_kin',
'l_remle_kin',
'n_diff',
'tot_diff',
'n_miss']]
#%% output combined or df
#===============
# writing file
#===============
print('Writing file...')
#combined_or_df.to_csv(outfile, header = True, index = False)
output_df.to_csv(outfile, header = True, index = False)
print('Finished writing file:', outfile
, '\nNo. of rows:', len(combined_or_df)
, '\nNo. of cols:', len(combined_or_df.columns)
, '\n=========================================================')
#%% practice
df = pd.DataFrame()
column_names = ['x','y','z','mean']
for col in column_names:
df[col] = np.random.randint(0,100, size=10000)
df.head()
# drop a duplicate col: duplicated values, not necessarily duplicated colnames
df['xdup'] = df['x']
df
df = df.T.drop_duplicates().T
import math
math.exp(0)
df['expX'] = np.exp(df['x']) # math doesn't understand series dtype
df
#%%
# sanity check after rearranging
if combined_or_df.shape == output_df.shape and set(combined_or_df.columns) == set(output_df.columns):
print('PASS: Successfully formatted df with rearranged columns')
else:
sys.exit('FAIL: something went wrong when rearranging columns!')
#%% write file
print('\n====================================================================='
, '\nWriting output file:\n', outfile
, '\nNo. of rows:', len(output_df)
, '\nNo. of cols:', len(output_df.columns))
output_df.to_csv(outfile, index = False)


@@ -14,7 +14,7 @@ Created on Wed Jun 10 11:13:49 2020
#%% specify dirs
import os, sys
import pandas as pd
#import numpy as np
import numpy as np
import re
import argparse
@@ -26,8 +26,8 @@ from find_missense import find_missense
#%% command line args
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--drug', help = 'drug name', default = None)
arg_parser.add_argument('-g', '--gene', help = 'gene name (case sensitive)', default = None) # case sensitive
arg_parser.add_argument('-d', '--drug', help = 'drug name', default = 'pyrazinamide')
arg_parser.add_argument('-g', '--gene', help = 'gene name (case sensitive)', default = 'pncA') # case sensitive
arg_parser.add_argument('-s', '--start_coord', help = 'start of coding region (cds) of gene', default = 2288681) # pnca cds
arg_parser.add_argument('-e', '--end_coord', help = 'end of coding region (cds) of gene', default = 2289241) # pnca cds
@@ -37,6 +37,8 @@ args = arg_parser.parse_args()
#%% variables
#gene = 'pncA'
#drug = 'pyrazinamide'
#start_cds = 2288681
#end_cds = 2289241
# cmd variables
gene = args.gene
@@ -94,7 +96,7 @@ or_df.columns
info_df2 = pd.read_csv(gene_info, sep = '\t', header = 0) #303, 10
mis_mut_cover = (info_df2['chromosome_number'].nunique()/info_df2['chromosome_number'].count()) * 100
print('*****RESULT*****'
, '\nPercentage of missense mut in pncA:', mis_mut_cover
, '\nPercentage of MISsense mut in pncA:', mis_mut_cover
, '\n*****RESULT*****') #65.7%
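#%% minimal sketch (toy series): nunique()/count() is the fraction of distinct genomic
# positions among all missense rows, which is what the percentage above reports
toy_pos = pd.Series([10, 10, 11, 12])
print(toy_pos.nunique() / toy_pos.count() * 100) # 75.0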
# large file
@@ -117,8 +119,6 @@ genomic_pos_min = info_df['chromosome_number'].min()
genomic_pos_max = info_df['chromosome_number'].max()
# genomic coord for pnca coding region
#start_cds = 2288681
#end_cds = 2289241
cds_len = (end_cds-start_cds) + 1
pred_prot_len = (cds_len/3) - 1
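# worked example with the pncA defaults above (sanity arithmetic only):
# cds_len = (2289241 - 2288681) + 1 = 561 coding bases
# pred_prot_len = (561 / 3) - 1 = 186 residues (the -1 drops the stop codon)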
@@ -134,6 +134,7 @@ if genomic_pos_min <= start_cds and genomic_pos_max >= end_cds:
print ('PASS: coding region for gene included in snp_info.txt')
else:
print('FAIL: coding region for gene not included in info file snp_info.txt')
sys.exit('ERROR: coding region of gene not included in the info file')
#%% Extracting ref allele and alt allele as single letters
# info_df has some of these params as more than a single letter, which means that
@@ -162,7 +163,7 @@ del(orig_len, ncols_add)
# check dtypes
or_df.dtypes
info_df.dtypes
or_df.info()
#or_df.info()
# per the pandas documentation: "Pandas uses the object dtype for storing strings"
# check how many unique chr_num in info_df are in or_df
@@ -175,23 +176,22 @@ or_df['chromosome_number'].isin(genomic_pos_df['chr_pos']).sum() #182
if or_df['chromosome_number'].isin(genomic_pos_df['chr_pos']).sum() == len(or_df):
print('PASS: all genomic locs in or_df have meta data in snp_info.txt')
else:
print('FAIL: some genomic locs or_df chr number DO NOT have meta data in snp_info.txt')
sys.exit('FAIL: some genomic locs or_df chr number DO NOT have meta data in snp_info.txt')
#%% Perform merge
#join_type = 'inner'
#join_type = 'outer'
join_type = 'left'
#join_type = 'right'
#my_join = 'inner'
#my_join = 'outer'
my_join = 'left'
#my_join = 'right'
#dfm1 = pd.merge(or_df, info_df, on ='chromosome_number', how = join_type, indicator = True) # not unique!
dfm1 = pd.merge(or_df, info_df, on = ['chromosome_number', 'ref_allele', 'alt_allele'], how = join_type, indicator = True)
#dfm1 = pd.merge(or_df, info_df, on ='chromosome_number', how = my_join, indicator = True) # not unique!
dfm1 = pd.merge(or_df, info_df, on = ['chromosome_number', 'ref_allele', 'alt_allele'], how = my_join, indicator = True)
dfm1['_merge'].value_counts()
# count no. of missense mutations ONLY
dfm1.snp_info.str.count(r'(missense.*)').sum()
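#%% minimal sketch (toy series): str.count() with a regex counts matches per row, so the
# sum above is the total number of missense annotations in the merged frame
toy_snp = pd.Series(['missense_variant', 'synonymous_variant', 'missense_variant&stop_gained'])
print(toy_snp.str.count(r'(missense.*)').sum()) # 2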
dfm2 = pd.merge(or_df, info_df2, on = ['chromosome_number', 'ref_allele', 'alt_allele'], how = join_type, indicator = True)
dfm2 = pd.merge(or_df, info_df2, on = ['chromosome_number', 'ref_allele', 'alt_allele'], how = my_join, indicator = True)
dfm2['_merge'].value_counts()
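#%% minimal sketch (toy frames): indicator = True adds a categorical '_merge' column flagging
# where each row came from, which is what the value_counts() calls above inspect
toy_or = pd.DataFrame({'chromosome_number': [1, 2]})
toy_info = pd.DataFrame({'chromosome_number': [1]})
toy_m = pd.merge(toy_or, toy_info, on = 'chromosome_number', how = 'left', indicator = True)
print(toy_m['_merge'].value_counts()) # both: 1, left_only: 1, right_only: 0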
# count no. of nan
@@ -241,12 +241,90 @@ else:
, '\nGot no. of cols:', len(dfm2_mis.columns))
sys.exit()
#%% formatting data for output
print('no. of cols pre-formatting:', len(dfm2_mis.columns))
#1) Add column: OR for kinship calculated from beta coeff
print('converting beta coeff to OR by exponentiating:\n'
, dfm2_mis['beta'].head())
dfm2_mis['or_kin'] = np.exp(dfm2_mis['beta'])
print(dfm2_mis['or_kin'].head())
#2) rename af column
dfm2_mis.rename(columns = {'af': 'af_kin'
, 'beta': 'beta_kin'
, 'p_wald': 'pwald_kin'
, 'se': 'se_kin', 'logl_H1': 'logl_H1_kin'
, 'l_remle': 'l_remle_kin'}, inplace = True)
#3) drop some not required cols (including duplicate if you want)
#3a) drop duplicate columns
dfm2_mis2 = dfm2_mis.T.drop_duplicates().T #changes dtypes in cols, so not used
dup_cols = set(dfm2_mis.columns).difference(dfm2_mis2.columns)
print('Duplicate columns identified:', dup_cols)
dup_cols = {'alt_allele0', 'ps'} # didn't want to remove tot_diff
print('removing duplicate columns: kept one of the dup_cols, i.e. tot_diff')
dfm2_mis.drop(list(dup_cols), axis = 1, inplace = True)
print(dfm2_mis.columns)
#3b) other not useful columns
dfm2_mis.drop(['chromosome_text', 'chr', 'symbol', '_merge', ], axis = 1, inplace = True)
dfm2_mis.rename(columns = {'ref_allele1': 'reference_allele'}, inplace = True)
print(dfm2_mis.columns)
#4) reorder columns
orkin_linked = dfm2_mis[['mutationinformation',
'wild_type',
'position',
'mutant_type',
'chr_num_allele',
'ref_allele',
'alt_allele',
'mut_info',
'mut_type',
'gene_id',
'gene_number',
'mut_region',
'reference_allele',
'alternate_allele',
'chromosome_number',
# 'afs',
'af_kin',
'or_kin',
# 'ors_logistic',
# 'ors_chi_cus',
# 'ors_fisher',
'pwald_kin',
# 'pvals_logistic',
# 'pvals_fisher',
# 'ci_lb_fisher',
# 'ci_ub_fisher' ,
'beta_kin',
'se_kin',
'logl_H1_kin',
'l_remle_kin',
# 'stat_chi',
# 'pvals_chi',
'n_diff',
'tot_diff',
'n_miss']]
# sanity check after reassigning columns
if orkin_linked.shape == dfm2_mis.shape and set(orkin_linked.columns) == set(dfm2_mis.columns):
print('PASS: Successfully formatted df with rearranged columns')
else:
sys.exit('FAIL: something went wrong when rearranging columns!')
#%% write file
print('Writing output file:\n', outfile_or_kin
print('\n====================================================================='
, '\nWriting output file:\n', outfile_or_kin
, '\nNo. of rows:', len(dfm2_mis)
, '\nNo. of cols:', len(dfm2_mis.columns))
dfm2_mis.to_csv(outfile_or_kin, index = False)
orkin_linked.to_csv(outfile_or_kin, index = False)
#%% diff b/w allele0 and 1: or_df
#https://stackoverflow.com/questions/40348541/pandas-diff-with-string