From eb021349fe159d9642de613dec3725d5e8384f9d Mon Sep 17 00:00:00 2001 From: Tanushree Tunstall Date: Mon, 23 Mar 2020 13:33:25 +0000 Subject: [PATCH] bug fixes and massive clean up of data extraction script --- .../scripts/plotting/logolas_logoplot.R | 7 - meta_data_analysis/.Rhistory | 10 +- meta_data_analysis/pnca_data_extraction.py | 1255 +++++++++++------ 3 files changed, 818 insertions(+), 454 deletions(-) diff --git a/mcsm_analysis/pyrazinamide/scripts/plotting/logolas_logoplot.R b/mcsm_analysis/pyrazinamide/scripts/plotting/logolas_logoplot.R index 45eaa37..44062eb 100644 --- a/mcsm_analysis/pyrazinamide/scripts/plotting/logolas_logoplot.R +++ b/mcsm_analysis/pyrazinamide/scripts/plotting/logolas_logoplot.R @@ -68,10 +68,6 @@ table(my_df$position == my_df$Position) c1 = unique(my_df$Position) # 130 nrow(my_df) # 3092 - - - - #FIXME #!!! RESOLVE !!! # get freq count of positions and add to the df @@ -99,9 +95,6 @@ my_data_snp = my_df[my_df$occurrence!=1,] #3072, 36...3019 u = unique(my_data_snp$Position) #96 - - - ######################################################################## # end of data extraction and cleaning for plots # ######################################################################## diff --git a/meta_data_analysis/.Rhistory b/meta_data_analysis/.Rhistory index 28b1797..ec7b60d 100644 --- a/meta_data_analysis/.Rhistory +++ b/meta_data_analysis/.Rhistory @@ -1,8 +1,3 @@ -, stringsAsFactors = F) -x = as.numeric(grepl(i,raw_data$all_muts_pza)) -# DV: pyrazinamide 0 or 1 -y = as.numeric(raw_data$pyrazinamide) -table(y,x) # run glm model model = glm(y ~ x, family = binomial) #model = glm(y ~ x, family = binomial(link = "logit")) @@ -510,3 +505,8 @@ outdir = paste0("../mcsm_analysis/",drug,"/Data/") outFile = "meta_data_with_AFandOR.csv" output_filename = paste0(outdir, outFile) output_filename +pnca_snps_or = read.csv(file.choose() +, stringsAsFactors = F +, header = T) +View(pnca_snps_or) +View(pnca_snps_or) diff --git a/meta_data_analysis/pnca_data_extraction.py b/meta_data_analysis/pnca_data_extraction.py index 25a596e..7588122 100755 --- a/meta_data_analysis/pnca_data_extraction.py +++ b/meta_data_analysis/pnca_data_extraction.py @@ -14,73 +14,76 @@ Created on Tue Aug 6 12:56:03 2019 # load libraries import os, sys import pandas as pd -#import numpy as np +import numpy as np #from pandas.api.types import is_string_dtype #from pandas.api.types import is_numeric_dtype -# to create dir -#my_dir = os.path.expanduser('~/some_dir') -#make sure mcsm_analysis/ exists -#or specify the output directory -#==================================================== -# TASK: extract ALL pncA mutations from GWAS data #======================================================== -#%% -#################### +# TASK: extract ALL pncA_p. mutations from GWAS data +# Input data file has the following format: each row = unique sample id +# id,country,lineage,sublineage,drtype,pyrazinamide,dr_mutations_pyrazinamide,other_mutations_pyrazinamide... +# 0,sampleID,USA,lineage2,lineage2.2.1,Drug-resistant,0.0,WT,pncA_p.POS; pncA_c.POS... +# where multiple mutations and multiple mutation types are separated by ';'. We are interested in the +# protein coding region i.e mutation with the 'p.' format. 
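#
# Editorial aside (illustrative only, not part of the patch; toy cell value):
# for a single cell of the mutation columns, the extraction described in this
# header amounts to splitting on ';' and keeping the 'pncA_p.' entries, e.g.
#
#   raw_cell = 'pncA_p.Gln10Pro; pncA_c.-11A>G; rpoB_p.Ser450Leu'   # hypothetical cell
#   muts = [m.strip() for m in raw_cell.split(';')]
#   pnca_coding = [m for m in muts if 'pncA_p.' in m]               # -> ['pncA_p.Gln10Pro']
#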
+ +# the script splits the mutations on the ';' and extracts protein coding muts only +# where each row is a separate mutation +# sample ids AND mutations are NOT unique, but the COMBINATION (sample id + mutation) = unique + +# output files: +# 0) pnca_common_ids.csv +# 1) pnca_ambiguous_muts.csv +# 2) pnca_mcsm_snps.csv +# 3) pnca_metadata.csv +# 4) pnca_comp_snps.csv +# 5) pnca_all_muts_msa.csv +# 6) pnca_mutational_positons.csv +#======================================================== +#%% specify homedir as python doesn't recognise tilde +homedir = os.path.expanduser('~') + # my working dir os.getcwd() -homedir = os.path.expanduser('~') # spyder/python doesn't recognise tilde os.chdir(homedir + '/git/LSHTM_analysis/meta_data_analysis') os.getcwd() -#%% + +# import aa dict from reference_dict import my_aa_dict #CHECK DIR STRUC THERE! -#%% -############# specify variables for input and output paths and filenames -drug = "pyrazinamide" -gene = "pnca" +#======================================================== -datadir = homedir + "/git/Data" -basedir = datadir + "/" + drug + "/input" +#drug = 'pyrazinamide' +gene = 'pncA' +gene_match = gene + '_p.' -# input -inpath = "/original" -in_filename = "/original_tanushree_data_v2.csv" -infile = basedir + inpath + in_filename -#print(infile) +#%% specify variables for input and output paths and filenames -# output: several output files +#======= +# input dir +#======= +indir = 'git/Data/pyrazinamide/input/original' + +#========= +# output dir +#========= +# several output files # output filenames in respective sections at the time of outputting files -outpath = "/processed" -outdir = basedir + outpath -#print(outdir) - -if not os.path.exists(datadir): - print('Error!', datadir, 'does not exist. Please ensure it exists. Dir struc specified in README.md') - os.makedirs(datadir) - die() - -if not os.path.exists(outdir): - print('Error!', outdir, 'does not exist.Please ensure it exists. Dir struc specified in README.md') - exit() - -else: - print('Dir exists: Carrying on') - -################## end of variable assignment for input and output files -#%% +outdir = 'git/Data/pyrazinamide/output' +#%%end of variable assignment for input and output files #============================================================================== -############ -# STEP 1: Read file original_tanushree_data_v2.csv -############ -#data_file = data_dir + '/input/original/original_tanushree_data_v2.csv' -meta_data = pd.read_csv(infile, sep = ',') +#%% Read files + +in_filename = 'original_tanushree_data_v2.csv' +infile = homedir + '/' + indir + '/' + in_filename +print('Reading input master file:', infile) + +master_data = pd.read_csv(infile, sep = ',') # column names -list(meta_data.columns) +#list(master_data.columns) # extract elevant columns to extract from meta data related to the pyrazinamide -meta_data = meta_data[['id' +meta_data = master_data[['id' ,'country' ,'lineage' ,'sublineage' @@ -90,131 +93,290 @@ meta_data = meta_data[['id' , 'other_mutations_pyrazinamide' ]] -# checks +del(master_data) + +# checks and results total_samples = meta_data['id'].nunique() +print('RESULT: Total samples:', total_samples) +print('======================================================================') # counts NA per column meta_data.isna().sum() - +print('No. 
of NAs/column:' + '\n', meta_data.isna().sum()) +print('======================================================================') # glance meta_data.head() # equivalent of table in R # pyrazinamide counts meta_data.pyrazinamide.value_counts() +print('RESULT: Sus and Res samples:\n', meta_data.pyrazinamide.value_counts()) +print('======================================================================') -clear variables -#del(basedir, datadir, inpath, infile) +# clear variables +del(indir, in_filename,infile) +#del(outdir) +#%% +# !!!IMPORTANT!!! sanity check: +# This is to find out how many samples have 1 and more than 1 mutation,so you +# can use it to check if your data extraction process for dr_muts +# and other_muts has worked correctly AND also to check the dim of the +# final formatted data. +# This will have: unique COMBINATION of sample id and pncA_p.mutations + +#======== +# First: counting pncA_p. mutations in dr_mutations_pyrazinamide column +#======== +print('Now counting WT & pncA_p. muts within the column: dr_mutations_pyrazinamide') + +# drop na and extract a clean df +clean_df = meta_data.dropna(subset=['dr_mutations_pyrazinamide']) + +# sanity check: count na +na_count = meta_data['dr_mutations_pyrazinamide'].isna().sum() + +if len(clean_df) == (total_samples - na_count): + print('PASS: clean_df extracted: length is', len(clean_df), + '\nNo.of NA s=', na_count, '/', total_samples) +else: + print('FAIL: dropping NA failed') +print('======================================================================') + +dr_pnca_count = 0 +wt = 0 +id_dr = [] +id2_dr = [] + +for i, id in enumerate(clean_df.id): +# print (i, id) +# id_dr.append(id) +# count_pnca_dr = clean_df.dr_mutations_pyrazinamide.iloc[i].count('pncA_p.') #works 2201 + count_pnca_dr = clean_df.dr_mutations_pyrazinamide.iloc[i].count(gene_match) #works 2201 + if count_pnca_dr > 0: + id_dr.append(id) + if count_pnca_dr > 1: + id2_dr.append(id) +# print(id, count_pnca_dr) + dr_pnca_count = dr_pnca_count + count_pnca_dr + count_wt = clean_df.dr_mutations_pyrazinamide.iloc[i].count('WT') + wt = wt + count_wt + +print('RESULTS:') +print('Total WT in dr_mutations_pyrazinamide:', wt) +print('Total matches of', gene_match, 'in dr_mutations_pyrazinamide:', dr_pnca_count) +print('Total samples with > 1', gene_match, 'muts in dr_mutations_pyrazinamide:', len(id2_dr) ) +print('======================================================================') + +del(i, id, wt, id2_dr, clean_df, na_count, count_pnca_dr, count_wt) + +#======== +# Second: counting pncA_p. mutations in dr_mutations_pyrazinamide column +#======== +print('Now counting WT & pncA_p. 
muts within the column: other_mutations_pyrazinamide') + +# drop na and extract a clean df +clean_df = meta_data.dropna(subset=['other_mutations_pyrazinamide']) + +# sanity check: count na +na_count = meta_data['other_mutations_pyrazinamide'].isna().sum() + +if len(clean_df) == (total_samples - na_count): + print('PASS: clean_df extracted: length is', len(clean_df), + '\nNo.of NA s=', na_count, '/', total_samples) +else: + print('FAIL: dropping NA failed') +print('======================================================================') + +other_pnca_count = 0 +wt_other = 0 +id_other = [] +id2_other = [] + +for i, id in enumerate(clean_df.id): +# print (i, id) +# id_other.append(id) +# count_pnca_other = clean_df.other_mutations_pyrazinamide.iloc[i].count('pncA_p.') + count_pnca_other = clean_df.other_mutations_pyrazinamide.iloc[i].count(gene_match) + if count_pnca_other > 0: + id_other.append(id) + if count_pnca_other > 1: + id2_other.append(id) +# print(id, count_pnca_other) + other_pnca_count = other_pnca_count + count_pnca_other + count_wt = clean_df.other_mutations_pyrazinamide.iloc[i].count('WT') + wt_other = wt_other + count_wt +print('RESULTS:') +print('Total WT in other_mutations_pyrazinamide:', wt_other) +print('Total matches of', gene_match, 'in other_mutations_pyrazinamide:', other_pnca_count) +print('Total samples with > 1', gene_match, 'muts in other_mutations_pyrazinamide:', len(id2_other) ) +print('======================================================================') + +print('Predicting total no. of rows in your curated df:', dr_pnca_count + other_pnca_count ) +expected_rows = dr_pnca_count + other_pnca_count + +del(i, id, wt_other, clean_df, na_count, id2_other, count_pnca_other, count_wt) #%% ############ -# STEP 2: extract entries containing selected genes: -# pyrazinamide: pnca_p. -# in the dr_mutations and other mutations" -# as we are interested in the mutations in the protein coding region only -# (corresponding to a structure) -# and drop the entries with NA +# extracting dr and other muts separately along with the common cols ############# -meta_pza = meta_data.loc[meta_data.dr_mutations_pyrazinamide.str.contains('pncA_p.*', na = False)] -#2163 {RESULT: samples with dr_muts} -dr_id = meta_pza['id'].unique() -dr_id = pd.Series(dr_id) +print('======================================================================') +print('Extracting dr_muts in a dr_mutations_pyrazinamide with other meta_data') +print('gene to extract:', gene_match ) -meta_pza = meta_data.loc[meta_data.other_mutations_pyrazinamide.str.contains('pncA_p.*', na = False)] -#526 (RESULT: samples with other_muts) -other_id = meta_pza['id'].unique() -other_id = pd.Series(other_id) - -# FIXME: See if the sample ids are unique in each -# find any common IDs -dr_id.isin(other_id).sum() - -del(meta_pza) - -########################## -# pyrazinamide: pnca_p. -########################## -meta_data_pnca = meta_data[['id' +#=============== +# dr mutations: extract pncA_p. entries with meta data and ONLY dr_muts col +#=============== +# FIXME: replace pyrazinamide with variable containing the drug name +# !!! important !!! 
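#
# Editorial sketch (not in the patch) of how the FIXME above could be resolved,
# assuming the commented-out drug = 'pyrazinamide' near the top of the script
# is reinstated:
#
#   dr_muts_col    = 'dr_mutations_' + drug      # -> 'dr_mutations_pyrazinamide'
#   other_muts_col = 'other_mutations_' + drug   # -> 'other_mutations_pyrazinamide'
#   meta_data_dr   = meta_data[['id', 'country', 'lineage', 'sublineage',
#                               'drtype', drug, dr_muts_col]]
#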
+meta_data_dr = meta_data[['id' ,'country' ,'lineage' ,'sublineage' ,'drtype' , 'pyrazinamide' , 'dr_mutations_pyrazinamide' + ]] +print("expected dim should be:", len(meta_data), (len(meta_data.columns)-1) ) +print("actual dim:", meta_data_dr.shape ) +print('======================================================================') + +# Extract within this the gene of interest using string match +#meta_pnca_dr = meta_data.loc[meta_data.dr_mutations_pyrazinamide.str.contains('pncA_p.*', na = False)] +meta_pnca_dr = meta_data_dr.loc[meta_data_dr.dr_mutations_pyrazinamide.str.contains(gene_match, na = False)] + +dr_id = meta_pnca_dr['id'].unique() +print('RESULT: No. of samples with dr muts in pncA:', len(dr_id)) +print('checking RESULT:', + '\nexpected len =', len(id_dr), + '\nactual len =', len(meta_pnca_dr) ) + +if len(id_dr) == len(meta_pnca_dr): + print('PASS: lengths match') +else: + print('FAIL: length mismatch') +print('======================================================================') + +dr_id = pd.Series(dr_id) + +#================= +# other mutations: extract pncA_p. entries +#================== +print('======================================================================') +print('Extracting dr_muts in a other_mutations_pyrazinamide with other meta_data') +# FIXME: replace pyrazinamide with variable containing the drug name +# !!! important !!! +meta_data_other = meta_data[['id' + ,'country' + ,'lineage' + ,'sublineage' + ,'drtype' + , 'pyrazinamide' , 'other_mutations_pyrazinamide' ]] -del(meta_data) +print("expected dim should be:", len(meta_data), (len(meta_data.columns)-1) ) +print("actual dim:", meta_data_other.shape ) +print('======================================================================') + +# Extract within this the gene of interest using string match +meta_pnca_other = meta_data_other.loc[meta_data_other.other_mutations_pyrazinamide.str.contains(gene_match, na = False)] + +other_id = meta_pnca_other['id'].unique() +print('RESULT: No. of samples with other muts:', len(other_id)) +print('checking RESULT:', + '\nexpected len =', len(id_other), + '\nactual len =', len(meta_pnca_other) ) + +if len(id_other) == len(meta_pnca_other): + print('PASS: lengths match') +else: + print('FAIL: length mismatch') +print('======================================================================') + +other_id = pd.Series(other_id) +#%% Find common IDs +print('Now extracting common_ids...') +common_mut_ids = dr_id.isin(other_id).sum() +print('RESULT: No. of common Ids:', common_mut_ids) # sanity checks +# check if True: should be since these are common +dr_id.isin(other_id).sum() == other_id.isin(dr_id).sum() -# dr_mutations only -meta_pnca_dr = meta_data_pnca.loc[meta_data_pnca.dr_mutations_pyrazinamide.str.contains('pncA_p.*', na = False)] -meta_pnca_dr['id'].nunique() -del(meta_pnca_dr) +# check if the 24 Ids that are common are indeed the same! +# bit of a tautology, but better safe than sorry! 
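#
# Editorial aside (toy values, not part of the patch): Series.isin() is
# element-wise - a.isin(b) flags, for each element of a, whether it also occurs
# in b - and the .equals() check a few lines below then confirms the two
# common-id series really hold the same values, e.g.
#
#   import pandas as pd
#   a = pd.Series([1, 2, 3]); b = pd.Series([2, 3, 4])
#   a.isin(b)          # -> [False, True, True]
#   a.isin(b).sum()    # -> 2
#   b.isin(a).sum()    # -> 2
#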
+common_ids = dr_id[dr_id.isin(other_id)] +common_ids = common_ids.reset_index() +common_ids.columns = ['index', 'id'] -# other mutations -meta_pnca_other = meta_data_pnca.loc[meta_data_pnca.other_mutations_pyrazinamide.str.contains('pncA_p.*', na = False)] -meta_pnca_other['id'].nunique() -del(meta_pnca_other) +common_ids2 = other_id[other_id.isin(dr_id)] +common_ids2 = common_ids2.reset_index() +common_ids2.columns = ['index', 'id2'] -# Now extract "all" mutations -meta_pnca_all = meta_data_pnca.loc[meta_data_pnca.dr_mutations_pyrazinamide.str.contains('pncA_p.*') | meta_data_pnca.other_mutations_pyrazinamide.str.contains('pncA_p.*') ] +# should be True +print(common_ids['id'].equals(common_ids2['id2'])) -meta_pnca_all['id'].nunique() #RESULT: pnca mutations in ALL samples} -pnca_samples = len(meta_pnca_all) +# good sanity check: use it later to check pnca_sample_counts +expected_pnca_samples = ( len(meta_pnca_dr) + len(meta_pnca_other) - common_mut_ids ) +print("expected no. of pnca samples:", expected_pnca_samples) +print('======================================================================') +#%% write file +#print(outdir) +out_filename0 = gene.lower() + '_' + 'common_ids.csv' +outfile0 = homedir + '/' + outdir + '/' + out_filename0 + +#FIXME: CHECK line len(common_ids) +print('Writing file: common ids:\n', + '\nFilename:', out_filename0, + '\nPath:', homedir +'/'+ outdir, + '\nExpected no. of rows:', len(common_ids) ) + +common_ids.to_csv(outfile0) +print('======================================================================') + +# clear variables +del(dr_id, other_id, meta_data_dr, meta_data_other, common_ids, common_mut_ids, common_ids2) + +#%% Now extract "all" pncA mutations: i.e 'pncA_p.*' +print("extracting all pncA mutations from dr_ and other_ cols using string match:", gene_match) +#meta_pnca_all = meta_data.loc[meta_data.dr_mutations_pyrazinamide.str.contains(gene_match) | meta_data.other_mutations_pyrazinamide.str.contains(gene_match) ] +meta_pnca_all = meta_data.loc[meta_data.dr_mutations_pyrazinamide.str.contains(gene_match, na = False) | meta_data.other_mutations_pyrazinamide.str.contains(gene_match, na = False) ] +print('======================================================================') + +extracted_pnca_samples = meta_pnca_all['id'].nunique() +print("RESULT: actual no. of pnca samples extracted:", extracted_pnca_samples) +print('======================================================================') + +# sanity check: length of pnca samples +print('Performing sanity check:') +if extracted_pnca_samples == expected_pnca_samples: + print('No. of pnca samples:', len(meta_pnca_all), + '\nPASS: expected & actual no. of pnca samples match') +else: + print("FAIL: Debug please!") +print('======================================================================') + +# count NA in pyrazinamide column pnca_na = meta_pnca_all['pyrazinamide'].isna().sum() -comp_pnca_samples = pnca_samples - pnca_na +print("No. 
of pnca samples without pza testing i.e NA in pza column:",pnca_na) -#=#=#=#=#=#=# -# COMMENT: use it later to check number of complete samples from LF data -#=#=#=#=#=#=# +# use it later to check number of complete samples from LF data +comp_pnca_samples = len(meta_pnca_all) - pnca_na +print("comp pnca samples tested for pza:", comp_pnca_samples) +print('======================================================================') -# sanity checks -foo1 = meta_pnca_all.dr_mutations_pyrazinamide.value_counts() -foo2 = meta_pnca_all.other_mutations_pyrazinamide.value_counts() +# Comment: This is still dirty data since these +# are samples that have pncA_p. muts, but can have others as well +# since the format for mutations is mut1; mut2, etc. +print('This is still dirty data: samples have pncA_p. muts, but may have others as well', + '\nsince the format for mutations is mut1; mut2, etc.') +print('======================================================================') -# more sanity checks -# !CAUTION!: muts will change depending on your gene +#%% tidy_split():Function to split mutations on specified delimiter: ';' +#https://stackoverflow.com/questions/41476150/removing-space-from-dataframe-columns-in-pandas -# dr muts : insert -meta_pnca_all.loc[meta_pnca_all.dr_mutations_pyrazinamide.str.contains('pncA_p.Gln10Pro')] # -meta_pnca_all.loc[meta_pnca_all.dr_mutations_pyrazinamide.str.contains('pncA_p.Phe106Leu')] # empty -meta_pnca_all.loc[meta_pnca_all.dr_mutations_pyrazinamide.str.contains('pncA_p.Val139Leu')] - -meta_pnca_all.loc[meta_pnca_all.dr_mutations_pyrazinamide.str.contains('pncA_p.')] # exists # rows -meta_pnca_all.loc[meta_pnca_all.dr_mutations_pyrazinamide.str.contains('pncA_p.')] # exists # rows - -# other_muts -meta_pnca_all.loc[meta_pnca_all.other_mutations_pyrazinamide.str.contains('pncA_p.Gln10Pro*')] # empty -meta_pnca_all.loc[meta_pnca_all.other_mutations_pyrazinamide.str.contains('pncA_p.Phe106Leu')] - -#=#=#=#=#=#=#=#=#=# -# FIXME -# COMMENTS: both mutations columns are separated by ; -# CHECK if there are mutations that exist both in dr and other_muts! -# doesn't make any sense for the same mut to exist in both, I would have thought! -#=#=#=#=#=#=#=#=#=# - -# remove not required variables -del(meta_data_pnca) - -############ -# STEP 3: split the columns of -# a) dr_mutation_... (;) as -# the column has snps related to multiple genes. -# useful links -# https://stackoverflow.com/questions/17116814/pandas-how-do-i-split-text-in-a-column-into-multiple-rows -# this one works beautifully -# https://stackoverflow.com/questions/12680754/split-explode-pandas-dataframe-string-entry-to-separate-rows -############ - -# sanity check: counts NA per column afer subsetted df: i.e in meta_pza(with pncA_p. 
extracted mutations) -meta_pnca_all.isna().sum() - -#=#=#=#=#=#=#=#=#=# -# COMMENT: no NA's in dr_mutations/other_mutations_columns -#=#=#=#=#=#=#=#=#=# +print('Performing tidy_spllit(): to separate the mutations into indivdual rows') # define the split function def tidy_split(df, column, sep='|', keep=False): """ @@ -239,7 +401,7 @@ def tidy_split(df, column, sep='|', keep=False): """ indexes = list() new_values = list() - #df = df.dropna(subset=[column])#<<<<<<-----see this incase you need to uncomment based on use case + #df = df.dropna(subset=[column])#!!!!-----see this incase you need to uncomment based on use case for i, presplit in enumerate(df[column].astype(str)): values = presplit.split(sep) if keep and len(values) > 1: @@ -251,132 +413,362 @@ def tidy_split(df, column, sep='|', keep=False): new_df = df.iloc[indexes, :].copy() new_df[column] = new_values return new_df - + +#%% end of tidy_split() +#========= +# DF1: dr_mutations_pyrazinamide +#========= ######## -# 3a: call tidy_split() on 'dr_mutations_pyrazinamide' column and remove leading white spaces -#https://stackoverflow.com/questions/41476150/removing-space-from-dataframe-columns-in-pandas +# tidy_split(): on 'dr_mutations_pyrazinamide' column and remove leading white spaces ######## -meta_pnca_WF0 = tidy_split(meta_pnca_all, 'dr_mutations_pyrazinamide', sep = ';') - +col_to_split1 = 'dr_mutations_pyrazinamide' +print ('Firstly, applying tidy split on dr df:', meta_pnca_dr.shape, + '\ncolumn name:', col_to_split1) +print('======================================================================') +# apply tidy_split() +dr_WF0 = tidy_split(meta_pnca_dr, col_to_split1, sep = ';') # remove leading white space else these are counted as distinct mutations as well -meta_pnca_WF0['dr_mutations_pyrazinamide'] = meta_pnca_WF0['dr_mutations_pyrazinamide'].str.lstrip() +dr_WF0['dr_mutations_pyrazinamide'] = dr_WF0['dr_mutations_pyrazinamide'].str.lstrip() -######## -# 3b: call function on 'other_mutations_pyrazinamide' column and remove leading white spaces -######## -meta_pnca_WF1 = tidy_split(meta_pnca_WF0, 'other_mutations_pyrazinamide', sep = ';') +# extract only the samples/rows with pncA_p. 
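#
# Editorial aside (toy data, not part of the patch): roughly what tidy_split()
# does to a multi-mutation cell before the pncA_p. filter below is applied -
#
#   import pandas as pd
#   toy = pd.DataFrame({'id': ['sample1'],
#                       'dr_mutations_pyrazinamide': ['pncA_p.Gln10Pro; rpoB_p.Ser450Leu']})
#   tidy_split(toy, 'dr_mutations_pyrazinamide', sep=';')
#   # -> two rows sharing id 'sample1': 'pncA_p.Gln10Pro' and ' rpoB_p.Ser450Leu'
#   #    (note the leading space on the second value, hence the .str.lstrip() above;
#   #     'rpoB_p.Ser450Leu' is a made-up non-pncA mutation kept only to show the split)
#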
+dr_pnca_WF0 = dr_WF0.loc[dr_WF0.dr_mutations_pyrazinamide.str.contains(gene_match)] -# remove the leading white spaces in the column -meta_pnca_WF1['other_mutations_pyrazinamide'] = meta_pnca_WF1['other_mutations_pyrazinamide'].str.strip() +print('lengths after tidy split and extracting', gene_match, 'muts:', + '\nold length:' , len(meta_pnca_dr), + '\nlen after split:', len(dr_WF0), + '\ndr_pnca_WF0 length:', len(dr_pnca_WF0), + '\nexpected len:', dr_pnca_count) -########## -# Step 4: Reshape data so that all mutations are in one column and the -# annotations for the mutation reflect the column name -# LINK: http://www.datasciencemadesimple.com/reshape-wide-long-pandas-python-melt-function/ - -# data frame “df” is passed to melt() function -# id_vars is the variable which need to be left unaltered -# var_name are the column names so we named it as 'mutation_info' -# value_name are its values so we named it as 'mutation' -########## -meta_pnca_WF1.columns - -meta_pnca_LF0 = pd.melt(meta_pnca_WF1 - , id_vars = ['id', 'country', 'lineage', 'sublineage', 'drtype', 'pyrazinamide'] - , var_name = 'mutation_info' - , value_name = 'mutation') - -# sanity check: should be true -if len(meta_pnca_LF0) == len(meta_pnca_WF1)*2: - print('sanity check passed: Long format df has the expected length') +if len(dr_pnca_WF0) == dr_pnca_count: + print('PASS: length of dr_pnca_WF0 match with expected length') else: - print("Sanity check failed: Debug please!") + print('FAIL: lengths mismatch') +print('======================================================================') + +# count the freq of "dr_muts" samples +dr_muts_df = dr_pnca_WF0 [['id', 'dr_mutations_pyrazinamide']] +print("dim of dr_muts_df:", dr_muts_df.shape) + +# add freq column +dr_muts_df['dr_sample_freq'] = dr_muts_df.groupby('id')['id'].transform('count') +#dr_muts_df['dr_sample_freq'] = dr_muts_df.loc[dr_muts_df.groupby('id')].transform('count') +print("revised dim of dr_muts_df:", dr_muts_df.shape) + +c1 = dr_muts_df.dr_sample_freq.value_counts() +print("counting no. of sample frequency\n:", c1) +print('======================================================================') + +# sanity check: length of pnca samples +if len(dr_pnca_WF0) == c1.sum(): + print('PASS: WF data has expected length', + '\nlength of dr_pnca WFO:', c1.sum() ) +else: + print("FAIL: Debug please!") + +print('======================================================================') + +#!!! Important !!! 
+# Assign "column name" on which split was performed as an extra column +# This is so you can identify if mutations are dr_type or other in the final df +dr_df = dr_pnca_WF0.assign(mutation_info = 'dr_mutations_pyrazinamide') +print("dim of dr_df:", dr_df.shape) +print('======================================================================') +print('End of tidy split() on dr_muts, and added an extra column relecting mut_category') +print('======================================================================') + +#%% +#========= +# DF2: other_mutations_pyrazinamdie +#========= +######## +# tidy_split(): on 'other_mutations_pyrazinamide' column and remove leading white spaces +######## +col_to_split2 = 'other_mutations_pyrazinamide' +print ("applying second tidy split separately on df:", meta_pnca_other.shape, '\n' + , "column name:", col_to_split2) +print('======================================================================') + +# apply tidy_split() +other_WF1 = tidy_split(meta_pnca_other, col_to_split2, sep = ';') +# remove the leading white spaces in the column +other_WF1['other_mutations_pyrazinamide'] = other_WF1['other_mutations_pyrazinamide'].str.strip() + +# extract only the samples/rows with pncA_p. +other_pnca_WF1 = other_WF1.loc[other_WF1.other_mutations_pyrazinamide.str.contains(gene_match)] + +print('lengths after tidy split and extracting', gene_match, 'muts:', + '\nold length:' , len(meta_pnca_other), + '\nlen after split:', len(other_WF1), + '\nother_pnca_WF1 length:', len(other_pnca_WF1), + '\nexpected len:', other_pnca_count) + +if len(other_pnca_WF1) == other_pnca_count: + print('PASS: length of dr_pnca_WF0 match with expected length') +else: + print('FAIL: lengths mismatch') + +print('======================================================================') +# count the freq of "other muts" samples +other_muts_df = other_pnca_WF1 [['id', 'other_mutations_pyrazinamide']] +print("dim of other_muts_df:", other_muts_df.shape) + +# add freq column +other_muts_df['other_sample_freq'] = other_muts_df.groupby('id')['id'].transform('count') +print("revised dim of other_muts_df:", other_muts_df.shape) + +c2 = other_muts_df.other_sample_freq.value_counts() +print("counting no. of sample frequency\n:", c2) +print('======================================================================') +# sanity check: length of pnca samples +if len(other_pnca_WF1) == c2.sum(): + print('PASS: WF data has expected length', + '\nlength of other_pnca WFO:', c2.sum() ) +else: + print("FAIL: Debug please!") +print('======================================================================') + +#!!! Important !!! +# Assign "column name" on which split was performed as an extra column +# This is so you can identify if mutations are dr_type or other in the final df +other_df = other_pnca_WF1.assign(mutation_info = 'other_mutations_pyrazinamide') +print("dim of other_df:", other_df.shape) +print('======================================================================') +print('End of tidy split() on other_muts, and added an extra column relecting mut_category') +print('======================================================================') +#%% +#========== +# Concatentating the two dfs: equivalent of rbind in R +#========== +#!!! important !!! +# change column names to allow concat: +# dr_muts.. 
& other_muts : "mutation" +print('Now concatenating the two dfs by row') + +dr_df.columns +dr_df.rename(columns = {'dr_mutations_pyrazinamide': 'mutation'}, inplace = True) +dr_df.columns + +other_df.columns +other_df.rename(columns = {'other_mutations_pyrazinamide': 'mutation'}, inplace = True) +other_df.columns + +print('======================================================================') +print('Now appending the two dfs:', + '\ndr_df dim:', dr_df.shape, + '\nother_df dim:', other_df.shape, + '\ndr_df length:', len(dr_df), + '\nother_df length:', len(other_df), + '\nexpected length:', len(dr_df) + len(other_df) ) +print('======================================================================') + +# checking colnames before concat +print("checking colnames BEFORE concatenating the two dfs...") +if (set(dr_df.columns) == set(other_df.columns)): + print('PASS: column names match necessary for merging two dfs') +else: + print("FAIL: Debug please!") + +# concatenate (axis = 0): Rbind +pnca_LF0 = pd.concat([dr_df, other_df], ignore_index = True, axis = 0) + +# checking colnames and length after concat +print("checking colnames AFTER concatenating the two dfs...") +if (set(dr_df.columns) == set(pnca_LF0.columns)): + print('PASS: column names match') +else: + print("FAIL: Debug please!") + +print("checking length AFTER concatenating the two dfs...") + +if len(pnca_LF0) == len(dr_df) + len(other_df): + print("PASS:length of df after concat match") +else: + print("FAIL: length mismatch") +print('======================================================================') +#%% ########### -# Step 5: This is still dirty data. Filter LF data so that you only have -# mutations corresponding to pnca_p. +# This is hopefully clean data, but just double check +# Filter LF data so that you only have +# mutations corresponding to pncA_p.* (string match pattern) # this will be your list you run OR calcs ########### -meta_pnca_LF1 = meta_pnca_LF0[meta_pnca_LF0['mutation'].str.contains('pncA_p.*')] +print('length of pnca_LF0:', len(pnca_LF0), + '\nThis should be what you need. But just double check and extract', gene_match, + '\nfrom LF0 (concatenated data)') -# sanity checks -# unique samples -meta_pnca_LF1['id'].nunique() -if len(meta_pnca_all) == meta_pnca_LF1['id'].nunique(): - print("Sanity check passed: No of samples with pncA mutations match") +print('using string match:', gene_match) + +print('Double checking and creating df: pnca_LF1') +pnca_LF1 = pnca_LF0[pnca_LF0['mutation'].str.contains(gene_match)] + +if len(pnca_LF0) == len(pnca_LF1): + print('PASS: length of pnca_LF0 and pnca_LF1 match', + '\nconfirming extraction and concatenating worked correctly') else: - print("Sanity check failed: Debug please!") + print('FAIL: BUT NOT FATAL!', + '\npnca_LF0 and pnca_LF1 lengths differ', + '\nsuggesting error in extraction process' + ' use pnca_LF1 for downstreama analysis') +print('======================================================================') +print('length of dfs pre and post processing...', + '\npnca WF data (unique samples in each row):', extracted_pnca_samples, + '\npnca LF data (unique mutation in each row):', len(pnca_LF1)) +print('======================================================================') +#%% +# final sanity check +print('Verifying whether extraction process worked correctly...') +if len(pnca_LF1) == expected_rows: + print('PASS: extraction process performed correctly', + '\nexpected length:', expected_rows, + '\ngot:', len(pnca_LF1), + '\nRESULT: Total no. 
of mutant sequences for logo plot:', len(pnca_LF1) ) -# count if all the mutations are indeed in the protein coding region -# i.e begin with pnca_p -meta_pnca_LF1['mutation'].str.count('pncA_p.').sum() - -# should be true. -# and check against the length of the df, which should match -if len(meta_pnca_LF1) == meta_pnca_LF1['mutation'].str.count('pncA_p.').sum(): - print("Sanity check passed: Long format data containing pnca mutations indeed correspond to pncA_p region") else: - print("Sanity check failed: Debug please!") + print('FAIL: extraction process has bugs', + '\nexpected length:', expected_rows, + '\ngot:', len(pnca_LF1), + '\Debug please') +#%% +print('Perfmorning some more sanity checks...') +# From LF1: +# no. of unique muts +distinct_muts = pnca_LF1.mutation.value_counts() +print("Distinct mutations:", len(distinct_muts)) -########### -# Step 6: Filter dataframe with "na" in the drug column -# This is because for OR, you can't use the snps that have the -# NA in the specified drug column -# it creates problems when performing calcs in R inside the loop -# so best to filter it here -########### -# NOT NEEDED FOR all snps, only for extracting valid OR snps -del (meta_pnca_WF0, meta_pnca_WF1, meta_pnca_LF0, meta_pnca_all) +# no. of samples contributing the unique muta +pnca_LF1.id.nunique() +print("No.of samples contributing to distinct muts:", pnca_LF1.id.nunique() ) -########### -# Step 7: count unique pncA_p mutations (all and comp cases) -########### -meta_pnca_LF1['mutation'].nunique() -meta_pnca_LF1.groupby('mutation_info').nunique() +# no. of dr and other muts +mut_grouped = pnca_LF1.groupby('mutation_info').mutation.nunique() +print("No.of unique dr and other muts:", pnca_LF1.groupby('mutation_info').mutation.nunique() ) -meta_pnca_LF1['id'].nunique() -meta_pnca_LF1['mutation'].nunique() -meta_pnca_LF1.groupby('id').nunique() +# sanity check +if len(distinct_muts) == mut_grouped.sum() : + print("PASS:length of LF1 is as expected ") +else: + print('FAIL: Mistmatch in count of muts', + '\nexpected count:', len(distinct_muts), + '\nactual count:', mut_grouped.sum(), + '\nmuts should be distinct within dr* and other* type', + '\ninspecting ...') + muts_split = list(pnca_LF1.groupby('mutation_info')) + dr_muts = muts_split[0][1].mutation + other_muts = muts_split[1][1].mutation +# print('splitting muts by mut_info:', muts_split) + print('no.of dr_muts samples:', len(dr_muts)) + print('no. of other_muts samples', len(other_muts)) +#%% +# !!! IMPORTANT !!!! +# sanity check: There should not be any common muts +# i.e the same mutation cannot be classed as a 'drug' AND 'others' +if dr_muts.isin(other_muts).sum() & other_muts.isin(dr_muts).sum() > 0: + print('WARNING: Ambiguous muts detected in dr_ and other_ mutation category') +else: + print('PASS: NO ambiguous muts detected', + '\nMuts are unique to dr_ and other_ mutation class') -########### -# Step 8: convert all snps only (IN LOWERCASE) -# because my mcsm file intergated has lowercase -########### -# convert mutation to lower case as it needs to exactly match the dict key -#meta_pnca_LF1['mutation'] = meta_pnca_LF1.mutation.str.lower() # WARNINGS: suggested to use .loc -meta_pnca_LF1['mutation'] = meta_pnca_LF1.loc[:, 'mutation'].str.lower() +# inspect dr_muts and other muts +if dr_muts.isin(other_muts).sum() & other_muts.isin(dr_muts).sum() > 0: + print('Finding ambiguous muts...', + '\n==========================================================', + '\nTotal no. 
of samples in dr_muts present in other_muts:', dr_muts.isin(other_muts).sum(), + '\nThese are:\n', dr_muts[dr_muts.isin(other_muts)], + '\n==========================================================', + '\nTotal no. of samples in other_muts present in dr_muts:', other_muts.isin(dr_muts).sum(), + '\nThese are:\n', other_muts[other_muts.isin(dr_muts)], + '\n==========================================================') +else: + print('Error: ambiguous muts present, but extraction failed. Debug!') -########### -# Step 9 : Split 'mutation' column into three: wild_type, position and -# mutant_type separately. Then map three letter code to one from the -# referece_dict imported pncaeady. First convert to mutation to lowercase -# to allow to match entries from dict -########### +print('======================================================================') +print('Counting no. of ambiguous muts...') + +if dr_muts[dr_muts.isin(other_muts)].nunique() == other_muts[other_muts.isin(dr_muts)].nunique(): + common_muts = dr_muts[dr_muts.isin(other_muts)].value_counts().keys().tolist() + print('No. of ambigiuous muts detected:'+ str(len(common_muts)), + 'list of ambiguous mutations (see below):', *common_muts, sep = '\n') +else: + print('Error: ambiguous muts detected, but extraction failed. Debug!', + '\nNo. of ambiguous muts in dr:', len(dr_muts[dr_muts.isin(other_muts)].value_counts().keys().tolist() ), + '\nNo. of ambiguous muts in other:', len(other_muts[other_muts.isin(dr_muts)].value_counts().keys().tolist())) + +#%% clear variables +del(id_dr, id_other, meta_data, meta_pnca_dr, meta_pnca_other, mut_grouped, muts_split, other_WF1, other_df, other_muts_df, other_pnca_count, pnca_LF0, pnca_na) + +del(c1, c2, col_to_split1, col_to_split2, comp_pnca_samples, dr_WF0, dr_df, dr_muts_df, dr_pnca_WF0, dr_pnca_count, expected_pnca_samples, other_pnca_WF1) + +#%% end of data extraction and some files writing. Below are some more files writing. + + + + + + + + + + + + + + + + +#%%: write file: ambiguous muts +# uncomment as necessary +#print(outdir) +#dr_muts.to_csv('dr_muts.csv', header = True) +#other_muts.to_csv('other_muts.csv', header = True) + +out_filename1 = gene.lower() + '_' + 'ambiguous_muts.csv' +outfile1 = homedir + '/' + outdir + '/' + out_filename1 +print('Writing file: ambiguous muts...', + '\nFilename:', out_filename1, + '\nPath:', homedir +'/'+ outdir) + +#common_muts = ['pncA_p.Val180Phe','pncA_p.Gln10Pro'] # test +inspect = pnca_LF1[pnca_LF1['mutation'].isin(common_muts)] +inspect.to_csv(outfile1) + +print('Finished writing:', out_filename1, '\nExpected no. of rows (no. of samples with the ambiguous muts present):', dr_muts.isin(other_muts).sum() + other_muts.isin(dr_muts).sum()) +print('======================================================================') +del(out_filename1) +#%% +#=========== +# Split 'mutation' column into three: wild_type, position and +# mutant_type separately. Then map three letter code to one using +# reference_dict. +# First: Import reference dict +# Second: convert to mutation to lowercase for compatibility with dict +#=========== +from reference_dict import my_aa_dict # CHECK DIR STRUC THERE! +pnca_LF1['mutation'] = pnca_LF1.loc[:, 'mutation'].str.lower() #======= -# Step 9a: iterate through the dict, create a lookup dict i.e +# Iterate through the dict, create a lookup dict i.e # lookup_dict = {three_letter_code: one_letter_code}. 
# lookup dict should be the key and the value (you want to create a column for) # Then use this to perform the mapping separetly for wild type and mutant cols. -# The three letter code is extracted using a regex match from the dataframe and then converted +# The three letter code is extracted using a string match match from the dataframe and then converted # to 'pandas series'since map only works in pandas series #======= -# initialise a sub dict that is a lookup dict for three letter code to one lookup_dict = dict() for k, v in my_aa_dict.items(): lookup_dict[k] = v['one_letter_code'] - wt = meta_pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on - meta_pnca_LF1['wild_type'] = wt.map(lookup_dict) - mut = meta_pnca_LF1['mutation'].str.extract('\d+(\w{3})$').squeeze() - meta_pnca_LF1['mutant_type'] = mut.map(lookup_dict) + wt = pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on + pnca_LF1['wild_type'] = wt.map(lookup_dict) + mut = pnca_LF1['mutation'].str.extract('\d+(\w{3})$').squeeze() + pnca_LF1['mutant_type'] = mut.map(lookup_dict) -# extract position info from mutation column separetly using regex -meta_pnca_LF1['position'] = meta_pnca_LF1['mutation'].str.extract(r'(\d+)') +# extract position info from mutation column separetly using string match +pnca_LF1['position'] = pnca_LF1['mutation'].str.extract(r'(\d+)') # clear variables del(k, v, wt, mut, lookup_dict) - +print('======================================================================') #========= -# Step 9b: iterate through the dict, create a lookup dict that i.e +# iterate through the dict, create a lookup dict that i.e # lookup_dict = {three_letter_code: aa_prop_water} # Do this for both wild_type and mutant as above. #========= @@ -386,18 +778,18 @@ lookup_dict = dict() for k, v in my_aa_dict.items(): lookup_dict[k] = v['aa_prop_water'] #print(lookup_dict) - wt = meta_pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on - meta_pnca_LF1['wt_prop_water'] = wt.map(lookup_dict) - mut = meta_pnca_LF1['mutation'].str.extract('\d+(\w{3})$').squeeze() - meta_pnca_LF1['mut_prop_water'] = mut.map(lookup_dict) + wt = pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on + pnca_LF1['wt_prop_water'] = wt.map(lookup_dict) + mut = pnca_LF1['mutation'].str.extract('\d+(\w{3})$').squeeze() + pnca_LF1['mut_prop_water'] = mut.map(lookup_dict) # added two more cols # clear variables del(k, v, wt, mut, lookup_dict) - +print('======================================================================') #======== -# Step 9c: iterate through the dict, create a lookup dict that i.e +# iterate through the dict, create a lookup dict that i.e # lookup_dict = {three_letter_code: aa_prop_polarity} # Do this for both wild_type and mutant as above. 
#========= @@ -407,244 +799,223 @@ lookup_dict = dict() for k, v in my_aa_dict.items(): lookup_dict[k] = v['aa_prop_polarity'] #print(lookup_dict) - wt = meta_pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on - meta_pnca_LF1['wt_prop_polarity'] = wt.map(lookup_dict) - mut = meta_pnca_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze() - meta_pnca_LF1['mut_prop_polarity'] = mut.map(lookup_dict) + wt = pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on + pnca_LF1['wt_prop_polarity'] = wt.map(lookup_dict) + mut = pnca_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze() + pnca_LF1['mut_prop_polarity'] = mut.map(lookup_dict) # added two more cols # clear variables del(k, v, wt, mut, lookup_dict) +print('======================================================================') +#======== +# iterate through the dict, create a lookup dict that i.e +# lookup_dict = {three_letter_code: aa_taylor} +# Do this for both wild_type and mutant as above. +# caution: taylor mapping will create a list within a column +#========= +#lookup_dict = dict() + +#for k, v in my_aa_dict.items(): +# lookup_dict[k] = v['aa_taylor'] +# #print(lookup_dict) +# wt = pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on +# pnca_LF1['wt_taylor'] = wt.map(lookup_dict) +# mut = pnca_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze() +# pnca_LF1['mut_taylor'] = mut.map(lookup_dict) + +# added two more cols +# clear variables +#del(k, v, wt, mut, lookup_dict) +#print('======================================================================') + +#======== +# iterate through the dict, create a lookup dict that i.e +# lookup_dict = {three_letter_code: aa_calcprop} +# Do this for both wild_type and mutant as above. +#========= +lookup_dict = dict() + +for k, v in my_aa_dict.items(): + lookup_dict[k] = v['aa_calcprop'] + #print(lookup_dict) + wt = pnca_LF1['mutation'].str.extract('pnca_p.(\w{3})').squeeze() # converts to a series that map works on + pnca_LF1['wt_calcprop'] = wt.map(lookup_dict) + mut = pnca_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze() + pnca_LF1['mut_calcprop'] = mut.map(lookup_dict) + +# added two more cols +# clear variables +del(k, v, wt, mut, lookup_dict) +print('======================================================================') ######## -# Step 10: combine the wild_type+poistion+mutant_type columns to generate +# combine the wild_type+poistion+mutant_type columns to generate # Mutationinformation (matches mCSM output field) # Remember to use .map(str) for int col types to allow string concatenation ######### -meta_pnca_LF1['Mutationinformation'] = meta_pnca_LF1['wild_type'] + meta_pnca_LF1.position.map(str) + meta_pnca_LF1['mutant_type'] +pnca_LF1['Mutationinformation'] = pnca_LF1['wild_type'] + pnca_LF1.position.map(str) + pnca_LF1['mutant_type'] +print('Created column: Mutationinformation') +print('======================================================================') +#%% Write file: mCSM muts +snps_only = pd.DataFrame(pnca_LF1['Mutationinformation'].unique()) +snps_only.head() +# assign column name +snps_only.columns = ['Mutationinformation'] -#=#=#=#=#=#=# -# Step 11: -# COMMENT: there is more processing in the older version of this script -# consult if necessary -# possibly due to the presence of true_wt -# since this file doesn't contain any true_wt, we won't need it(hopefully!) 
-#=#=#=#=#=#=# +# count how many positions this corresponds to +pos_only = pd.DataFrame(pnca_LF1['position'].unique()) -#%% -########### -# Step 12: Output files for only SNPs to run mCSM -########### - -#========= -# Step 12a: all SNPs to run mCSM -#========= -snps_only = pd.DataFrame(meta_pnca_LF1['Mutationinformation'].unique()) #336 -pos_only = pd.DataFrame(meta_pnca_LF1['position'].unique()) #131 - -# since 186 is not part of struc: it is one less -#FIXME! - -# assign meaningful colnames -#snps_only.rename({0 : 'all_pnca_snps'}, axis = 1, inplace = True) -#list(snps_only.columns) -snps_only.isna().sum() # should be 0 - -# output csv: all SNPS for mCSM analysis -# specify variable name for output file -gene = 'pnca' -#drug = 'pyrazinamide' -my_fname1 = '_snps' -nrows = len(snps_only) - -#output_file_path = '/home/tanu/git/Data/input/processed/pyrazinamide/' -#output_file_path = work_dir + '/Data/' -#output_file_path = data_dir + '/input/processed/' + drug + '/' -print(outdir) - -if not os.path.exists(outdir): - print( outdir, 'does not exist. Creating') - os.makedirs(outdir) - exit() - -#out_filename = gene + my_fname1 + str(nrows) + '.csv' -out_filename = '/' + gene + my_fname1 + '.csv' -outfile = outdir + out_filename -print(outfile) #<<<- check - -# write to csv: without column or row names -# Bad practice: numbers at the start of a filename -snps_only.to_csv(out_filename, header = False, index = False) - -#========= -# Step 12b: all snps with annotation -#========= -# all snps, selected cols -#pnca_snps_ALL = meta_pnca_LF1[['id','country','lineage', 'sublineage' -# , 'drtype', 'pyrazinamide' -# , 'mutation_info', 'mutation', 'Mutationinformation']] - -#len(pnca_snps_ALL) - -# sanity check -#meta_pnca_LF1['mutation'].nunique() - -# output csv: WITH column but WITHOUT row names(all snps with meta data) -# specify variable name for output file -#gene = 'pnca' -#drug = 'pyrazinamide' -#my_fname2 = '_snps_with_metadata_' -#nrows = len(pnca_snps_ALL) - -#output_file_path = '/home/tanu/git/Data/input/processed/pyrazinamide/' -#output_file_path = work_dir + '/Data/' -#output_file_path = data_dir + '/input/processed/' + drug + '/' -print(outdir) - -if not os.path.exists(outdir): - print( outdir, 'does not exist. 
Creating') - os.makedirs(outdir) - exit() - -#out_filename = gene + my_fname2 + str(nrows) + '.csv' -out_filename = '/' + gene + my_fname1 + '.csv' -outfile = outdir + out_filename -print(outfile) #<<<- check - -# write out file -#pnca_snps_ALL.to_csv(outfile, header = True, index = False) - -#========= -# Step 12c: comp snps for OR calcs with annotation -#========= -# remove all NA's from pyrazinamide column from LF1 - -# counts NA per column -meta_pnca_LF1.isna().sum() - -# remove NA -meta_pnca_LF2 = meta_pnca_LF1.dropna(subset=['pyrazinamide']) - -# sanity checks -# should be True -len(meta_pnca_LF2) == len(meta_pnca_LF1) - meta_pnca_LF1['pyrazinamide'].isna().sum() - -# unique counts -meta_pnca_LF2['mutation'].nunique() - -meta_pnca_LF2.groupby('mutation_info').nunique() - -# sanity check -meta_pnca_LF2['id'].nunique() #1908 - -# should be True -if meta_pnca_LF2['id'].nunique() == comp_pnca_samples: - print ('sanity check passed: complete numbers match') +print('Checking NA in snps...')# should be 0 +if snps_only.Mutationinformation.isna().sum() == 0: + print ('PASS: NO NAs/missing entries for SNPs') else: - print('Error: Please Debug!') + print('FAIL: SNP has NA, Possible mapping issues from dict?', + '\nDebug please!') +print('======================================================================') -# value counts -meta_pnca_LF2.mutation.value_counts() -#meta_pnca_LF2.groupby(['mutation_info', 'mutation']).size() +out_filename2 = gene.lower() + '_' + 'mcsm_snps.csv' +outfile2 = homedir + '/' + outdir + '/' + out_filename2 -# valid/comp snps -# uncomment as necessary -pnca_snps_COMP = pd.DataFrame(meta_pnca_LF2['Mutationinformation'].unique()) -len(pnca_snps_COMP) +print('Writing file: mCSM style muts', + '\nmutation format (SNP): {Wt}{Mut}', + '\nNo. of distinct muts:', len(snps_only), + '\nNo. of distinct positions:', len(pos_only), + '\nFilename:', out_filename2, + '\nPath:', homedir +'/'+ outdir) -# output csv: WITH column but WITHOUT row names (COMP snps with meta data) -# specify variable name for output file +snps_only.to_csv(outfile2, header = False, index = False) -gene = 'pnca' -#drug = 'pyrazinamide' -my_fname3 = '_comp_snps_with_metadata' -nrows = len(pnca_snps_COMP) +print('Finished writing:', out_filename2, + '\nNo. of rows:', len(snps_only) ) +print('======================================================================') +del(out_filename2) +#%% Write file: pnca_metadata (i.e pnca_LF1) +# where each row has UNIQUE mutations NOT unique sample ids +out_filename3 = gene.lower() + '_' + 'metadata.csv' +outfile3 = homedir + '/' + outdir + '/' + out_filename3 +print('Writing file: LF formatted data', + '\nFilename:', out_filename3, + '\nPath:', homedir +'/'+ outdir) -#output_file_path = '/home/tanu/git/Data/input/processed/pyrazinamide/' -#output_file_path = work_dir + '/Data/' -#output_file_path = data_dir + '/input/processed/' + drug + '/' -print(outdir) +pnca_LF1.to_csv(outfile3, header = True, index = False) +print('Finished writing:', out_filename3, + '\nNo. of rows:', len(pnca_LF1), + '\nNo. of cols:', len(pnca_LF1.columns) ) -if not os.path.exists(outdir): - print( outdir, 'does not exist. 
Creating') - os.makedirs(outdir) - exit() +print('======================================================================') -#out_filename = gene + my_fname3 + str(nrows) + '.csv' -#out_filename = '/' + gene + my_fname3 + '.csv' -#outfile = outdir + out_filename -#print(outfile) #<<<- check +#%% Write file: comp SNPs (i.e snps without any corresponding 'NA' in the +# column to allow OR calcs) -# write out file -#pnca_snps_COMP.to_csv(outfile, header = True, index = False) +# remove NA from pyrazinamide cols +pnca_LF2 = pnca_LF1.dropna(subset=['pyrazinamide']) -#========= -# Step 12d: comp snps only -#========= -# output csv: comp SNPS for info (i.e snps for which OR exist) -# specify variable name for output file +print('extracting OR muts by removing NAs from pyrazinamide cols') +if pnca_LF2.pyrazinamide.isna().sum() > 0: + print('FAIL: NAs NOT removed successfully') +else: + print('PASS: NAs removed successfully') -snps_only = pd.DataFrame(meta_pnca_LF2['Mutationinformation'].unique()) +# extracting comp snps only +comp_snps_only = pd.DataFrame(pnca_LF2['mutation'].unique()) +#print('Total no. of comp snps:', len(comp_snps_only)) +comp_snps_only.head() -gene = 'pnca' -#drug = 'pyrazinamide' -my_fname1 = '_comp_snps' -nrows = len(snps_only) +# assign column name +comp_snps_only.columns = ['mutation'] -#output_file_path = '/home/tanu/git/Data/input/processed/pyrazinamide/' -#output_file_path = work_dir + '/Data/' -#output_file_path = data_dir + '/input/processed/' + drug + '/' -print(outdir) +# count how many positions this corresponds to +comp_pos_only = pd.DataFrame(pnca_LF2['position'].unique()) +#print('Total no. of pos corresponding to comp_snps:', len(comp_pos_only)) + +out_filename4 = gene.lower() + '_' + 'comp_snps.csv' +outfile4 = homedir + '/' + outdir + '/' + out_filename4 +print('Writing file: comp snps to allow OR calcs', + '\nFilename:', out_filename4, + '\nPath:', homedir + '/' + outdir, + '\nNo. of comp muts:', len(comp_snps_only), + '\nNo. of distinct positions for comp muts:', len(comp_pos_only) ) -if not os.path.exists(outdir): - print( outdir, 'does not exist. Creating') - os.makedirs(outdir) - exit() +comp_snps_only.to_csv(outfile4, header = True, index = False) -#out_filename = gene + my_fname1 + str(nrows) + '.csv' -out_filename = '/' + gene + my_fname1 + '.csv' -outfile = outdir + out_filename -print(outfile) #<<<- check +print('Finished writing:', out_filename4, + '\nNo. of rows:', len(comp_snps_only) ) +#%% write file: mCSM style but with repitions for MSA and logo plots +all_muts_msa = pd.DataFrame(pnca_LF1['Mutationinformation']) +all_muts_msa.head() +# assign column name +all_muts_msa.columns = ['Mutationinformation'] -# write to csv: without column or row names -snps_only.to_csv(outfile, header = False, index = False) +# make sure it is string +all_muts_msa.columns.dtype -#=#=#=#=#=#=#=# -# COMMENT: LF1 is the file to extract all unique snps for mcsm -# but you have that already in file called pnca_snps... 
-# LF2: is the file for extracting snps tested for DS and hence OR calcs -#=#=#=#=#=#=#=# +# sort the column +all_muts_msa_sorted = all_muts_msa.sort_values(by = 'Mutationinformation') -########### -# Step 13 : Output the whole df i.e -# file for meta_data which is now formatted with -# each row as a unique snp rather than the original version where -# each row is a unique id -# ***** This is the file you will ADD the AF and OR calculations to ***** -########### +# create an extra column with protein name +all_muts_msa_sorted = all_muts_msa_sorted.assign(fasta_name = '3PL1') +all_muts_msa_sorted.head() -# output csv: the entire DF -# specify variable name for output file -gene = 'pnca' -#drug = 'pyrazinamide' -my_fname4 = '_metadata' -#nrows = len(meta_pnca_LF1) +# rearrange columns so the fasta name is the first column (required for mutate.script) +all_muts_msa_sorted = all_muts_msa_sorted[['fasta_name', 'Mutationinformation']] +all_muts_msa_sorted.head() -#output_file_path = '/home/tanu/git/Data/input/processed/pyrazinamide/' -#output_file_path = work_dir + '/Data/' -#output_file_path = data_dir + '/input/processed/' + drug + '/' -print(outdir) +print('Checking NA in snps...')# should be 0 +if all_muts_msa.Mutationinformation.isna().sum() == 0: + print ('PASS: NO NAs/missing entries for SNPs') +else: + print('FAIL: SNP has NA, Possible mapping issues from dict?', + '\nDebug please!') +print('======================================================================') -if not os.path.exists(outdir): - print( outdir, 'does not exist. Creating') - os.makedirs(outdir) - exit() +out_filename5 = gene.lower() + '_' + 'all_muts_msa.csv' +outfile5 = homedir + '/' + outdir + '/' + out_filename5 -#out_filename = gene + my_fname1 + str(nrows) + '.csv' -out_filename = '/' + gene + my_fname4 + '.csv' -outfile = outdir + out_filename -print(outfile) #<<<- check +print('Writing file: mCSM style muts for msa', + '\nmutation format (SNP): {Wt}{Mut}', + '\nNo.of lines of msa:', len(all_muts_msa), + '\nFilename:', out_filename5, + '\nPath:', homedir +'/'+ outdir) -# write out file -meta_pnca_LF1.to_csv(outfile) +all_muts_msa_sorted.to_csv(outfile5, header = False, index = False) + +print('Finished writing:', out_filename5, + '\nNo. of rows:', len(all_muts_msa) ) +print('======================================================================') +del(out_filename5) + +#%% write file for mutational positions +# count how many positions this corresponds to +pos_only = pd.DataFrame(pnca_LF1['position'].unique()) +# assign column name +pos_only.columns = ['position'] +# make sure dtype of column position is int or numeric and not string +pos_only.position.dtype +pos_only['position'] = pos_only['position'].astype(int) +pos_only.position.dtype + +# sort by position value +pos_only_sorted = pos_only.sort_values(by = 'position', ascending = True) + +out_filename6 = gene.lower() + '_' + 'mutational_positons.csv' +outfile6 = homedir + '/' + outdir + '/' + out_filename6 + +print('Writing file: mutational positions', + '\nNo. of distinct positions:', len(pos_only_sorted), + '\nFilename:', out_filename6, + '\nPath:', homedir +'/'+ outdir) + +pos_only_sorted.to_csv(outfile6, header = True, index = False) + +print('Finished writing:', out_filename6, + '\nNo. 
of rows:', len(pos_only_sorted) ) +print('======================================================================') +del(out_filename6) +#%% end of script +print('======================================================================') +print(u'\u2698' * 50, + '\nEnd of script: Data extraction and writing files' + '\n' + u'\u2698' * 50 ) +#%%
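#
# Editorial usage sketch (not part of the patch; path assumed as set by homedir
# and outdir in the script above): the long-format metadata written here can be
# read back for the downstream AF/OR step, e.g.
#
#   import os
#   import pandas as pd
#   homedir = os.path.expanduser('~')
#   pnca_metadata = pd.read_csv(homedir + '/git/Data/pyrazinamide/output/pnca_metadata.csv')
#   pnca_metadata.head()
#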