#!/usr/bin/env python3
|
|
# -*- coding: utf-8 -*-
|
|
'''
|
|
Created on Tue Aug 6 12:56:03 2019
|
|
|
|
@author: tanu
|
|
'''
|
|
|
|
# FIXME: include error checking to ensure you only
|
|
# concentrate on positions that have structural info?
|
|
|
|
# FIXME: import dirs.py to get the basic dir paths available
|
|
#=======================================================================
|
|
# TASK: extract ALL <gene> matched mutations from GWAS data
|
|
# Input data file has the following format: each row = unique sample id
|
|
# id,country,lineage,sublineage,drtype,drug,dr_muts_col,other_muts_col...
|
|
# 0,sampleID,USA,lineage2,lineage2.2.1,Drug-resistant,0.0,WT,gene_match<wt>POS<mut>; pncA_c.<wt>POS<mut>...
|
|
# where multiple mutations and multiple mutation types are separated by ';'.
|
|
# We are interested in the protein coding region, i.e. mutations in the <gene>_p. format.
|
|
# This script splits the mutations on the ';' and extracts protein coding muts only
|
|
# where each row is a separate mutation
|
|
# sample ids AND mutations are NOT unique, but the COMBINATION (sample id + mutation) = unique
|
|
|
|
# output files: all lower case
|
|
# 0) <gene>_common_ids.csv
|
|
# 1) <gene>_ambiguous_muts.csv
|
|
# 2) <gene>_mcsm_snps.csv
|
|
# 3) <gene>_metadata.csv
|
|
# 4) <gene>_all_muts_msa.csv
|
|
# 5) <gene>_mutational_positions.csv
|
|
#=======================================================================
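# Illustrative example (toy values only, not real data): a single mutations cell
# might look like 'WT; pncA_p.Val180Phe; pncA_p.Gln10Pro; pncA_c.<wt>POS<mut>'.
# After splitting on ';' and keeping only the <gene>_p. (protein coding) entries,
# this sample contributes two rows: pncA_p.Val180Phe and pncA_p.Gln10Pro.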
|
|
#%% load libraries
|
|
import os, sys
|
|
import pandas as pd
|
|
#import numpy as np
|
|
import argparse
|
|
#=======================================================================
|
|
#%% homdir and curr dir and local imports
|
|
homedir = os.path.expanduser('~')
|
|
# set working dir
|
|
os.getcwd()
|
|
os.chdir(homedir + '/git/LSHTM_analysis/scripts')
|
|
os.getcwd()
|
|
|
|
# import aa dict
|
|
from reference_dict import my_aa_dict # CHECK DIR STRUC THERE!
|
|
#=======================================================================
|
|
#%% command line args
|
|
arg_parser = argparse.ArgumentParser()
|
|
#arg_parser.add_argument('-d', '--drug', help='drug name', default = 'pyrazinamide')
|
|
#arg_parser.add_argument('-g', '--gene', help='gene name', default = 'pncA') # case sensitive
|
|
arg_parser.add_argument('-d', '--drug', help='drug name', default = 'TESTDRUG')
|
|
arg_parser.add_argument('-g', '--gene', help='gene name (case sensitive)', default = 'testGene') # case sensitive
|
|
args = arg_parser.parse_args()
|
|
#=======================================================================
|
|
#%% variable assignment: input and output paths & filenames
|
|
#drug =
|
|
#gene = '
|
|
|
|
drug = args.drug
|
|
gene = args.gene
|
|
|
|
gene_match = gene + '_p.'
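# e.g. for gene = 'pncA', gene_match = 'pncA_p.'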
|
|
|
|
# building cols to extract
|
|
dr_muts_col = 'dr_mutations_' + drug
|
|
other_muts_col = 'other_mutations_' + drug
|
|
|
|
print('Extracting columns based on variables:\n'
|
|
, drug
|
|
, '\n'
|
|
, dr_muts_col
|
|
, '\n'
|
|
, other_muts_col
|
|
, '\n===============================================================')
|
|
#=======================================================================
|
|
#%% input and output dirs and files
|
|
#=======
|
|
# data dir
|
|
#=======
|
|
datadir = homedir + '/' + 'git/Data'
|
|
|
|
#=======
|
|
# input
|
|
#=======
|
|
in_filename = 'original_tanushree_data_v2.csv'
|
|
infile = datadir + '/' + in_filename
|
|
print('Input file: ', infile
|
|
, '\n============================================================')
|
|
|
|
#=======
|
|
# output
|
|
#=======
|
|
# several output files
|
|
# output filenames in respective sections at the time of outputting files
|
|
outdir = datadir + '/' + drug + '/' + 'output'
|
|
print('Output filename: in the respective sections'
|
|
, '\nOutput path: ', outdir
|
|
, '\n=============================================================')
|
|
|
|
#%%end of variable assignment for input and output files
|
|
#=======================================================================
|
|
#%% Read input file
|
|
master_data = pd.read_csv(infile, sep = ',')
|
|
|
|
# column names
|
|
#list(master_data.columns)
|
|
|
|
# extract relevant columns from the meta data related to the drug
|
|
meta_data = master_data[['id'
|
|
,'country'
|
|
,'lineage'
|
|
,'sublineage'
|
|
,'drtype'
|
|
, drug
|
|
, dr_muts_col
|
|
, other_muts_col
|
|
]]
|
|
|
|
del(master_data)
|
|
|
|
# checks and results
|
|
total_samples = meta_data['id'].nunique()
|
|
print('RESULT: Total samples:', total_samples
|
|
, '\n===========================================================')
|
|
|
|
# counts NA per column
|
|
meta_data.isna().sum()
|
|
print('No. of NAs/column:' + '\n', meta_data.isna().sum()
|
|
, '\n===========================================================')
|
|
|
|
# glance
|
|
meta_data.head()
|
|
|
|
# equivalent of table in R
|
|
# drug counts
|
|
meta_data[drug].value_counts()
|
|
print('RESULT: Sus and Res samples:\n', meta_data[drug].value_counts()
|
|
, '\n===========================================================')
|
|
|
|
# clear variables
|
|
del(in_filename,infile)
|
|
#del(outdir)
|
|
#%%
|
|
# !!!IMPORTANT!!! sanity check:
|
|
# This is to find out how many samples have 1 and more than 1 mutation, so you
|
|
# can use it to check if your data extraction process for dr_muts
|
|
# and other_muts has worked correctly AND also to check the dim of the
|
|
# final formatted data.
|
|
# This will have: unique COMBINATION of sample id and <gene_match> mutations
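# Minimal sketch of the counting idea used below (toy string, purely
# illustrative; safe to run and then discarded):
_example_cell = 'WT; pncA_p.Val180Phe; pncA_p.Gln10Pro'
assert _example_cell.count('pncA_p.') == 2 # str.count() = no. of substring hits
del(_example_cell)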
|
|
|
|
#========
|
|
# First: counting <gene_match> mutations in dr_muts_col column
|
|
#========
|
|
print('Now counting WT &', gene_match, 'muts within the column:', dr_muts_col)
|
|
|
|
# drop na and extract a clean df
|
|
clean_df = meta_data.dropna(subset=[dr_muts_col])
|
|
|
|
# sanity check: count na
|
|
na_count = meta_data[dr_muts_col].isna().sum()
|
|
|
|
if len(clean_df) == (total_samples - na_count):
|
|
print('PASS: clean_df extracted: length is', len(clean_df)
|
|
, '\nNo.of NAs =', na_count, '/', total_samples
|
|
, '\n==========================================================')
|
|
else:
|
|
print('FAIL: dropping NA failed'
|
|
, '\n==========================================================')
|
|
|
|
dr_gene_count = 0
|
|
wt = 0
|
|
id_dr = []
|
|
id2_dr = []
|
|
|
|
for i, id in enumerate(clean_df.id):
|
|
# print (i, id)
|
|
# id_dr.append(id)
|
|
count_gene_dr = clean_df[dr_muts_col].iloc[i].count(gene_match)
|
|
if count_gene_dr > 0:
|
|
id_dr.append(id)
|
|
if count_gene_dr > 1:
|
|
id2_dr.append(id)
|
|
# print(id, count_gene_dr)
|
|
dr_gene_count = dr_gene_count + count_gene_dr
|
|
count_wt = clean_df[dr_muts_col].iloc[i].count('WT')
|
|
wt = wt + count_wt
|
|
|
|
print('RESULTS:')
|
|
print('Total WT in dr_muts_col:', wt)
|
|
print('Total matches of', gene_match, 'in dr_muts_col:', dr_gene_count)
|
|
print('Total samples with > 1', gene_match, 'muts in dr_muts_col:', len(id2_dr) )
|
|
print('=================================================================')
|
|
|
|
del(i, id, wt, id2_dr, clean_df, na_count, count_gene_dr, count_wt)
|
|
|
|
#========
|
|
# Second: counting <gene_match> mutations in other_muts_col column
|
|
#========
|
|
print('Now counting WT &', gene_match, 'muts within the column:', other_muts_col)
|
|
|
|
# drop na and extract a clean df
|
|
clean_df = meta_data.dropna(subset=[other_muts_col])
|
|
|
|
# sanity check: count na
|
|
na_count = meta_data[other_muts_col].isna().sum()
|
|
|
|
if len(clean_df) == (total_samples - na_count):
|
|
print('PASS: clean_df extracted: length is', len(clean_df)
|
|
, '\nNo.of NAs =', na_count, '/', total_samples
|
|
, '\n=========================================================')
|
|
else:
|
|
print('FAIL: dropping NA failed'
|
|
, '\n=========================================================')
|
|
|
|
other_gene_count = 0
|
|
wt_other = 0
|
|
id_other = []
|
|
id2_other = []
|
|
|
|
for i, id in enumerate(clean_df.id):
|
|
# print (i, id)
|
|
# id_other.append(id)
|
|
# count_gene_other = clean_df[other_muts_col].iloc[i].count('gene_match')
|
|
count_gene_other = clean_df[other_muts_col].iloc[i].count(gene_match)
|
|
if count_gene_other > 0:
|
|
id_other.append(id)
|
|
if count_gene_other > 1:
|
|
id2_other.append(id)
|
|
# print(id, count_gene_other)
|
|
other_gene_count = other_gene_count + count_gene_other
|
|
count_wt = clean_df[other_muts_col].iloc[i].count('WT')
|
|
wt_other = wt_other + count_wt
|
|
print('RESULTS:')
|
|
print('Total WT in other_muts_col:', wt_other)
|
|
print('Total matches of', gene_match, 'in', other_muts_col, ':', other_gene_count)
|
|
print('Total samples with > 1', gene_match, 'muts in other_muts_col:', len(id2_other) )
|
|
print('=================================================================')
|
|
|
|
print('Predicting total no. of rows in the curated df:', dr_gene_count + other_gene_count
|
|
, '\n===================================================================')
|
|
expected_rows = dr_gene_count + other_gene_count
|
|
|
|
del(i, id, wt_other, clean_df, na_count, id2_other, count_gene_other, count_wt)
|
|
|
|
#%%
|
|
############
|
|
# extracting dr and other muts separately along with the common cols
|
|
#############
|
|
print('Extracting dr_muts from col:', dr_muts_col, 'with other meta_data')
|
|
print('gene to extract:', gene_match )
|
|
|
|
#===============
|
|
# dr mutations: extract gene_match entries with meta data and ONLY dr_muts col
|
|
#===============
|
|
# FIXME: replace drug with variable containing the drug name
|
|
# !!! important !!!
|
|
meta_data_dr = meta_data[['id'
|
|
,'country'
|
|
,'lineage'
|
|
,'sublineage'
|
|
,'drtype'
|
|
, drug
|
|
, dr_muts_col
|
|
]]
|
|
print('expected dim should be:', len(meta_data), (len(meta_data.columns)-1) )
|
|
print('actual dim:', meta_data_dr.shape
|
|
, '\n===============================================================')
|
|
|
|
# Extract within this the gene of interest using string match
|
|
#meta_gene_dr = meta_data.loc[meta_data[dr_muts_col].str.contains('gene_match*', na = False)]
|
|
meta_gene_dr = meta_data_dr.loc[meta_data_dr[dr_muts_col].str.contains(gene_match, na = False)]
|
|
|
|
dr_id = meta_gene_dr['id'].unique()
|
|
print('RESULT: No. of samples with dr muts in', gene, ':', len(dr_id))
|
|
print('checking RESULT:',
|
|
'\nexpected len =', len(id_dr),
|
|
'\nactual len =', len(meta_gene_dr) )
|
|
|
|
if len(id_dr) == len(meta_gene_dr):
|
|
print('PASS: lengths match'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: length mismatch'
|
|
, '\n===============================================================')
|
|
|
|
dr_id = pd.Series(dr_id)
|
|
|
|
#=================
|
|
# other mutations: extract gene_match entries
|
|
#==================
|
|
print('Extracting other_muts from:', other_muts_col, 'with other meta_data')
|
|
# FIXME: replace drug with variable containing the drug name
|
|
# !!! important !!!
|
|
meta_data_other = meta_data[['id'
|
|
,'country'
|
|
,'lineage'
|
|
,'sublineage'
|
|
,'drtype'
|
|
, drug
|
|
, other_muts_col
|
|
]]
|
|
|
|
print('expected dim should be:', len(meta_data), (len(meta_data.columns)-1) )
|
|
print('actual dim:', meta_data_other.shape
|
|
, '\n===============================================================')
|
|
|
|
# Extract within this the gene of interest using string match
|
|
meta_gene_other = meta_data_other.loc[meta_data_other[other_muts_col].str.contains(gene_match, na = False)]
|
|
|
|
other_id = meta_gene_other['id'].unique()
|
|
print('RESULT: No. of samples with other muts:', len(other_id))
|
|
print('checking RESULT:',
|
|
'\nexpected len =', len(id_other),
|
|
'\nactual len =', len(meta_gene_other) )
|
|
|
|
if len(id_other) == len(meta_gene_other):
|
|
print('PASS: lengths match'
|
|
, '\n==============================================================')
|
|
else:
|
|
print('FAIL: length mismatch'
|
|
, '\n===============================================================')
|
|
|
|
other_id = pd.Series(other_id)
|
|
#%% Find common IDs
|
|
print('Now extracting common_ids...')
|
|
common_mut_ids = dr_id.isin(other_id).sum()
|
|
print('RESULT: No. of common ids:', common_mut_ids)
|
|
|
|
# sanity checks
|
|
# check if True: should be since these are common
|
|
dr_id.isin(other_id).sum() == other_id.isin(dr_id).sum()
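# e.g. pd.Series(['a', 'b']).isin(pd.Series(['b', 'c'])) gives [False, True],
# so .sum() counts how many ids in one set also appear in the other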
|
|
|
|
# check if the Ids that are common are indeed the same!
|
|
# bit of a tautology, but better safe than sorry!
|
|
common_ids = dr_id[dr_id.isin(other_id)]
|
|
common_ids = common_ids.reset_index()
|
|
common_ids.columns = ['index', 'id']
|
|
|
|
common_ids2 = other_id[other_id.isin(dr_id)]
|
|
common_ids2 = common_ids2.reset_index()
|
|
common_ids2.columns = ['index', 'id2']
|
|
|
|
# should be True
|
|
print(common_ids['id'].equals(common_ids2['id2']))
|
|
|
|
# good sanity check: use it later to check gene_sample_counts
|
|
expected_gene_samples = ( len(meta_gene_dr) + len(meta_gene_other) - common_mut_ids )
|
|
print('expected no. of gene samples:', expected_gene_samples)
|
|
print('=================================================================')
|
|
#%% write file
|
|
#print(outdir)
|
|
out_filename0 = gene.lower() + '_common_ids.csv'
|
|
outfile0 = outdir + '/' + out_filename0
|
|
|
|
#FIXME: CHECK line len(common_ids)
|
|
print('Writing file:'
|
|
, '\nFile:', outfile0
|
|
, '\nExpected no. of rows:', len(common_ids)
|
|
, '\n=============================================================')
|
|
|
|
common_ids.to_csv(outfile0)
|
|
del(out_filename0)
|
|
|
|
# clear variables
|
|
del(dr_id, other_id, meta_data_dr, meta_data_other, common_ids, common_mut_ids, common_ids2)
|
|
#%% Now extract 'all' <gene> mutations, i.e. 'gene_match*'
|
|
print('extracting from string match:', gene_match, 'mutations from cols:\n'
|
|
, dr_muts_col, 'and', other_muts_col, 'using string match:'
|
|
, '\n===================================================================')
|
|
#meta_gene_all = meta_data.loc[meta_data[dr_muts_col].str.contains(gene_match) | meta_data[other_muts_col].str.contains(gene_match) ]
|
|
meta_gene_all = meta_data.loc[meta_data[dr_muts_col].str.contains(gene_match, na = False) | meta_data[other_muts_col].str.contains(gene_match, na = False) ]
|
|
|
|
extracted_gene_samples = meta_gene_all['id'].nunique()
|
|
print('RESULT: actual no. of gene samples extracted:', extracted_gene_samples
|
|
, '\n===================================================================')
|
|
|
|
# sanity check: length of gene samples
|
|
print('Performing sanity check:')
|
|
if extracted_gene_samples == expected_gene_samples:
|
|
print('No. of gene samples:', len(meta_gene_all)
|
|
, '\nPASS: expected & actual no. of gene samples match'
|
|
, '\n=========================================================')
|
|
else:
|
|
print('FAIL: Debug please!'
|
|
, '\n===============================================================')
|
|
|
|
# count NA in drug column
|
|
gene_na = meta_gene_all[drug].isna().sum()
|
|
print('No. of gene samples without', drug, 'testing i.e. NA in the', drug, 'column:', gene_na)
|
|
|
|
# use it later to check number of complete samples from LF data
|
|
comp_gene_samples = len(meta_gene_all) - gene_na
|
|
print('comp gene samples tested for', drug, ':', comp_gene_samples)
|
|
print('=================================================================')
|
|
|
|
# Comment: This is still dirty data since these
|
|
# are samples that have gene_match muts, but can have others as well
|
|
# since the format for mutations is mut1; mut2, etc.
|
|
print('This is still dirty data: samples have ', gene_match, 'muts but may have others as well'
|
|
, '\nsince the format for mutations is mut1; mut2, etc.'
|
|
, '\n=============================================================')
|
|
|
|
#%% tidy_split(): function to split mutations on a specified delimiter: ';'
|
|
#https://stackoverflow.com/questions/41476150/removing-space-from-dataframe-columns-in-pandas
|
|
print('Performing tidy_split(): to separate the mutations into individual rows')
|
|
|
|
# define the split function
|
|
def tidy_split(df, column, sep='|', keep=False):
|
|
'''
|
|
Split the values of a column and expand so the new DataFrame has one split
|
|
value per row. Filters rows where the column is missing.
|
|
|
|
Params
|
|
------
|
|
df : pandas.DataFrame
|
|
dataframe with the column to split and expand
|
|
column : str
|
|
the column to split and expand
|
|
sep : str
|
|
the string used to split the column's values
|
|
keep : bool
|
|
whether to retain the presplit value as its own row
|
|
|
|
Returns
|
|
-------
|
|
pandas.DataFrame
|
|
Returns a dataframe with the same columns as `df`.
|
|
'''
|
|
indexes = list()
|
|
new_values = list()
|
|
#df = df.dropna(subset=[column])#!!!!-----see this incase you need to uncomment based on use case
|
|
for i, presplit in enumerate(df[column].astype(str)):
|
|
values = presplit.split(sep)
|
|
if keep and len(values) > 1:
|
|
indexes.append(i)
|
|
new_values.append(presplit)
|
|
for value in values:
|
|
indexes.append(i)
|
|
new_values.append(value)
|
|
new_df = df.iloc[indexes, :].copy()
|
|
new_df[column] = new_values
|
|
return new_df
|
|
|
|
#%% end of tidy_split()
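# Minimal usage sketch of tidy_split() on a toy frame (illustrative only,
# not part of the pipeline; safe to run and then discarded):
_toy = pd.DataFrame({'id': ['s1'], 'muts': ['pncA_p.Val180Phe; pncA_p.Gln10Pro']})
_toy_long = tidy_split(_toy, 'muts', sep = ';')
assert len(_toy_long) == 2 # one row per mutation, the 'id' value is repeated
del(_toy, _toy_long)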
|
|
#=========
|
|
# DF1: dr_muts_col
|
|
#=========
|
|
########
|
|
# tidy_split(): on dr_muts_col column and remove leading white spaces
|
|
########
|
|
col_to_split1 = dr_muts_col
|
|
print ('Firstly, applying tidy split on dr muts df', meta_gene_dr.shape
|
|
, '\ncolumn name to apply tidy_split():', col_to_split1
|
|
, '\n============================================================')
|
|
# apply tidy_split()
|
|
dr_WF0 = tidy_split(meta_gene_dr, col_to_split1, sep = ';')
|
|
# remove leading white space else these are counted as distinct mutations as well
|
|
dr_WF0[dr_muts_col] = dr_WF0[dr_muts_col].str.lstrip()
|
|
|
|
# extract only the samples/rows with gene_match
|
|
dr_gene_WF0 = dr_WF0.loc[dr_WF0[dr_muts_col].str.contains(gene_match)]
|
|
|
|
print('lengths after tidy split and extracting', gene_match, 'muts:'
|
|
, '\nold length:' , len(meta_gene_dr)
|
|
, '\nlen after split:', len(dr_WF0)
|
|
, '\ndr_gene_WF0 length:', len(dr_gene_WF0)
|
|
, '\nexpected len:', dr_gene_count)
|
|
|
|
if len(dr_gene_WF0) == dr_gene_count:
|
|
print('PASS: length of dr_gene_WF0 match with expected length'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: lengths mismatch'
|
|
, '\n===============================================================')
|
|
|
|
# count the freq of 'dr_muts' samples
|
|
# .copy() to avoid a SettingWithCopyWarning when adding the freq column below
dr_muts_df = dr_gene_WF0[['id', dr_muts_col]].copy()
|
|
print('dim of dr_muts_df:', dr_muts_df.shape)
|
|
|
|
# add freq column
|
|
dr_muts_df['dr_sample_freq'] = dr_muts_df.groupby('id')['id'].transform('count')
|
|
#dr_muts_df['dr_sample_freq'] = dr_muts_df.loc[dr_muts_df.groupby('id')].transform('count')
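# e.g. for ids ['s1', 's1', 's2'], groupby('id')['id'].transform('count')
# returns [2, 2, 1]: each row gets the total no. of rows sharing its id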
|
|
print('revised dim of dr_muts_df:', dr_muts_df.shape)
|
|
|
|
c1 = dr_muts_df.dr_sample_freq.value_counts()
|
|
print('counting no. of sample frequency:\n', c1
|
|
, '\n===================================================================')
|
|
|
|
# sanity check: length of gene samples
|
|
if len(dr_gene_WF0) == c1.sum():
|
|
print('PASS: WF data has expected length'
|
|
, '\nlength of dr_gene_WF0:', c1.sum()
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: Debug please!'
|
|
, '\n===============================================================')
|
|
|
|
#!!! Important !!!
|
|
# Assign 'column name' on which split was performed as an extra column
|
|
# This is so you can identify if mutations are dr_type or other in the final df
|
|
dr_df = dr_gene_WF0.assign(mutation_info = dr_muts_col)
|
|
print('dim of dr_df:', dr_df.shape
|
|
, '\n=============================================================='
|
|
, '\nEnd of tidy split() on dr_muts, and added an extra column reflecting mut_category'
|
|
, '\n===============================================================')
|
|
#%%
|
|
#=========
|
|
# DF2: other_muts_col
|
|
#=========
|
|
########
|
|
# tidy_split(): on other_muts_col column and remove leading white spaces
|
|
########
|
|
col_to_split2 = other_muts_col
|
|
print ('applying second tidy split() separately on other muts df', meta_gene_other.shape
|
|
, '\ncolumn name to apply tidy_split():', col_to_split2
|
|
, '\n============================================================')
|
|
|
|
# apply tidy_split()
|
|
other_WF1 = tidy_split(meta_gene_other, col_to_split2, sep = ';')
|
|
# remove the leading white spaces in the column
|
|
other_WF1[other_muts_col] = other_WF1[other_muts_col].str.strip()
|
|
|
|
# extract only the samples/rows with gene_match
|
|
other_gene_WF1 = other_WF1.loc[other_WF1[other_muts_col].str.contains(gene_match)]
|
|
|
|
print('lengths after tidy split and extracting', gene_match, 'muts:',
|
|
'\nold length:' , len(meta_gene_other),
|
|
'\nlen after split:', len(other_WF1),
|
|
'\nother_gene_WF1 length:', len(other_gene_WF1),
|
|
'\nexpected len:', other_gene_count)
|
|
|
|
if len(other_gene_WF1) == other_gene_count:
|
|
print('PASS: length matches with expected length'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: lengths mismatch'
|
|
, '\n===============================================================')
|
|
# count the freq of 'other muts' samples
|
|
# .copy() to avoid a SettingWithCopyWarning when adding the freq column below
other_muts_df = other_gene_WF1[['id', other_muts_col]].copy()
|
|
print('dim of other_muts_df:', other_muts_df.shape)
|
|
|
|
# add freq column
|
|
other_muts_df['other_sample_freq'] = other_muts_df.groupby('id')['id'].transform('count')
|
|
print('revised dim of other_muts_df:', other_muts_df.shape)
|
|
|
|
c2 = other_muts_df.other_sample_freq.value_counts()
|
|
print('counting no. of sample frequency:\n', c2)
|
|
print('=================================================================')
|
|
# sanity check: length of gene samples
|
|
if len(other_gene_WF1) == c2.sum():
|
|
print('PASS: WF data has expected length'
|
|
, '\nlength of other_gene_WF1:', c2.sum()
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: Debug please!'
|
|
, '\n===============================================================')
|
|
|
|
#!!! Important !!!
|
|
# Assign 'column name' on which split was performed as an extra column
|
|
# This is so you can identify if mutations are dr_type or other in the final df
|
|
other_df = other_gene_WF1.assign(mutation_info = other_muts_col)
|
|
print('dim of other_df:', other_df.shape
|
|
, '\n==============================================================='
|
|
, '\nEnd of tidy split() on other_muts, and added an extra column reflecting mut_category'
|
|
, '\n===============================================================')
|
|
#%%
|
|
#==========
|
|
# Concatenating the two dfs: equivalent of rbind in R
|
|
#==========
|
|
#!!! important !!!
|
|
# change column names to allow concat:
|
|
# dr_muts.. & other_muts : 'mutation'
|
|
print('Now concatenating the two dfs by row'
|
|
, '\nfirst assigning a common colname: "mutation" to the col containing muts'
|
|
, '\nthis is done for both dfs'
|
|
, '\n===================================================================')
|
|
|
|
dr_df.columns
|
|
dr_df.rename(columns = {dr_muts_col: 'mutation'}, inplace = True)
|
|
dr_df.columns
|
|
|
|
other_df.columns
|
|
other_df.rename(columns = {other_muts_col: 'mutation'}, inplace = True)
|
|
other_df.columns
|
|
|
|
print('Now appending the two dfs:'
|
|
, '\ndr_df dim:', dr_df.shape
|
|
, '\nother_df dim:', other_df.shape
|
|
, '\ndr_df length:', len(dr_df)
|
|
, '\nother_df length:', len(other_df)
|
|
, '\nexpected length:', len(dr_df) + len(other_df)
|
|
, '\n=============================================================')
|
|
|
|
# checking colnames before concat
|
|
print('checking colnames BEFORE concatenating the two dfs...')
|
|
if (set(dr_df.columns) == set(other_df.columns)):
|
|
print('PASS: column names match necessary for merging two dfs')
|
|
else:
|
|
print('FAIL: Debug please!')
|
|
|
|
# concatenate (axis = 0): Rbind
|
|
gene_LF0 = pd.concat([dr_df, other_df], ignore_index = True, axis = 0)
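# e.g. concatenating a 3-row df and a 2-row df along axis 0 gives a 5-row df
# with a fresh 0..4 index (ignore_index = True)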
|
|
|
|
# checking colnames and length after concat
|
|
print('checking colnames AFTER concatenating the two dfs...')
|
|
if (set(dr_df.columns) == set(gene_LF0.columns)):
|
|
print('PASS: column names match')
|
|
else:
|
|
print('FAIL: Debug please!')
|
|
|
|
print('checking length AFTER concatenating the two dfs...')
|
|
|
|
if len(gene_LF0) == len(dr_df) + len(other_df):
|
|
print('PASS:length of df after concat match'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: length mismatch'
|
|
, '\n===============================================================')
|
|
#%%
|
|
###########
|
|
# This is hopefully clean data, but just double check
|
|
# Filter LF data so that you only have
|
|
# mutations corresponding to gene_match* (string match pattern)
|
|
# this will be the list you run OR calcs on
|
|
###########
|
|
print('length of gene_LF0:', len(gene_LF0),
|
|
'\nThis should be what you need. But just double check and extract', gene_match,
|
|
'\nfrom LF0 (concatenated data) using string match:', gene_match)
|
|
|
|
print('Double checking and creating df: gene_LF1')
|
|
gene_LF1 = gene_LF0[gene_LF0['mutation'].str.contains(gene_match)]
|
|
|
|
if len(gene_LF0) == len(gene_LF1):
|
|
print('PASS: length of gene_LF0 and gene_LF1 match',
|
|
'\nconfirming extraction and concatenating worked correctly')
|
|
else:
|
|
print('FAIL: BUT NOT FATAL!'
|
|
, '\ngene_LF0 and gene_LF1 lengths differ'
|
|
, '\nsuggesting error in extraction process'
|
|
, '\nuse gene_LF1 for downstream analysis'
|
|
, '\n=========================================================')
|
|
print('length of dfs pre and post processing...'
|
|
, '\ngene WF data (unique samples in each row):', extracted_gene_samples
|
|
, '\ngene LF data (unique mutation in each row):', len(gene_LF1)
|
|
, '\n=============================================================')
|
|
|
|
#%% sanity check for extraction
|
|
print('Verifying whether extraction process worked correctly...')
|
|
if len(gene_LF1) == expected_rows:
|
|
print('PASS: extraction process performed correctly'
|
|
, '\nexpected length:', expected_rows
|
|
, '\ngot:', len(gene_LF1)
|
|
, '\nRESULT: Total no. of mutant sequences for logo plot:', len(gene_LF1)
|
|
, '\n=========================================================')
|
|
else:
|
|
print('FAIL: extraction process has bugs'
|
|
, '\nexpected length:', expected_rows
|
|
, '\ngot:', len(gene_LF1)
|
|
, '\nDebug please'
|
|
, '\n=========================================================')
|
|
#%%
|
|
print('Performing some more sanity checks...')
|
|
|
|
# From LF1:
|
|
# no. of unique muts
|
|
distinct_muts = gene_LF1.mutation.value_counts()
|
|
print('Distinct genomic mutations:', len(distinct_muts))
|
|
|
|
# no. of samples contributing the unique muts
|
|
gene_LF1.id.nunique()
|
|
print('No.of samples contributing to distinct genomic muts:', gene_LF1.id.nunique() )
|
|
|
|
# no. of dr and other muts
|
|
mut_grouped = gene_LF1.groupby('mutation_info').mutation.nunique()
|
|
print('No.of unique dr and other muts:\n', gene_LF1.groupby('mutation_info').mutation.nunique() )
|
|
|
|
# sanity check
|
|
if len(distinct_muts) == mut_grouped.sum() :
|
|
print('PASS:length of LF1 is as expected'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: Mismatch in count of muts'
|
|
, '\nexpected count:', len(distinct_muts)
|
|
, '\nactual count:', mut_grouped.sum()
|
|
, '\nmuts should be distinct within dr* and other* type'
|
|
, '\ninspecting ...'
|
|
, '\n=========================================================')
|
|
muts_split = list(gene_LF1.groupby('mutation_info'))
|
|
dr_muts = muts_split[0][1].mutation
|
|
other_muts = muts_split[1][1].mutation
|
|
print('splitting muts by mut_info:', muts_split)
|
|
print('no.of dr_muts samples:', len(dr_muts))
|
|
print('no. of other_muts samples', len(other_muts))
|
|
#%%
|
|
# !!! IMPORTANT !!!!
|
|
# sanity check: There should not be any common muts
|
|
# i.e the same mutation cannot be classed as a drug AND 'others'
|
|
# use a boolean 'and' of the two counts (bitwise '&' on raw counts can wrongly give 0)
if dr_muts.isin(other_muts).sum() > 0 and other_muts.isin(dr_muts).sum() > 0:
|
|
print('WARNING: Ambiguous muts detected in dr_ and other_ mutation category'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('PASS: NO ambiguous muts detected'
|
|
, '\nMuts are unique to dr_ and other_ mutation class'
|
|
, '\n=========================================================')
|
|
|
|
# inspect dr_muts and other muts
|
|
if dr_muts.isin(other_muts).sum() > 0 and other_muts.isin(dr_muts).sum() > 0:
|
|
print('Finding ambiguous muts...'
|
|
, '\n========================================================='
|
|
, '\nTotal no. of samples in dr_muts present in other_muts:', dr_muts.isin(other_muts).sum()
|
|
, '\nThese are:\n', dr_muts[dr_muts.isin(other_muts)]
|
|
, '\n========================================================='
|
|
, '\nTotal no. of samples in other_muts present in dr_muts:', other_muts.isin(dr_muts).sum()
|
|
, '\nThese are:\n', other_muts[other_muts.isin(dr_muts)]
|
|
, '\n=========================================================')
|
|
else:
|
|
print('No ambiguous muts found, nothing to inspect'
|
|
, '\n===============================================================')
|
|
|
|
print('Counting no. of ambiguous muts...')
|
|
|
|
if dr_muts[dr_muts.isin(other_muts)].nunique() == other_muts[other_muts.isin(dr_muts)].nunique():
|
|
common_muts = dr_muts[dr_muts.isin(other_muts)].value_counts().keys().tolist()
|
|
print('Distinct no. of ambiguous muts detected: ' + str(len(common_muts))
|
|
, '\nlist of ambiguous mutations (see below):', *common_muts, sep = '\n')
|
|
print('\n===========================================================')
|
|
else:
|
|
print('Error: ambiguous muts detected, but extraction failed. Debug!'
|
|
, '\nNo. of ambiguous muts in dr:'
|
|
, len(dr_muts[dr_muts.isin(other_muts)].value_counts().keys().tolist())
|
|
, '\nNo. of ambiguous muts in other:'
|
|
, len(other_muts[other_muts.isin(dr_muts)].value_counts().keys().tolist())
|
|
, '\n=========================================================')
|
|
|
|
#%% clear variables
|
|
del(id_dr, id_other, meta_data, meta_gene_dr, meta_gene_other, mut_grouped, muts_split, other_WF1, other_df, other_muts_df, other_gene_count, gene_LF0, gene_na)
|
|
|
|
del(c1, c2, col_to_split1, col_to_split2, comp_gene_samples, dr_WF0, dr_df, dr_muts_df, dr_gene_WF0, dr_gene_count, expected_gene_samples, other_gene_WF1)
|
|
|
|
#%%: write file: ambiguous muts
|
|
# uncomment as necessary
|
|
#print(outdir)
|
|
#dr_muts.to_csv('dr_muts.csv', header = True)
|
|
#other_muts.to_csv('other_muts.csv', header = True)
|
|
|
|
out_filename1 = gene.lower() + '_ambiguous_muts.csv'
|
|
outfile1 = outdir + '/' + out_filename1
|
|
print('Writing file: ambiguous muts'
|
|
, '\nFilename:', out_filename1
|
|
, '\nPath:', outdir)
|
|
|
|
#common_muts = ['gene_matchVal180Phe','gene_matchGln10Pro'] # test
|
|
inspect = gene_LF1[gene_LF1['mutation'].isin(common_muts)]
|
|
inspect.to_csv(outfile1)
|
|
|
|
print('Finished writing:', out_filename1
|
|
, '\nNo. of rows:', len(inspect)
|
|
, '\nNo. of cols:', len(inspect.columns)
|
|
, '\nNo. of rows = no. of samples with the ambiguous muts present:'
|
|
, dr_muts.isin(other_muts).sum() + other_muts.isin(dr_muts).sum()
|
|
, '\n=============================================================')
|
|
|
|
del(out_filename1)
|
|
#%% end of data extraction and some file writing. More files are written below.
|
|
#=============================================================================
|
|
#%% Formatting df: read aa dict and pull relevant info
|
|
print('Now some more formatting:'
|
|
, '\nread aa dict and pull relevant info'
|
|
, '\nformat mutations:'
|
|
, '\nsplit mutation into mCSM style muts: '
|
|
, '\nFormatting mutation in mCSM style format: {WT}<POS>{MUT}'
|
|
, '\nassign aa properties: adding 2 cols at a time for each prop'
|
|
, '\n===================================================================')
|
|
|
|
# BEWARE hardcoding : only works as we are adding aa prop once for wt and once for mut
|
|
# in each lookup cycle
|
|
ncol_mutf_add = 3 # mut split into 3 cols
|
|
ncol_aa_add = 2 # 2 aa prop add (wt & mut) in each mapping
|
|
|
|
#===========
|
|
# Split 'mutation' column into three: wild_type, position and
|
|
# mutant_type separately. Then map three letter code to one using
|
|
# reference_dict imported at the beginning.
|
|
# After importing, convert mutation to lowercase for compatibility with the dict
|
|
#===========
|
|
gene_LF1['mutation'] = gene_LF1.loc[:, 'mutation'].str.lower()
|
|
gene_regex = gene_match.lower() + r'(\w{3})'
|
|
print('gene regex being used:', gene_regex)
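# e.g. for gene_match = 'pncA_p.', gene_regex = 'pnca_p.(\w{3})', which captures
# the 3-letter wild-type residue: 'pnca_p.val180phe' -> 'val'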
|
|
|
|
mylen0 = len(gene_LF1.columns)
|
|
#=======
|
|
# Iterate through the dict, create a lookup dict i.e
|
|
# lookup_dict = {three_letter_code: one_letter_code}.
|
|
# lookup dict should be the key and the value (you want to create a column for)
|
|
# Then use this to perform the mapping separately for wild type and mutant cols.
# The three letter code is extracted using a string match from the dataframe and then converted
# to a pandas Series, since map only works on a pandas Series
|
|
#=======
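# Sketch of the mapping step (keys/values shown are illustrative; the exact
# structure lives in reference_dict.my_aa_dict):
#   lookup_dict = {'ala': 'A', 'val': 'V', ...}
#   pd.Series(['val', 'ala']).map(lookup_dict) -> ['V', 'A']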
|
|
print('Adding', ncol_mutf_add, 'more cols:\n')
|
|
|
|
# initialise a sub dict that is lookup dict for three letter code to 1-letter code
|
|
# adding three more cols
|
|
lookup_dict = dict()
|
|
for k, v in my_aa_dict.items():
|
|
lookup_dict[k] = v['one_letter_code']
|
|
# wt = gene_LF1['mutation'].str.extract('gene_p.(\w{3})').squeeze() # converts to a series that map works on
|
|
wt = gene_LF1['mutation'].str.extract(gene_regex).squeeze()
|
|
gene_LF1['wild_type'] = wt.map(lookup_dict)
|
|
mut = gene_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze()
|
|
gene_LF1['mutant_type'] = mut.map(lookup_dict)
|
|
|
|
# extract position info from mutation column separately using string match
|
|
gene_LF1['position'] = gene_LF1['mutation'].str.extract(r'(\d+)')
|
|
|
|
mylen1 = len(gene_LF1.columns)
|
|
|
|
# sanity checks
|
|
print('checking if 3-letter wt&mut residue extraction worked correctly')
|
|
# require NO NAs in either extraction (boolean 'and', not bitwise '&' on counts)
if wt.isna().sum() == 0 and mut.isna().sum() == 0:
|
|
print('PASS: 3-letter wt&mut residue extraction worked correctly:'
|
|
, '\nNo NAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
else:
|
|
print('FAIL: 3-letter wt&mut residue extraction failed'
|
|
, '\nNAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
|
|
if mylen1 == mylen0 + ncol_mutf_add:
|
|
print('PASS: successfully added', ncol_mutf_add, 'cols'
|
|
, '\nold length:', mylen0
|
|
, '\nnew len:', mylen1)
|
|
else:
|
|
print('FAIL: failed to add cols:'
|
|
, '\nold length:', mylen0
|
|
, '\nnew len:', mylen1)
|
|
|
|
# clear variables
|
|
del(k, v, wt, mut, lookup_dict)
|
|
|
|
#=========
|
|
# iterate through the dict, create a lookup dict i.e.
|
|
# lookup_dict = {three_letter_code: aa_prop_water}
|
|
# Do this for both wild_type and mutant as above.
|
|
#=========
|
|
print('Adding', ncol_aa_add, 'more cols:\n')
|
|
|
|
# initialise a sub dict that is lookup dict for three letter code to aa prop
|
|
# adding two more cols
|
|
lookup_dict = dict()
|
|
for k, v in my_aa_dict.items():
|
|
lookup_dict[k] = v['aa_prop_water']
|
|
#print(lookup_dict)
|
|
# wt = gene_LF1['mutation'].str.extract('gene_p.(\w{3})').squeeze() # converts to a series that map works on
|
|
wt = gene_LF1['mutation'].str.extract(gene_regex).squeeze()
|
|
gene_LF1['wt_prop_water'] = wt.map(lookup_dict)
|
|
mut = gene_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze()
|
|
gene_LF1['mut_prop_water'] = mut.map(lookup_dict)
|
|
|
|
mylen2 = len(gene_LF1.columns)
|
|
|
|
# sanity checks
|
|
print('checking if 3-letter wt&mut residue extraction worked correctly')
|
|
if wt.isna().sum() == 0 and mut.isna().sum() == 0:
|
|
print('PASS: 3-letter wt&mut residue extraction worked correctly:'
|
|
, '\nNo NAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
else:
|
|
print('FAIL: 3-letter wt&mut residue extraction failed'
|
|
, '\nNAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
|
|
if mylen2 == mylen1 + ncol_aa_add:
|
|
print('PASS: successfully added', ncol_aa_add, 'cols'
|
|
, '\nold length:', mylen1
|
|
, '\nnew len:', mylen2)
|
|
else:
|
|
print('FAIL: failed to add cols:'
|
|
, '\nold length:', mylen1
|
|
, '\nnew len:', mylen2)
|
|
|
|
# clear variables
|
|
del(k, v, wt, mut, lookup_dict)
|
|
|
|
#========
|
|
# iterate through the dict, create a lookup dict i.e.
|
|
# lookup_dict = {three_letter_code: aa_prop_polarity}
|
|
# Do this for both wild_type and mutant as above.
|
|
#=========
|
|
print('Adding', ncol_aa_add, 'more cols:\n')
|
|
|
|
# initialise a sub dict that is lookup dict for three letter code to aa prop
|
|
# adding two more cols
|
|
lookup_dict = dict()
|
|
for k, v in my_aa_dict.items():
|
|
lookup_dict[k] = v['aa_prop_polarity']
|
|
#print(lookup_dict)
|
|
# wt = gene_LF1['mutation'].str.extract('gene_p.(\w{3})').squeeze() # converts to a series that map works on
|
|
wt = gene_LF1['mutation'].str.extract(gene_regex).squeeze()
|
|
gene_LF1['wt_prop_polarity'] = wt.map(lookup_dict)
|
|
mut = gene_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze()
|
|
gene_LF1['mut_prop_polarity'] = mut.map(lookup_dict)
|
|
|
|
mylen3 = len(gene_LF1.columns)
|
|
|
|
# sanity checks
|
|
print('checking if 3-letter wt&mut residue extraction worked correctly')
|
|
if wt.isna().sum() == 0 and mut.isna().sum() == 0:
|
|
print('PASS: 3-letter wt&mut residue extraction worked correctly:'
|
|
, '\nNo NAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
else:
|
|
print('FAIL: 3-letter wt&mut residue extraction failed'
|
|
, '\nNAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
|
|
if mylen3 == mylen2 + ncol_aa_add:
|
|
print('PASS: successfully added', ncol_aa_add, 'cols'
|
|
, '\nold length:', mylen2
, '\nnew len:', mylen3)
|
|
else:
|
|
print('FAIL: failed to add cols:'
|
|
, '\nold length:', mylen2
, '\nnew len:', mylen3)
|
|
|
|
# clear variables
|
|
del(k, v, wt, mut, lookup_dict)
|
|
|
|
#========
|
|
# iterate through the dict, create a lookup dict i.e.
|
|
# lookup_dict = {three_letter_code: aa_calcprop}
|
|
# Do this for both wild_type and mutant as above.
|
|
#=========
|
|
print('Adding', ncol_aa_add, 'more cols:\n')
|
|
|
|
lookup_dict = dict()
|
|
for k, v in my_aa_dict.items():
|
|
lookup_dict[k] = v['aa_calcprop']
|
|
#print(lookup_dict)
|
|
# wt = gene_LF1['mutation'].str.extract('gene_p.(\w{3})').squeeze() # converts to a series that map works on
|
|
wt = gene_LF1['mutation'].str.extract(gene_regex).squeeze()
|
|
gene_LF1['wt_calcprop'] = wt.map(lookup_dict)
|
|
mut = gene_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze()
|
|
gene_LF1['mut_calcprop'] = mut.map(lookup_dict)
|
|
|
|
mylen4 = len(gene_LF1.columns)
|
|
|
|
# sanity checks
|
|
print('checking if 3-letter wt&mut residue extraction worked correctly')
|
|
if wt.isna().sum() == 0 and mut.isna().sum() == 0:
|
|
print('PASS: 3-letter wt&mut residue extraction worked correctly:'
|
|
, '\nNo NAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
else:
|
|
print('FAIL: 3-letter wt&mut residue extraction failed'
|
|
, '\nNAs detected:'
|
|
, '\nwild-type\n', wt
|
|
, '\nmutant-type\n', mut
|
|
, '\ndim of df:', gene_LF1.shape)
|
|
|
|
if mylen4 == mylen3 + ncol_aa_add:
|
|
print('PASS: successfully added', ncol_aa_add, 'cols'
|
|
, '\nold length:', mylen3
|
|
, '\nnew len:', mylen4)
|
|
else:
|
|
print('FAIL: failed to add cols:'
|
|
, '\nold length:', mylen3
|
|
, '\nnew len:', mylen4)
|
|
|
|
# clear variables
|
|
del(k, v, wt, mut, lookup_dict)
|
|
|
|
#========
|
|
# iterate through the dict, create a lookup dict i.e.
|
|
# lookup_dict = {three_letter_code: aa_taylor}
|
|
# Do this for both wild_type and mutant as above.
|
|
# caution: taylor mapping will create a list within a column
|
|
#=========
|
|
#print('Adding', ncol_aa_add, 'more cols:\n')
|
|
#lookup_dict = dict()
|
|
#for k, v in my_aa_dict.items():
|
|
# lookup_dict[k] = v['aa_taylor']
|
|
#print(lookup_dict)
|
|
# wt = gene_LF1['mutation'].str.extract(gene_regex).squeeze()
|
|
# gene_LF1['wt_taylor'] = wt.map(lookup_dict)
|
|
# mut = gene_LF1['mutation'].str.extract(r'\d+(\w{3})$').squeeze()
|
|
# gene_LF1['mut_taylor'] = mut.map(lookup_dict)
|
|
|
|
#mylen5 = len(gene_LF1.columns)
|
|
|
|
# sanity checks
|
|
#print('checking if 3-letter wt&mut residue extraction worked correctly')
|
|
#if wt.isna().sum() & mut.isna().sum() == 0:
|
|
# print('PASS: 3-letter wt&mut residue extraction worked correctly:'
|
|
# , '\nNo NAs detected:'
|
|
# , '\nwild-type\n', wt
|
|
# , '\nmutant-type\n', mut
|
|
# , '\ndim of df:', gene_LF1.shape)
|
|
#else:
|
|
# print('FAIL: 3-letter wt&mut residue extraction failed'
|
|
# , '\nNo NAs detected:'
|
|
# , '\nwild-type\n', wt
|
|
# , '\nmutant-type\n', mut
|
|
# , '\ndim of df:', gene_LF1.shape)
|
|
|
|
#if mylen5 == mylen4 + ncol_aa_add:
|
|
# print('PASS: successfully added', ncol_aa_add, 'cols'
|
|
# , '\nold length:', mylen4
|
|
# , '\nnew len:', mylen5)
|
|
#else:
|
|
# print('FAIL: failed to add cols:'
|
|
# , '\nold length:', mylen4
|
|
# , '\nnew len:', mylen5)
|
|
# clear variables
|
|
#del(k, v, wt, mut, lookup_dict)
|
|
|
|
########
|
|
# combine the wild_type + position + mutant_type columns to generate
|
|
# Mutationinformation (matches mCSM output field)
|
|
# Remember to use .map(str) for int col types to allow string concatenation
|
|
#########
|
|
gene_LF1['Mutationinformation'] = gene_LF1['wild_type'] + gene_LF1.position.map(str) + gene_LF1['mutant_type']
|
|
print('Created column: Mutationinformation'
|
|
, '\n====================================================================='
|
|
, gene_LF1.Mutationinformation.head(10))
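# e.g. wild_type 'L', position '4', mutant_type 'S' --> Mutationinformation 'L4S'
# (the mCSM SNP format {WT}<POS>{MUT} used for the output files below)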
|
|
|
|
#%% Write file: mCSM muts
|
|
snps_only = pd.DataFrame(gene_LF1['Mutationinformation'].unique())
|
|
snps_only.head()
|
|
# assign column name
|
|
snps_only.columns = ['Mutationinformation']
|
|
|
|
# count how many positions this corresponds to
|
|
pos_only = pd.DataFrame(gene_LF1['position'].unique())
|
|
|
|
print('Checking NA in snps...')# should be 0
|
|
if snps_only.Mutationinformation.isna().sum() == 0:
|
|
print ('PASS: NO NAs/missing entries for SNPs'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: SNP has NA, Possible mapping issues from dict?'
|
|
, '\nDebug please!'
|
|
, '\n=========================================================')
|
|
|
|
out_filename2 = gene.lower() + '_mcsm_snps.csv'
|
|
outfile2 = outdir + '/' + out_filename2
|
|
|
|
print('Writing file: mCSM style muts'
|
|
, '\nFilename:', out_filename2
|
|
, '\nPath:', outdir
|
|
, '\nmutation format (SNP): {WT}<POS>{MUT}'
|
|
, '\nNo. of distinct muts:', len(snps_only)
|
|
, '\nNo. of distinct positions:', len(pos_only)
|
|
, '\n=============================================================')
|
|
|
|
snps_only.to_csv(outfile2, header = False, index = False)
|
|
|
|
print('Finished writing:', out_filename2
|
|
, '\nNo. of rows:', len(snps_only)
|
|
, '\nNo. of cols:', len(snps_only.columns)
|
|
, '\n=============================================================')
|
|
del(out_filename2)
|
|
|
|
#%% Write file: gene_metadata (i.e gene_LF1)
|
|
# where each row has UNIQUE mutations NOT unique sample ids
|
|
out_filename3 = gene.lower() + '_metadata.csv'
|
|
outfile3 = outdir + '/' + out_filename3
|
|
print('Writing file: LF formatted data'
|
|
, '\nFilename:', out_filename3
|
|
, '\nPath:', outdir
|
|
, '\n============================================================')
|
|
|
|
gene_LF1.to_csv(outfile3, header = True, index = False)
|
|
print('Finished writing:', out_filename3
|
|
, '\nNo. of rows:', len(gene_LF1)
|
|
, '\nNo. of cols:', len(gene_LF1.columns)
|
|
, '\n=============================================================')
|
|
del(out_filename3)
|
|
|
|
#%% write file: mCSM style but with repetitions for MSA and logo plots
|
|
all_muts_msa = pd.DataFrame(gene_LF1['Mutationinformation'])
|
|
all_muts_msa.head()
|
|
# assign column name
|
|
all_muts_msa.columns = ['Mutationinformation']
|
|
|
|
# make sure it is string
|
|
all_muts_msa.columns.dtype
|
|
|
|
# sort the column
|
|
all_muts_msa_sorted = all_muts_msa.sort_values(by = 'Mutationinformation')
|
|
|
|
# create an extra column with protein name
|
|
all_muts_msa_sorted = all_muts_msa_sorted.assign(fasta_name = '3PL1')
|
|
all_muts_msa_sorted.head()
|
|
|
|
# rearrange columns so the fasta name is the first column (required for mutate.script)
|
|
all_muts_msa_sorted = all_muts_msa_sorted[['fasta_name', 'Mutationinformation']]
|
|
all_muts_msa_sorted.head()
|
|
|
|
print('Checking NA in snps...')# should be 0
|
|
if all_muts_msa.Mutationinformation.isna().sum() == 0:
|
|
print ('PASS: NO NAs/missing entries for SNPs'
|
|
, '\n===============================================================')
|
|
else:
|
|
print('FAIL: SNP has NA, Possible mapping issues from dict?'
|
|
, '\nDebug please!'
|
|
, '\n=========================================================')
|
|
|
|
out_filename4 = gene.lower() +'_all_muts_msa.csv'
|
|
outfile4 = outdir + '/' + out_filename4
|
|
|
|
print('Writing file: mCSM style muts for msa',
|
|
'\nFilename:', out_filename4,
|
|
'\nPath:', outdir,
|
|
'\nmutation format (SNP): {WT}<POS>{MUT}',
|
|
'\nNo.of lines of msa:', len(all_muts_msa),
|
|
)
|
|
|
|
all_muts_msa_sorted.to_csv(outfile4, header = False, index = False)
|
|
|
|
print('Finished writing:', out_filename4
|
|
, '\nNo. of rows:', len(all_muts_msa)
|
|
, '\nNo. of cols:', len(all_muts_msa.columns)
|
|
, '\n=============================================================')
|
|
|
|
del(out_filename4)
|
|
|
|
#%% write file for mutational positions
|
|
# count how many positions this corresponds to
|
|
pos_only = pd.DataFrame(gene_LF1['position'].unique())
|
|
# assign column name
|
|
pos_only.columns = ['position']
|
|
# make sure dtype of column position is int or numeric and not string
|
|
pos_only.position.dtype
|
|
pos_only['position'] = pos_only['position'].astype(int)
|
|
pos_only.position.dtype
|
|
|
|
# sort by position value
|
|
pos_only_sorted = pos_only.sort_values(by = 'position', ascending = True)
|
|
|
|
out_filename5 = gene.lower() + '_mutational_positions.csv'
|
|
outfile5 = outdir + '/' + out_filename5
|
|
|
|
print('Writing file: mutational positions'
|
|
, '\nNo. of distinct positions:', len(pos_only_sorted)
|
|
, '\nFilename:', out_filename5
|
|
, '\nPath:', outdir
|
|
, '\n=============================================================')
|
|
|
|
pos_only_sorted.to_csv(outfile5, header = True, index = False)
|
|
|
|
print('Finished writing:', out_filename5
|
|
, '\nNo. of rows:', len(pos_only_sorted)
|
|
, '\nNo. of cols:', len(pos_only_sorted.columns)
|
|
, '\n=============================================================')
|
|
|
|
del(out_filename5)
|
|
#=======================================================================
|
|
print(u'\u2698' * 50,
|
|
'\nEnd of script: Data extraction and writing files'
|
|
'\n' + u'\u2698' * 50 )
|
|
#%% end of script
|