552 lines
No EOL
24 KiB
Python
552 lines
No EOL
24 KiB
Python
#!/usr/bin/env python3
|
|
# -*- coding: utf-8 -*-
|
|
'''
|
|
@author: tanu
|
|
'''
|
|
#%% Description
|
|
# FIXME: include error checking to ensure you only
# concentrate on positions that have structural info?
|
|
|
|
# FIXME: import dirs.py to get the basic dir paths available
|
|
#=======================================================================
|
|
# TASK: extract ALL <gene> matched mutations from GWAS data
|
|
# Input data file has the following format: each row = unique sample id
|
|
# id,country,lineage,sublineage,drtype,drug,dr_muts_col,other_muts_col...
|
|
# 0,sampleID,USA,lineage2,lineage2.2.1,Drug-resistant,0.0,WT,gene_match<wt>POS<mut>; pncA_c.<wt>POS<mut>...
|
|
# where multiple mutations and multiple mutation types are separated by ';'.
|
|
# We are interested in the protein coding region i.e. mutations with the <gene>_p. format.
|
|
# This script splits the mutations on the ';' and extracts protein coding muts only
|
|
# where each row is a separate mutation
|
|
# sample ids AND mutations are NOT unique, but the COMBINATION (sample id + mutation) = unique
|
|
|
|
|
|
# output files: all lower case
|
|
# 1) <gene>_gwas.csv
|
|
# 2) <gene>_common_ids.csv
|
|
# 3) <gene>_ambiguous_muts.csv
|
|
# 4) <gene>_mcsm_formatted_snps.csv
|
|
# 5) <gene>_metadata_poscounts.csv
|
|
# 6) <gene>_metadata.csv
|
|
# 7) <gene>_all_muts_msa.csv
|
|
# 8) <gene>_mutational_positons.csv
|
|
|
|
#------------
|
|
# NOTE
|
|
#-----------
|
|
# drtype is renamed to 'resistance' in the 35k dataset
|
|
# all colnames in the output files lowercase
|
|
|
|
#-------------
|
|
# requires
|
|
#-------------
|
|
#reference_dict.py
|
|
#tidy_split.py
|
|
|
|
# bash counting for cross checks: example
|
|
#grep -oP 'pncA_p.[A-Za-z]{3}[0-9]+[A-Za-z]{3}' mtb_gwas_meta_v6.csv | sort | uniq -c | wc -l
|
|
#=======================================================================
|
|
#%% load libraries
|
|
import os, sys
|
|
import re
|
|
import pandas as pd
|
|
import numpy as np
|
|
import argparse
|
|
from statistics import mean, median, mode
|
|
from statistics import multimode
|
|
#=======================================================================
|
|
#%% dir and local imports
|
|
# Resolve the user's home directory and move into the analysis scripts
# folder so the local modules (reference_dict, tidy_split) import cleanly.
homedir = os.path.expanduser('~')

scripts_dir = os.path.join(homedir, 'git', 'LSHTM_analysis', 'scripts')
os.getcwd()
os.chdir(scripts_dir)
os.getcwd()
|
|
#=======================================================================
|
|
#%% command line args
|
|
# Command-line interface. NOTE: drug and gene default to None but are used
# unconditionally downstream (string concatenation), so both are effectively
# required -- passing neither will raise a TypeError later.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--drug', help='drug name (case sensitive)', default = None)
arg_parser.add_argument('-g', '--gene', help='gene name (case sensitive)', default = None)
# Optional path overrides; defaults are derived later from homedir and drug.
arg_parser.add_argument('--datadir', help = 'Data Directory. By default, it assmumes homedir + git/Data')
arg_parser.add_argument('-i', '--input_dir', help = 'Input dir containing pdb files. By default, it assmumes homedir + <drug> + input')
arg_parser.add_argument('-o', '--output_dir', help = 'Output dir for results. By default, it assmes homedir + <drug> + output')
# When set, the data/input/output directories are created before the checks.
arg_parser.add_argument('-m', '--make_dirs', help = 'Make dir for input and output', action='store_true')

arg_parser.add_argument('--debug', action ='store_true', help = 'Debug Mode')

args = arg_parser.parse_args()
|
|
###############################################################################
|
|
#%% variable assignment: input and output paths & filenames
|
|
# Unpack parsed CLI options into the module-level names used throughout.
drug, gene = args.drug, args.gene
datadir, indir, outdir = args.datadir, args.input_dir, args.output_dir
make_dirs = args.make_dirs
|
|
###############################################################################
|
|
#%% variable assignment: input and output dirs and files
|
|
#=======
|
|
# dirs
|
|
#=======
|
|
#=======
# dirs
#=======
# Derive default locations from homedir/drug when not given on the CLI.
if not datadir:
    datadir = homedir + '/' + 'git/Data'

if not indir:
    indir = datadir + '/' + drug + '/input_v2'

if not outdir:
    outdir = datadir + '/' + drug + '/output_v2'

def _create_dir(dir_path):
    """Create dir_path (parents included); pre-existing dirs are fine.

    Prints a success or failure message and never raises.
    """
    try:
        os.makedirs(dir_path, exist_ok = True)
        print("Directory '%s' created successfully" % dir_path)
    except OSError:
        # BUG FIX: the original print omitted the '%' argument, so the
        # literal '%s' was printed instead of the offending path.
        print("Directory '%s' can not be created" % dir_path)

if make_dirs:
    print('make_dirs is turned on, creating data dir:', datadir)
    _create_dir(datadir)

    print('make_dirs is turned on, creating indir:', indir)
    _create_dir(indir)

    print('make_dirs is turned on, creating outdir:', outdir)
    _create_dir(outdir)

# handle missing dirs here: bail out early with guidance rather than
# failing later with a less helpful traceback.
if not os.path.isdir(datadir):
    print('ERROR: Data directory does not exist:', datadir
          , '\nPlease create and ensure gwas data is present and then rerun\nelse specify cmd option --make_dirs')
    sys.exit()

if not os.path.isdir(indir):
    print('ERROR: Input directory does not exist:', indir
          , '\nPlease either create or specify indir and rerun\nelse specify cmd option --make_dirs')
    sys.exit()

if not os.path.isdir(outdir):
    print('ERROR: Output directory does not exist:', outdir
          , '\nPlease create or specify outdir and rerun\nelse specify cmd option --make_dirs')
    sys.exit()
|
|
###############################################################################
|
|
#%% required local imports
|
|
from reference_dict import my_aa_dict # CHECK DIR STRUC THERE!
|
|
from tidy_split import tidy_split
|
|
###############################################################################
|
|
#%% creating required variables: gene and drug dependent, and input filename
# All mutation patterns are anchored on '<gene>_p.' (the protein-coding form).
gene_match = gene + '_p.'
print('mut pattern for gene', gene, ':', gene_match)

# nsSNP: three-letter WT residue, numeric position, three-letter mutant residue.
three_letter_aa = '[A-Za-z]{3}'
nssnp_match = gene_match + three_letter_aa + '[0-9]+' + three_letter_aa
print('nsSNP for gene', gene, ':', nssnp_match)

# Same pattern wrapped in a capture group (for str.extract-style use).
nssnp_match_captureG = '(' + nssnp_match + ')'

# Sub-patterns used later to pull a single mutation string apart.
wt_regex = gene_match.lower() + '([A-Za-z]{3})'   # wild-type residue
print('wt regex:', wt_regex)

mut_regex = r'[0-9]+(\w{3})$'                     # mutant residue at the end
print('mt regex:', mut_regex)

pos_regex = r'([0-9]+)'                           # residue position
print('position regex:', pos_regex)

# building cols to extract: these column names are drug-dependent
dr_muts_col = 'dr_mutations_' + drug
other_muts_col = 'other_mutations_' + drug
resistance_col = 'drtype'

print('Extracting columns based on variables:\n'
      , drug
      , '\n'
      , dr_muts_col
      , '\n'
      , other_muts_col
      , '\n'
      , resistance_col
      , '\n===============================================================')
|
|
#=======
# input
#=======
# Master GWAS metadata file (one row per sample).
#in_filename_master_master = 'original_tanushree_data_v2.csv' #19k
in_filename_master = 'mtb_gwas_meta_v6.csv' #35k
infile_master = '/'.join([datadir, in_filename_master])
print('Input file: ', infile_master
      , '\n============================================================')

#=======
# output
#=======
# several output files: named in their respective sections at write time
print('Output filename: in the respective sections'
      , '\nOutput path: ', outdir
      , '\n=============================================================')

#end of variable assignment for input and output files
|
|
###############################################################################
|
|
#%% Read input file
# Master GWAS metadata: one row per unique sample id; the drug-specific
# mutation columns hold ';'-separated lists of mutations.
master_data = pd.read_csv(infile_master, sep = ',')

# column names
#list(master_data.columns)

# extract relevant columns to extract from meta data related to the drug
if in_filename_master == 'original_tanushree_data_v2.csv':
    # legacy 19k dataset: has 'country' and 'drtype' columns directly
    meta_data = master_data[['id'
                             , 'country'
                             , 'lineage'
                             , 'sublineage'
                             , 'drtype'
                             , drug
                             , dr_muts_col
                             , other_muts_col]]
else:
    # 35k dataset: different id/geography columns; resistance_col = 'drtype'
    core_cols = ['id'
                 , 'sample'
                 #, 'patient_id'
                 #, 'strain'
                 , 'lineage'
                 , 'sublineage'
                 , 'country_code'
                 #, 'geographic_source'
                 , resistance_col]

    # columns whose names depend on the drug under study
    variable_based_cols = [drug
                          , dr_muts_col
                          , other_muts_col]

    cols_to_extract = core_cols + variable_based_cols
    print('Extracting', len(cols_to_extract), 'columns from master data')

    meta_data = master_data[cols_to_extract]
    # drop the (large) master frame and temporaries from memory
    del(master_data, variable_based_cols, cols_to_extract)

print('Extracted meta data from filename:', in_filename_master
      , '\nDim:', meta_data.shape)

# checks and results
total_samples = meta_data['id'].nunique()
print('RESULT: Total samples:', total_samples
      , '\n===========================================================')

# counts NA per column (bare expression: interactive inspection only)
meta_data.isna().sum()
print('No. of NAs/column:' + '\n', meta_data.isna().sum()
      , '\n===========================================================\n')
|
|
###############################################################################
|
|
#%% Quick checks:
# lineage distribution (bare expressions: interactive inspection, results discarded)
meta_data['lineage'].value_counts()
meta_data['lineage'].value_counts().sum()
meta_data['lineage'].nunique()

# replace lineage with 'L' in lineage_labels
#meta_data['lineage_labels'] = meta_data['lineage']
#meta_data['lineage_labels'].equals(meta_data['lineage'])
#all(meta_data['lineage_labels'].value_counts() == meta_data['lineage'].value_counts())
#meta_data['lineage_labels'] = meta_data['lineage_labels'].str.replace('lineage', 'L')
#meta_data['lineage'].value_counts()
#meta_data['lineage_labels'].value_counts()

# shorten labels in place: 'lineage2.2.1' -> 'L2.2.1' etc.
meta_data['lineage'] = meta_data['lineage'].str.replace('lineage', 'L')
meta_data['lineage'].value_counts()
print("\n================================"
      , "\nLineage numbers"
      , "\nComplete lineage samples:", meta_data['lineage'].value_counts().sum()
      , "\nMissing lineage samples:", meta_data['id'].nunique() - meta_data['lineage'].value_counts().sum()
      , "\n================================")

# check whether 'id' and 'sample' carry the same values
meta_data['id'].nunique()
meta_data['sample'].nunique()
meta_data['id'].equals(meta_data['sample'])
# throwaway frame: per-row comparison of id vs sample ('1' = equal)
foo = meta_data.copy()
foo['Diff'] = np.where( foo['id'] == foo['sample'] , '1', '0')
foo['Diff'].value_counts()

# drtype (resistance category) completeness
meta_data['drtype'].value_counts()
meta_data['drtype'].value_counts().sum()
print("\n================================"
      , "\ndrtype numbers"
      , "\nComplete drtype samples:", meta_data['drtype'].value_counts().sum()
      , "\nMissing drtype samples:", meta_data['id'].nunique() - meta_data['drtype'].value_counts().sum()
      , "\n================================")

# derive R/S labels from the binary drug phenotype column
# (values outside {0, 1}, incl. NaN, map to NaN per Series.map semantics)
meta_data['drug_name'] = meta_data[drug].map({1:'R'
                                              , 0:'S'})
meta_data['drug_name'].value_counts()
meta_data[drug].value_counts()
meta_data[drug].value_counts().sum()
print("\n================================"
      , "\ndrug", drug, "numbers"
      , "\nComplete drug samples:", meta_data[drug].value_counts().sum()
      , "\nMissing drug samples:", meta_data['id'].nunique() - meta_data[drug].value_counts().sum()
      , "\n================================")

# same summary via the derived labels -- totals should agree with the above
print("\n================================"
      , "\ndrug", drug, "numbers"
      , "\nComplete drug samples:", meta_data['drug_name'].value_counts().sum()
      , "\nMissing drug samples:", meta_data['id'].nunique() - meta_data['drug_name'].value_counts().sum()
      , "\n================================")
|
|
|
|
#%% Quick counts: All samples, drug:
# Two equivalent summaries: one from the raw 0/1 drug column, one from the
# derived 'drug_name' (R/S) labels -- the totals should agree.
print('===========================================================\n'
      , 'RESULT: Total no. of samples tested for', drug, ':', meta_data[drug].value_counts().sum()
      , '\n===========================================================\n'
      , 'RESULT: No. of Sus and Res', drug, 'samples:\n', meta_data[drug].value_counts()
      , '\n===========================================================\n'
      , 'RESULT: Percentage of Sus and Res', drug, 'samples:\n', meta_data[drug].value_counts(normalize = True)*100
      , '\n===========================================================')

print('===========================================================\n'
      , 'RESULT: Total no. of samples tested for', drug, ':', meta_data['drug_name'].value_counts().sum()
      , '\n===========================================================\n'
      , 'RESULT: No. of Sus and Res', drug, 'samples:\n', meta_data['drug_name'].value_counts()
      , '\n===========================================================\n'
      , 'RESULT: Percentage of Sus and Res', drug, 'samples:\n', meta_data['drug_name'].value_counts(normalize = True)*100
      , '\n===========================================================')
|
|
|
|
##############################################################################
|
|
#%% Extracting nsSNP for gene (meta_gene_all): from dr_muts col and other_muts_col
# Keep every sample whose dr OR other mutation string contains a <gene>_p. nsSNP.
#meta_data_gene = meta_data.loc[ (meta_data[dr_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False) ) | (meta_data[other_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False) ) ]
# so downstream code doesn't change
meta_gene_all = meta_data.loc[ (meta_data[dr_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False) ) | (meta_data[other_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False) ) ]

#%% DF: with dr_muts_col
# Two independent routes to the same subset; their equality is the sanity check.
meta_gene_dr = meta_gene_all.loc[meta_gene_all[dr_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False)]
meta_gene_dr1 = meta_data.loc[meta_data[dr_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False)]

if meta_gene_dr.equals(meta_gene_dr1):
    print('\nPASS: DF with column:', dr_muts_col, 'extracted successfully'
          , '\ngene_snp_match in column:', dr_muts_col, meta_gene_dr.shape)
else:
    # BUG FIX: sys.exit() accepts at most one argument; the original passed
    # several, which raised TypeError instead of exiting with the message.
    sys.exit('\nFAIL: DF with column: ' + dr_muts_col + ' could not be extracted'
             + '\nshape of df1: ' + str(meta_gene_dr.shape)
             + '\nshape of df2: ' + str(meta_gene_dr1.shape)
             + '\nCheck again!')
|
|
##############################################################################
|
|
#%% DF: with other_muts_col
# Same two-route extraction as for dr_muts_col, applied to other_muts_col.
meta_gene_other = meta_gene_all.loc[meta_gene_all[other_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False)]
meta_gene_other1 = meta_data.loc[meta_data[other_muts_col].str.contains(nssnp_match, na = False, regex = True, case = False)]

# BUG FIX: the label said 'dr' but the count reported is the other_muts subset.
print('gene_snp_match in other:', len(meta_gene_other))
meta_gene_other.equals(meta_gene_other1)

if meta_gene_other.equals(meta_gene_other1):
    print('\nPASS: DF with column:', other_muts_col,'extracted successfully'
          , '\ngene_snp_match in column:', other_muts_col, meta_gene_other.shape)
else:
    # BUG FIX: sys.exit() accepts at most one argument; the original passed
    # several, which raised TypeError instead of exiting with the message.
    sys.exit('\nFAIL: DF with column: ' + other_muts_col + ' could not be extracted'
             + '\nshape of df1: ' + str(meta_gene_other.shape)
             + '\nshape of df2: ' + str(meta_gene_other1.shape)
             + '\nCheck again!')
|
|
##############################################################################
|
|
#%% Quick counts: <gene> nsSNP samples, drug
# Counts restricted to samples carrying at least one <gene> nsSNP.
meta_gene_all[drug].value_counts()

print('===========================================================\n'
      , 'RESULT: Total no. of samples for', drug, 'with nsSNP mutations:', meta_gene_all['id'].nunique()
      , '\n===========================================================\n'

      , '===========================================================\n'
      , 'RESULT: Total no. of samples tested for', drug, 'with nsSNP mutations:', meta_gene_all[drug].value_counts().sum()
      , '\n===========================================================\n'

      , '===========================================================\n'
      , 'RESULT: Total no. of samples tested for', drug, 'with nsSNP mutations:', meta_gene_all['drug_name'].value_counts().sum()

      , '\n===========================================================\n'
      , 'RESULT: No. of Sus and Res', drug, 'samples with nsSNP:\n', meta_gene_all['drug_name'].value_counts()

      , '\n===========================================================\n'
      , 'RESULT: Percentage of Sus and Res', drug, 'samples with nsSNP mutations:\n', meta_gene_all['drug_name'].value_counts(normalize = True)*100
      , '\n===========================================================')
|
|
###############################################################################
|
|
#%% Keep two copies of the original index for downstream merging
for idx_col in ('index_orig', 'index_orig_copy'):
    meta_gene_all[idx_col] = meta_gene_all.index
    # sanity: the stored copy mirrors the live index (result discarded;
    # interactive check only)
    all(meta_gene_all.index.values == meta_gene_all[idx_col].values)
|
|
##############################################################################
|
|
#%% Important sanity checks: Dr muts column for tidy split(), nsSNPs, etc.
# Split based on semi colon dr_muts_col and other_muts_col
search = ";"

# count of occurrence of ";" in dr_muts_col: no. of semicolons + 1 equals the
# no. of rows tidy_split() will create for that sample
count_df_dr = meta_gene_dr[['id', dr_muts_col]]
# Series.str.count(pat, flags): re.I is passed positionally as flags
count_df_dr['dr_semicolon_count'] = meta_gene_dr.loc[:, dr_muts_col].str.count(search, re.I)
dr_sc_C = count_df_dr['dr_semicolon_count'].value_counts().reset_index()
dr_sc_C

# after reset_index: 'index' = semicolon count, 'dr_semicolon_count' = its frequency
# NOTE(review): this assumes pandas < 2.0 reset_index() column naming;
# pandas >= 2.0 names these '<col>' and 'count' -- confirm pandas version.
dr_sc_C['index_semicolon'] = (dr_sc_C['index'] + 1) *dr_sc_C['dr_semicolon_count']
dr_sc_C
# expected row count of the dr_muts dataframe after tidy_split()
expected_drC = dr_sc_C['index_semicolon'].sum()
expected_drC

# count no. of nsSNPs and extract those nsSNPs
count_df_dr['dr_geneSNP_count'] = meta_gene_dr.loc[:, dr_muts_col].str.count(nssnp_match, re.I)
# cross-check target for the explicit findall loop further below
dr_gene_count1 = count_df_dr['dr_geneSNP_count'].sum()
# DEL: need to count by find_all
# dr_snps_df = meta_gene_dr.loc[:, dr_muts_col].str.extract(nssnp_match_captureG, re.I)
# dr_snps_df.columns = [dr_muts_col]
# dr_snps_countU = dr_snps_df[dr_muts_col].nunique()
# dr_snps_countU
# dr_snps_count_dict = dr_snps_df[dr_muts_col].value_counts().to_dict()
# dr_snps_count_dict
|
|
###############################################################################
|
|
# This is to find out how many samples have 1 and more than 1 mutation, so you
# can use it to check if your data extraction process for dr_muts
# and other_muts has worked correctly AND also to check the dim of the
# final formatted data.
# This will have: unique COMBINATION of sample id and <gene_match> mutations
#========
# First: counting <gene_match> mutations in dr_muts_col column
#========
print('Now counting WT &', nssnp_match, 'muts within the column:', dr_muts_col)
# drop na and extract a clean df
clean_df = meta_data.dropna(subset=[dr_muts_col])

# sanity check: count na
na_count = meta_data[dr_muts_col].isna().sum()

if len(clean_df) == (total_samples - na_count):
    print('PASS: clean_df extracted: length is', len(clean_df)
          , '\nNo.of NAs in', dr_muts_col, '=', na_count, '/', total_samples
          , '\n==========================================================')
else:
    sys.exit('FAIL: Could not drop NAs')

dr_gene_count = 0   # total nsSNP matches across all samples
wt = 0              # total 'WT' tokens across all samples
id_dr = []          # sample ids with >= 1 nsSNP
id2_dr = []         # sample ids with > 1 nsSNP
dr_gene_mutsL = []  # every matched nsSNP string (duplicates kept)

# IMPROVED: one re.findall per row -- the original ran the same findall twice
# (once for the count, once for the list) and shadowed the builtin 'id'.
for sample_id, muts_entry in zip(clean_df['id'], clean_df[dr_muts_col]):
    gene_drL = re.findall(nssnp_match, muts_entry, re.IGNORECASE)
    count_gene_dr = len(gene_drL)
    if count_gene_dr > 0:
        id_dr.append(sample_id)
    if count_gene_dr > 1:
        id2_dr.append(sample_id)
    dr_gene_count = dr_gene_count + count_gene_dr
    dr_gene_mutsL = dr_gene_mutsL + gene_drL
    wt = wt + muts_entry.count('WT')

print('RESULTS:')
print('Total WT in dr_muts_col:', wt)
print('Total matches of', gene, 'SNP matches in', dr_muts_col, ':', dr_gene_count)
print('Total samples with > 1', gene, 'nsSNPs in dr_muts_col:', len(id2_dr) )
print('Total matches of UNIQUE', gene, 'SNP matches in', dr_muts_col, ':', len(set(dr_gene_mutsL)))
print('=================================================================')

# the explicit loop count must agree with the pandas str.count() total
if dr_gene_count == dr_gene_count1:
    print('\nPass: dr gene SNP count match')
else:
    sys.exit('\nFAIL: dr gene SNP count MISmatch')

# tidy up loop temporaries (names match the rewritten loop above)
del(clean_df, na_count, wt, id2_dr, sample_id, muts_entry, count_gene_dr)
|
|
|
|
#%% tidy_split(): dr_muts_col
#=========
# DF1: dr_muts_col
# and remove leading white spaces
#=========
col_to_split1 = dr_muts_col
print ('Firstly, applying tidy split on dr muts df', meta_gene_dr.shape
       , '\ncolumn name to apply tidy_split():' , col_to_split1
       , '\n============================================================')

# apply tidy_split(): one output row per ';'-separated mutation entry
dr_WF0 = tidy_split(meta_gene_dr, col_to_split1, sep = ';')
# remove leading white space else these are counted as distinct mutations as well
dr_WF0[dr_muts_col] = dr_WF0[dr_muts_col].str.strip()

# row count must equal the semicolon-derived expectation computed earlier
if len(dr_WF0) == expected_drC:
    print('\nPass: tidy split on', dr_muts_col, 'worked'
          , '\nExpected nrows:', expected_drC
          , '\nGot nrows:', len(dr_WF0) )
else:
    print('\nFAIL: tidy split on', dr_muts_col, 'did not work'
          , '\nExpected nrows:', expected_drC
          , '\nGot nrows:', len(dr_WF0) )

# Extract only the samples/rows with nssnp_match
#dr_gene_WF0 = dr_WF0.loc[dr_WF0[dr_muts_col].str.contains(gene_match)]
dr_gene_WF0 = dr_WF0.loc[dr_WF0[dr_muts_col].str.contains(nssnp_match, regex = True, case = False)]
# same subset via the capture-group variant of the pattern
dr_gene_WF0_v2 = dr_WF0.loc[dr_WF0[dr_muts_col].str.contains(nssnp_match_captureG, regex = True, case = False)]

print('Lengths after tidy split and extracting', nssnp_match, 'muts:'
      , '\nOld length:' , len(meta_gene_dr)
      , '\nLength after split:', len(dr_WF0)
      , '\nLength of nssnp df:', len(dr_gene_WF0)
      , '\nExpected len:', dr_gene_count
      , '\n=============================================================')

if len(dr_gene_WF0) == dr_gene_count:
    print('PASS: length matches expected length'
          , '\n===============================================================')
else:
    sys.exit('FAIL: lengths mismatch')

# count the freq of 'dr_muts' samples
dr_muts_df = dr_gene_WF0 [['id', dr_muts_col]]
print('Dim of dr_muts_df:', dr_muts_df.shape)

# add freq column: how many mutation rows each sample contributes
# NOTE(review): assigning on this slice may raise SettingWithCopyWarning
dr_muts_df['dr_sample_freq'] = dr_muts_df.groupby('id')['id'].transform('count')
#dr_muts_df['dr_sample_freq'] = dr_muts_df.loc[dr_muts_df.groupby('id')].transform('count')
print('Revised dim of dr_muts_df:', dr_muts_df.shape)

c1 = dr_muts_df.dr_sample_freq.value_counts()
print('Counting no. of sample frequency:\n', c1
      , '\n===================================================================')

# sanity check: length of gene samples
if len(dr_gene_WF0) == c1.sum():
    print('PASS: WF data has expected length'
          , '\nLength of dr_gene WFO:', c1.sum()
          , '\n===============================================================')
else:
    sys.exit('FAIL: length mismatch!')

# Important: Assign 'column name' on which split was performed as an extra column
# This is so you can identify if mutations are dr_type or other in the final df
dr_df = dr_gene_WF0.assign(mutation_info = dr_muts_col)
print('Dim of dr_df:', dr_df.shape
      , '\n=============================================================='
      , '\nEnd of tidy split() on dr_muts, and added an extra column relecting mut_category'
      , '\n===============================================================')
|
|
########################################################################
|
|
#%% Important sanity checks: other muts column for tidy split(), nsSNPs, etc.
# Mirrors the dr_muts_col sanity block above, applied to other_muts_col.

# count of occurrence of ";" in other_muts_col: no. of semicolons + 1 equals
# the no. of rows tidy_split() will create for that sample
count_df_other = meta_gene_other[['id', other_muts_col]]
count_df_other['other_semicolon_count'] = meta_gene_other.loc[:, other_muts_col].str.count(search, re.I)
other_sc_C = count_df_other['other_semicolon_count'].value_counts().reset_index()
other_sc_C
# after reset_index: 'index' = semicolon count, 'other_semicolon_count' = its frequency
# NOTE(review): assumes pandas < 2.0 reset_index() column naming -- confirm.
other_sc_C['index_semicolon'] = (other_sc_C['index']+1)*other_sc_C['other_semicolon_count']
other_sc_C
# expected row count after tidy_split() on other_muts_col
expected_otherC = other_sc_C['index_semicolon'].sum()
expected_otherC

# count no. of nsSNPs and extract those nsSNPs
count_df_other['other_geneSNP_count'] = meta_gene_other.loc[:, other_muts_col].str.count(nssnp_match, re.I)

# DEL: need to count by find_all
# other_snps_df = meta_gene_other.loc[:, other_muts_col].str.extract(nssnp_match_captureG, re.I)
# other_snps_df
# other_snps_df.columns = [other_muts_col]
# other_snps_countU = other_snps_df[other_muts_col].nunique()
# other_snps_countU
# other_snps_count_dict = other_snps_df[other_muts_col].value_counts().to_dict()
# other_snps_count_dict
|
|
############################################################################### |