minor changes to variable names in .R & .py

parent dd91692673
commit c184841951

2 changed files with 96 additions and 75 deletions
@@ -4,43 +4,41 @@
 # and add the calculated params to meta_data extracted from
 # pnca_data_extraction.py
 #===========================================
-homedir = '~'
 getwd()
-#setwd('~/git/LSHTM_analysis/meta_data_analysis')
+setwd('~/git/LSHTM_analysis/meta_data_analysis')
-setwd(paste0(homedir, '/', 'git/LSHTM_analysis/meta_data_analysis'))
 getwd()

 #%% variable assignment: input and output paths & filenames
 drug = 'pyrazinamide'
 gene = 'pncA'
 gene_match = paste0(gene,'_p.')
-print(gene_match)
+cat(gene_match)

 #=======
 # input dir
 #=======
-# file1: Raw data
+# infile1: Raw data
 #indir = 'git/Data/pyrazinamide/input/original'
-indir = paste0('git/Data', '/', drug, '/', 'input/original')
+indir = paste0('~/git/Data')
 in_filename = 'original_tanushree_data_v2.csv'
-infile = paste0(homedir, '/', indir, '/', in_filename)
+infile = paste0(indir, '/', in_filename)
-print(paste0('Reading infile:', ' ', infile) )
+cat(paste0('Reading infile1: raw data', ' ', infile) )

-# file2: file to extract valid snps and add calcs to: pnca_metadata.csv {outfile3 from data extraction script}
+# infile2: gene associated meta data file to extract valid snps and add calcs to
-indir_metadata = paste0('git/Data', '/', drug, '/', 'output')
+# filename: outfile3 from data_extraction.py
+indir_metadata = paste0('~/git/Data', '/', drug, '/', 'output')
 in_filename_metadata = 'pnca_metadata.csv'
-infile_metadata = paste0(homedir, '/', indir_metadata, '/', in_filename_metadata)
+infile_metadata = paste0(indir_metadata, '/', in_filename_metadata)
-print(paste0('Reading metadata infile:', infile_metadata))
+cat(paste0('Reading infile2: gene associated metadata:', infile_metadata))

 #=========
 # output dir
 #=========
-# output filename in respective section at the time of outputting files
 # outdir = 'git/Data/pyrazinamide/output'
-outdir = paste0('git/Data', '/', drug, '/', 'output')
+outdir = paste0('~/git/Data', '/', drug, '/', 'output')
 out_filename = paste0(tolower(gene),'_', 'meta_data_with_AFandOR.csv')
-outfile = paste0(homedir, '/', outdir, '/', out_filename)
+outfile = paste0(outdir, '/', out_filename)
-print(paste0('Output file with full path:', outfile))
+cat(paste0('Output file with full path:', outfile))

 #%% end of variable assignment for input and output files

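Note on this hunk: the R script no longer builds its paths from a separate homedir variable; indir, indir_metadata and outdir are anchored directly at ~/git/Data and the full file paths are pasted from those. Below is a minimal sketch of the resulting scheme for orientation only (values copied from the hunk above, not part of the commit itself):

# sketch: the path variables as they stand after this commit
drug = 'pyrazinamide'
gene = 'pncA'

indir          = paste0('~/git/Data')                            # raw data
indir_metadata = paste0('~/git/Data', '/', drug, '/', 'output')  # gene associated metadata
outdir         = paste0('~/git/Data', '/', drug, '/', 'output')  # results

infile          = paste0(indir, '/', 'original_tanushree_data_v2.csv')
infile_metadata = paste0(indir_metadata, '/', 'pnca_metadata.csv')
outfile         = paste0(outdir, '/', tolower(gene), '_meta_data_with_AFandOR.csv')

# '~' is expanded by R's file functions (read.csv, write.csv, etc.);
# to inspect the absolute path explicitly:
normalizePath(outfile, mustWork = FALSE)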
|
@@ -63,7 +61,7 @@ rm(indir, in_filename, infile)
 raw_data = raw_data[!is.na(raw_data$pyrazinamide),]

 total_samples = length(unique(raw_data$id))
-print(paste0('Total samples without NA in', ' ', drug, 'is:', total_samples))
+cat(paste0('Total samples without NA in', ' ', drug, 'is:', total_samples))

 # sanity check: should be true
 is.numeric(total_samples)
@@ -82,7 +80,7 @@ raw_data$all_muts_pnca = tolower(raw_data$all_mutations_pyrazinamide)

 # sanity checks
 #table(grepl("pnca_p",raw_data$all_muts_pnca))
-print(paste0('converting gene match:', gene_match, ' ', 'to lowercase'))
+cat(paste0('converting gene match:', gene_match, ' ', 'to lowercase'))
 gene_match = tolower(gene_match)

 table(grepl(gene_match,raw_data$all_muts_pnca))
@@ -109,7 +107,7 @@ table(mut, dst)
 #===============
 # Step 2: Read valid snps for which OR can be calculated (infile_comp_snps.csv)
 #===============
-print(paste0('Reading metadata infile:', infile_metadata))
+cat(paste0('Reading metadata infile:', infile_metadata))

 pnca_metadata = read.csv(infile_metadata
 # , file.choose()
@@ -118,8 +116,8 @@ pnca_metadata = read.csv(infile_metadata


 # clear variables
-rm(homedir, in_filename, indir, infile)
+rm(indir, in_filename, infile)
-rm(indir_metadata, infile_metadata, in_filename_metadata)
+rm(indir_metadata, in_filename_metadata, infile_metadata)

 # count na in pyrazinamide column
 tot_pza_na = sum(is.na(pnca_metadata$pyrazinamide))
@@ -130,15 +128,15 @@ pnca_snps_or = pnca_metadata[!is.na(pnca_metadata$pyrazinamide),]

 # sanity check
 if(nrow(pnca_snps_or) == expected_rows){
-print('PASS: no. of rows match with expected_rows')
+cat('PASS: no. of rows match with expected_rows')
 } else{
-print('FAIL: nrows mismatch.')
+cat('FAIL: nrows mismatch.')
 }

 # extract unique snps to iterate over for AF and OR calcs
 pnca_snps_unique = unique(pnca_snps_or$mutation)

-print(paste0('Total no. of distinct comp snps to perform OR calcs: ', length(pnca_snps_unique)))
+cat(paste0('Total no. of distinct comp snps to perform OR calcs: ', length(pnca_snps_unique)))

 # Define OR function
 x = as.numeric(mut)
@@ -192,38 +190,45 @@ hist(log(ors)

 # FIXME: could be good to add a sanity check
 if (table(names(ors) == names(pvals)) & table(names(ors) == names(afs)) & table(names(pvals) == names(afs)) == length(pnca_snps_unique)){
-print('PASS: names of ors, pvals and afs match: proceed with combining into a single df')
+cat('PASS: names of ors, pvals and afs match: proceed with combining into a single df')
 } else{
-print('FAIL: names of ors, pvals and afs mismatch')
+cat('FAIL: names of ors, pvals and afs mismatch')
 }

-# combine
+# combine ors, pvals and afs
+cat('Combining calculated params into a df: ors, pvals and afs')

 comb_AF_and_OR = data.frame(ors, pvals, afs)
-head(rownames(comb_AF_and_OR))
+cat('No. of rows in comb_AF_and_OR: ', nrow(comb_AF_and_OR)
+, '\nNo. of cols in comb_AF_and_OR: ', ncol(comb_AF_and_OR))

+cat('Rownames == mutation: ', head(rownames(comb_AF_and_OR)))

 # add rownames of comb_AF_and_OR as an extra column 'mutation' to allow merging based on this column
 comb_AF_and_OR$mutation = rownames(comb_AF_and_OR)

 # sanity check
 if (table(rownames(comb_AF_and_OR) == comb_AF_and_OR$mutation)){
-print('PASS: rownames and mutaion col values match')
+cat('PASS: rownames and mutaion col values match')
 }else{
-print('FAIL: rownames and mutation col values mismatch')
+cat('FAIL: rownames and mutation col values mismatch')
 }

 ############
-# Merge 1:
+# Merge 1: combine meta data file with calculated num params
 ###########
 df1 = pnca_metadata
 df2 = comb_AF_and_OR

-head(df1$mutation); head(df2$mutation)
+cat('checking commom col of the two dfs before merging:'
+,'\ndf1:', head(df1$mutation)
+, '\ndf2:', head(df2$mutation))

-# FIXME: newlines
+cat(paste0('merging two dfs: '
-print(paste0('merging two dfs: '
 ,'\ndf1 (big df i.e. meta data) nrows: ', nrow(df1)
 ,'\ndf2 (small df i.e af, or, pval) nrows: ', nrow(df2)
-, 'expected rows in merged df: ', nrow(df1), 'expected cols in merged_df: ', (ncol(df1) + ncol(df2) - 1)))
+,'\nexpected rows in merged df: ', nrow(df1)
+,'\nexpected cols in merged_df: ', (ncol(df1) + ncol(df2) - 1)))

 merged_df = merge(df1 # big file
 , df2 # small (afor file)
@@ -232,9 +237,9 @@ merged_df = merge(df1 # big file

 # sanity check
 if(ncol(merged_df) == (ncol(df1) + ncol(df2) - 1)){
-print(paste0('PASS: no. of cols is as expected: ', ncol(merged_df)))
+cat(paste0('PASS: no. of cols is as expected: ', ncol(merged_df)))
 } else{
-print('FAIL: no.of cols mistmatch')
+cat('FAIL: no.of cols mistmatch')
 }

 # quick check
@@ -243,18 +248,24 @@ merged_df[merged_df$mutation == i,]

 # count na in each column
 na_count = sapply(merged_df, function(y) sum(length(which(is.na(y))))); na_count
-# only some or and Af should be NA
-#Row.names ors pvals afs
+# check last three cols: should be NA
-#63 63 63 63
+if ( identical(na_count[[length(na_count)]], na_count[[length(na_count)-1]], na_count[[length(na_count)-2]])){
+cat('PASS: No. of NAs for OR, AF and Pvals are equal as expected',
+'\nNo. of NA: ', na_count[[length(na_count)]])
+} else {
+cat('FAIl: No. of NAs for OR, AF and Pvals mismatch')
+}

 # reassign custom colnames
+cat('Assigning custom colnames for the calculated params...')
 colnames(merged_df)[colnames(merged_df)== "ors"] <- "OR"
-colnames(merged_df)[colnames(merged_df)== "afs"] <- "AF"
 colnames(merged_df)[colnames(merged_df)== "pvals"] <- "pvalue"
+colnames(merged_df)[colnames(merged_df)== "afs"] <- "AF"

 colnames(merged_df)

-# add log OR and neglog pvalue
+# add 3 more cols: log OR, neglog pvalue and AF_percent cols
 merged_df$logor = log(merged_df$OR)
 is.numeric(merged_df$logor)

@@ -273,17 +284,22 @@ merged_df[merged_df$mutation == i,]
 # FIXME: harcoding (beware!), NOT FATAL though!
 ncol_added = 3

-print(paste0('Added', ncol_added, ' ', 'more cols to merged_df i.e log10 OR and -log10 P-val: '
+cat(paste0('Added', ' ', ncol_added, ' more cols to merged_df:'
-, 'no. of cols in merged_df now: ', ncol(merged_df)))
+, '\ncols added: logor, neglog10pvalue and AF_percent:'
+, '\nno. of cols in merged_df now: ', ncol(merged_df)))

 #%% write file out: pnca_meta_data_with_AFandOR
-print(paste0('writing output file in: '
+cat(paste0('writing output file: '
-, 'Filename: ', out_filename
+, '\nFilename: ', out_filename
-, 'Path:', outdir))
+, '\nPath:', outdir))

 write.csv(merged_df, outfile
 , row.names = F)

-print(paste0('Finished writing:', out_filename, '\nExpected no. of cols:', ncol(merged_df)))
+cat(paste0('Finished writing:'
-print('======================================================================')
+, out_filename
+, '\nNo. of rows: ', nrow(merged_df)
+, '\nNo. of cols: ', ncol(merged_df)))

+cat('======================================================================')
 rm(out_filename)

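Note on the R file as a whole: most of the edits above replace print(paste0(...)) with cat(paste0(...)). The practical difference, and the likely reason the old '# FIXME: newlines' comment could be dropped, is that cat renders escape sequences such as \n as real line breaks, whereas print displays the character string quoted with \n left as two literal characters. A small standalone illustration (not taken from the repo):

# illustration only: how print() and cat() treat '\n' in a pasted message
msg = paste0('df1 nrows: ', 10, '\ndf2 nrows: ', 5)

print(msg)
# [1] "df1 nrows: 10\ndf2 nrows: 5"   <- quoted, \n shown as two characters

cat(msg, '\n')
# df1 nrows: 10
# df2 nrows: 5                        <- \n becomes an actual line break
# note: cat() does not append a trailing newline itself, hence the extra '\n' argument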
@@ -14,7 +14,7 @@ Created on Tue Aug 6 12:56:03 2019
 # load libraries
 import os, sys
 import pandas as pd
-import numpy as np
+#import numpy as np

 #from pandas.api.types import is_string_dtype
 #from pandas.api.types import is_numeric_dtype
@@ -61,21 +61,21 @@ gene_match = gene + '_p.'
 # input dir
 #=======
 #indir = 'git/Data/pyrazinamide/input/original'
-indir = 'git/Data' + '/' + drug + '/' + 'input/original'
+indir = homedir + '/' + 'git/Data'
 #=========
 # output dir
 #=========
 # several output files
 # output filenames in respective sections at the time of outputting files
 #outdir = 'git/Data/pyrazinamide/output'
-outdir = 'git/Data' + '/' + drug + '/' + 'output'
+outdir = homedir + '/' + 'git/Data' + '/' + drug + '/' + 'output'

 #%%end of variable assignment for input and output files
 #==============================================================================
 #%% Read files

 in_filename = 'original_tanushree_data_v2.csv'
-infile = homedir + '/' + indir + '/' + in_filename
+infile = indir + '/' + in_filename
 print('Reading input master file:', infile)

 master_data = pd.read_csv(infile, sep = ',')
@@ -325,12 +325,12 @@ print('======================================================================')
 #%% write file
 #print(outdir)
 out_filename0 = gene.lower() + '_' + 'common_ids.csv'
-outfile0 = homedir + '/' + outdir + '/' + out_filename0
+outfile0 = outdir + '/' + out_filename0

 #FIXME: CHECK line len(common_ids)
 print('Writing file: common ids:',
 '\nFilename:', out_filename0,
-'\nPath:', homedir +'/'+ outdir,
+'\nPath:', outdir,
 '\nExpected no. of rows:', len(common_ids) )

 common_ids.to_csv(outfile0)
@@ -690,7 +690,7 @@ print('Counting no. of ambiguous muts...')

 if dr_muts[dr_muts.isin(other_muts)].nunique() == other_muts[other_muts.isin(dr_muts)].nunique():
 common_muts = dr_muts[dr_muts.isin(other_muts)].value_counts().keys().tolist()
-print('No. of ambigiuous muts detected:'+ str(len(common_muts)),
+print('Distinct no. of ambigiuous muts detected:'+ str(len(common_muts)),
 'list of ambiguous mutations (see below):', *common_muts, sep = '\n')
 else:
 print('Error: ambiguous muts detected, but extraction failed. Debug!',
@@ -711,21 +711,27 @@ del(c1, c2, col_to_split1, col_to_split2, comp_pnca_samples, dr_WF0, dr_df, dr_m
 #other_muts.to_csv('other_muts.csv', header = True)

 out_filename1 = gene.lower() + '_' + 'ambiguous_muts.csv'
-outfile1 = homedir + '/' + outdir + '/' + out_filename1
+outfile1 = outdir + '/' + out_filename1
 print('Writing file: ambiguous muts',
 '\nFilename:', out_filename1,
-'\nPath:', homedir +'/'+ outdir)
+'\nPath:', outdir)

 #common_muts = ['pncA_p.Val180Phe','pncA_p.Gln10Pro'] # test
 inspect = pnca_LF1[pnca_LF1['mutation'].isin(common_muts)]
 inspect.to_csv(outfile1)

-print('Finished writing:', out_filename1, '\nExpected no. of rows (no. of samples with the ambiguous muts present):', dr_muts.isin(other_muts).sum() + other_muts.isin(dr_muts).sum())
+print('Finished writing:', out_filename1,
+'\nNo. of rows:', len(inspect),
+'\nNo. of cols:', len(inspect.columns),
+'\nNo. of rows = no. of samples with the ambiguous muts present:', dr_muts.isin(other_muts).sum() + other_muts.isin(dr_muts).sum())
 print('======================================================================')
 del(out_filename1)


-#%%
+#%% read aa dict and pull relevant info
+print('Reading aa dict and fetching1 letter aa code',
+'\nFormatting mutation in mCSM style format: {WT}<POS>{MUT}',
+'\nAdding aa properties')
 #===========
 # Split 'mutation' column into three: wild_type, position and
 # mutant_type separately. Then map three letter code to one using
|
||||||
# First: Import reference dict
|
# First: Import reference dict
|
||||||
# Second: convert to mutation to lowercase for compatibility with dict
|
# Second: convert to mutation to lowercase for compatibility with dict
|
||||||
#===========
|
#===========
|
||||||
from reference_dict import my_aa_dict # CHECK DIR STRUC THERE!
|
|
||||||
pnca_LF1['mutation'] = pnca_LF1.loc[:, 'mutation'].str.lower()
|
pnca_LF1['mutation'] = pnca_LF1.loc[:, 'mutation'].str.lower()
|
||||||
#=======
|
#=======
|
||||||
# Iterate through the dict, create a lookup dict i.e
|
# Iterate through the dict, create a lookup dict i.e
|
||||||
|
@ -756,7 +761,7 @@ pnca_LF1['position'] = pnca_LF1['mutation'].str.extract(r'(\d+)')
|
||||||
|
|
||||||
# clear variables
|
# clear variables
|
||||||
del(k, v, wt, mut, lookup_dict)
|
del(k, v, wt, mut, lookup_dict)
|
||||||
print('======================================================================')
|
#print('======================================================================')
|
||||||
#=========
|
#=========
|
||||||
# iterate through the dict, create a lookup dict that i.e
|
# iterate through the dict, create a lookup dict that i.e
|
||||||
# lookup_dict = {three_letter_code: aa_prop_water}
|
# lookup_dict = {three_letter_code: aa_prop_water}
|
||||||
|
@ -777,7 +782,7 @@ for k, v in my_aa_dict.items():
|
||||||
|
|
||||||
# clear variables
|
# clear variables
|
||||||
del(k, v, wt, mut, lookup_dict)
|
del(k, v, wt, mut, lookup_dict)
|
||||||
print('======================================================================')
|
#print('======================================================================')
|
||||||
#========
|
#========
|
||||||
# iterate through the dict, create a lookup dict that i.e
|
# iterate through the dict, create a lookup dict that i.e
|
||||||
# lookup_dict = {three_letter_code: aa_prop_polarity}
|
# lookup_dict = {three_letter_code: aa_prop_polarity}
|
||||||
|
@ -798,7 +803,7 @@ for k, v in my_aa_dict.items():
|
||||||
|
|
||||||
# clear variables
|
# clear variables
|
||||||
del(k, v, wt, mut, lookup_dict)
|
del(k, v, wt, mut, lookup_dict)
|
||||||
print('======================================================================')
|
#print('======================================================================')
|
||||||
|
|
||||||
#========
|
#========
|
||||||
# iterate through the dict, create a lookup dict that i.e
|
# iterate through the dict, create a lookup dict that i.e
|
||||||
|
@ -866,12 +871,12 @@ else:
|
||||||
print('======================================================================')
|
print('======================================================================')
|
||||||
|
|
||||||
out_filename2 = gene.lower() + '_' + 'mcsm_snps.csv'
|
out_filename2 = gene.lower() + '_' + 'mcsm_snps.csv'
|
||||||
outfile2 = homedir + '/' + outdir + '/' + out_filename2
|
outfile2 = outdir + '/' + out_filename2
|
||||||
|
|
||||||
print('Writing file: mCSM style muts',
|
print('Writing file: mCSM style muts',
|
||||||
'\nFilename:', out_filename2,
|
'\nFilename:', out_filename2,
|
||||||
'\nPath:', homedir +'/'+ outdir,
|
'\nPath:', outdir,
|
||||||
'\nmutation format (SNP): {Wt}<POS>{Mut}',
|
'\nmutation format (SNP): {WT}<POS>{MUT}',
|
||||||
'\nNo. of distinct muts:', len(snps_only),
|
'\nNo. of distinct muts:', len(snps_only),
|
||||||
'\nNo. of distinct positions:', len(pos_only))
|
'\nNo. of distinct positions:', len(pos_only))
|
||||||
|
|
||||||
|
@ -887,10 +892,10 @@ del(out_filename2)
|
||||||
#%% Write file: pnca_metadata (i.e pnca_LF1)
|
#%% Write file: pnca_metadata (i.e pnca_LF1)
|
||||||
# where each row has UNIQUE mutations NOT unique sample ids
|
# where each row has UNIQUE mutations NOT unique sample ids
|
||||||
out_filename3 = gene.lower() + '_' + 'metadata.csv'
|
out_filename3 = gene.lower() + '_' + 'metadata.csv'
|
||||||
outfile3 = homedir + '/' + outdir + '/' + out_filename3
|
outfile3 = outdir + '/' + out_filename3
|
||||||
print('Writing file: LF formatted data',
|
print('Writing file: LF formatted data',
|
||||||
'\nFilename:', out_filename3,
|
'\nFilename:', out_filename3,
|
||||||
'\nPath:', homedir +'/'+ outdir)
|
'\nPath:', outdir)
|
||||||
|
|
||||||
pnca_LF1.to_csv(outfile3, header = True, index = False)
|
pnca_LF1.to_csv(outfile3, header = True, index = False)
|
||||||
print('Finished writing:', out_filename3,
|
print('Finished writing:', out_filename3,
|
||||||
|
@ -929,12 +934,12 @@ else:
|
||||||
print('======================================================================')
|
print('======================================================================')
|
||||||
|
|
||||||
out_filename4 = gene.lower() + '_' + 'all_muts_msa.csv'
|
out_filename4 = gene.lower() + '_' + 'all_muts_msa.csv'
|
||||||
outfile4 = homedir + '/' + outdir + '/' + out_filename4
|
outfile4 = outdir + '/' + out_filename4
|
||||||
|
|
||||||
print('Writing file: mCSM style muts for msa',
|
print('Writing file: mCSM style muts for msa',
|
||||||
'\nFilename:', out_filename4,
|
'\nFilename:', out_filename4,
|
||||||
'\nPath:', homedir +'/'+ outdir,
|
'\nPath:', outdir,
|
||||||
'\nmutation format (SNP): {Wt}<POS>{Mut}',
|
'\nmutation format (SNP): {WT}<POS>{MUT}',
|
||||||
'\nNo.of lines of msa:', len(all_muts_msa),
|
'\nNo.of lines of msa:', len(all_muts_msa),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -961,12 +966,12 @@ pos_only.position.dtype
|
||||||
pos_only_sorted = pos_only.sort_values(by = 'position', ascending = True)
|
pos_only_sorted = pos_only.sort_values(by = 'position', ascending = True)
|
||||||
|
|
||||||
out_filename5 = gene.lower() + '_' + 'mutational_positons.csv'
|
out_filename5 = gene.lower() + '_' + 'mutational_positons.csv'
|
||||||
outfile5 = homedir + '/' + outdir + '/' + out_filename5
|
outfile5 = outdir + '/' + out_filename5
|
||||||
|
|
||||||
print('Writing file: mutational positions',
|
print('Writing file: mutational positions',
|
||||||
'\nNo. of distinct positions:', len(pos_only_sorted),
|
'\nNo. of distinct positions:', len(pos_only_sorted),
|
||||||
'\nFilename:', out_filename5,
|
'\nFilename:', out_filename5,
|
||||||
'\nPath:', homedir +'/'+ outdir)
|
'\nPath:', outdir)
|
||||||
|
|
||||||
pos_only_sorted.to_csv(outfile5, header = True, index = False)
|
pos_only_sorted.to_csv(outfile5, header = True, index = False)
|
||||||
|
|
||||||
|
|