added consistent style scripts to format kd & rd values
This commit is contained in:
parent
e4a7deae7b
commit
d3d82623d2
2 changed files with 458 additions and 0 deletions
203
scripts/rd_df.py
Executable file
203
scripts/rd_df.py
Executable file
|
@ -0,0 +1,203 @@
|
|||
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Tue Aug 6 12:56:03 2019

@author: tanu
'''
#=============================================================================
# Task: Residue depth (rd) processing to generate a df with residue_depth(rd)
# values

# FIXME: source file is MANUALLY downloaded from the website
# Input: '.tsv' i.e residue depth txt file (output from .zip file manually
# downloaded from the website).
# This should be integrated into the pipeline

# Output: .csv with 3 cols i.e position, rd_values & 3-letter wt aa code(caps)
#=============================================================================
#%% load packages
import sys
import os
import argparse

import pandas as pd
#=============================================================================
#%% specify input and curr dir
homedir = os.path.expanduser('~')

# set working dir
# NOTE(review): hard-coded repo path — assumes the standard checkout location
os.chdir(homedir + '/git/LSHTM_analysis/meta_data_analysis')
#=======================================================================
#%% command line args
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--drug', help='drug name', default = None)
arg_parser.add_argument('-g', '--gene', help='gene name (case sensitive)', default = None)

arg_parser.add_argument('--datadir', help = 'Data Directory. By default, it assumes homedir + git/Data')
arg_parser.add_argument('-i', '--input_dir', help = 'Input dir containing pdb files. By default, it assumes homedir + <drug> + input')
arg_parser.add_argument('-o', '--output_dir', help = 'Output dir for results. By default, it assumes homedir + <drug> + output')

arg_parser.add_argument('-rd', '--rd_file', help = 'residue depth file. By default, it assumes a file called <gene>_rd.tsv in output_dir')

arg_parser.add_argument('--debug', action='store_true', help = 'Debug Mode')

args = arg_parser.parse_args()
#=======================================================================
#%% variable assignment: input and output
#drug = 'pyrazinamide'
#gene = 'pncA'
drug = args.drug
gene = args.gene
gene_match = gene + '_p.'

data_dir = args.datadir
indir = args.input_dir
outdir = args.output_dir

rd_filename = args.rd_file

DEBUG = args.debug

#============
# directories
#============
# fall back to the conventional locations when not supplied on the CLI
if data_dir:
    datadir = data_dir
else:
    datadir = homedir + '/' + 'git/Data'

if not indir:
    indir = datadir + '/' + drug + '/' + 'input'

if not outdir:
    outdir = datadir + '/' + drug + '/' + 'output'

#======
# input
#=======
if rd_filename:
    in_filename_rd = rd_filename
else:
    #in_filename_rd = '3pl1_rd.tsv'
    in_filename_rd = gene.lower() + '_rd.tsv'

# NOTE(review): the input tsv is read from outdir (not indir) because it is
# itself a product of an earlier pipeline stage — confirm this is intended
infile_rd = outdir + '/' + in_filename_rd
print('Input file:', infile_rd
      , '\n=============================================================')

#=======
# output
#=======
out_filename_rd = gene.lower() + '_rd.csv'
outfile_rd = outdir + '/' + out_filename_rd
print('Output file:', outfile_rd
      , '\n=============================================================')

#%% end of variable assignment for input and output files
#=======================================================================
#%% rd values from <gene>_rd.tsv values
def rd_to_csv(inputtsv, outputrdcsv):
    """
    Format residue depth values from the manually downloaded input file and
    write a 3-column csv (position, rd_values, wt_3letter_caps).

    @param inputtsv: tsv file downloaded from {INSERT LINK}
    @type inputtsv: string

    @param outputrdcsv: csv file with rd values
    @type outputrdcsv: string

    @return: none, writes rd values df as csv
    """
    #========================
    # read downloaded tsv file
    #========================
    #%% Read input file
    # presumably the tsv's first field becomes the index, holding strings
    # like '<chain>:<RES><num>' — TODO confirm against a downloaded file
    rd_data = pd.read_csv(inputtsv, sep = '\t')
    print('Reading input file:', inputtsv
          , '\nNo. of rows:', len(rd_data)
          , '\nNo. of cols:', len(rd_data.columns))

    print('Column names:', rd_data.columns
          , '\n===========================================================')
    #========================
    # creating position col
    #========================
    # Extracting residue number from index and assigning
    # the values to a column [position]. Then convert the position col to numeric.
    # expand=False keeps the extracted group 1-D; the previous default
    # (expand=True) returns a DataFrame whose 2-D .values cannot be
    # assigned to a single column in newer pandas versions.
    rd_data['position'] = rd_data.index.str.extract(r'([0-9]+)', expand = False).values

    # converting position to numeric
    rd_data['position'] = pd.to_numeric(rd_data['position'])

    print('Extracted residue num from index and assigned as a column:'
          , '\ncolumn name: position'
          , '\ntotal no. of cols now:', len(rd_data.columns)
          , '\n=========================================================')

    #========================
    # Renaming amino-acid
    # and all-atom cols
    #========================
    print('Renaming columns:'
          , '\ncolname==> # chain:residue: wt_3letter_caps'
          , '\nYES... the column name *actually* contains a # ..!'
          , '\ncolname==> all-atom: rd_values'
          , '\n=========================================================')

    rd_data.rename(columns = {'# chain:residue':'wt_3letter_caps', 'all-atom':'rd_values'}, inplace = True)
    print('Column names:', rd_data.columns)

    #========================
    # extracting df with the
    # desired columns
    #========================
    print('Extracting relevant columns for writing df as csv')

    # .copy() makes rd_df an independent frame so the later column rename
    # cannot trigger pandas' SettingWithCopyWarning on a slice view
    rd_df = rd_data[['position','rd_values','wt_3letter_caps']].copy()

    if len(rd_df) == len(rd_data):
        print('PASS: extracted df has expected no. of rows'
              ,'\nExtracted df dim:'
              ,'\nNo. of rows:', len(rd_df)
              ,'\nNo. of cols:', len(rd_df.columns))
    else:
        print('FAIL: no. of rows mismatch'
              , '\nExpected no. of rows:', len(rd_data)
              , '\nGot no. of rows:', len(rd_df)
              , '\n=====================================================')

    # Ensuring lowercase column names for consistency
    rd_df.columns = rd_df.columns.str.lower()

    #===============
    # writing file
    #===============
    print('Writing file:'
          , '\nFilename:', outputrdcsv
          , '\n=========================================================')

    rd_df.to_csv(outputrdcsv, header = True, index = False)
#%% end of function
#=======================================================================
#%% call function
#rd_to_csv(infile_rd, outfile_rd)
#=======================================================================
def main():
    """Run rd_to_csv using the module-level input/output paths built from
    the command-line arguments."""
    # announce the parameters this run will use
    print('residue depth using the following params'
          , '\nInput residue depth file:', in_filename_rd
          , '\nOutput:', out_filename_rd)
    # do the actual tsv -> csv conversion
    rd_to_csv(infile_rd, outfile_rd)
    print('Finished Writing file:'
          , '\nFilename:', outfile_rd
          , '\n=============================================================')

if __name__ == '__main__':
    main()
#%% end of script
#=======================================================================
Loading…
Add table
Add a link
Reference in a new issue