ran struc param analysis

commit 2cebd338ba
parent 96da4d8ed5

5 changed files with 373 additions and 382 deletions
@@ -46,10 +46,8 @@ os.getcwd()
#=======================================================================
#%% command line args
arg_parser = argparse.ArgumentParser()
-arg_parser.add_argument('-d', '--drug', help='drug name', default = 'pyrazinamide')
+arg_parser.add_argument('-d', '--drug', help='drug name', default = None)
-arg_parser.add_argument('-g', '--gene', help='gene name', default = 'pncA') # case sensitive
+arg_parser.add_argument('-g', '--gene', help='gene name', default = None) # case sensitive
-#arg_parser.add_argument('-d', '--drug', help='drug name', default = 'TESTDRUG')
-#arg_parser.add_argument('-g', '--gene', help='gene name (case sensitive)', default = 'testGene') # case sensitive
args = arg_parser.parse_args()
#=======================================================================
#%% variable assignment: input and output
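With -d and -g now defaulting to None, a run that omits them will push None into later string concatenations (e.g. dr_muts_col = 'dr_mutations_' + drug) and die with a TypeError. A minimal guard after parse_args() would fail fast instead; this is an editorial sketch, not part of the commit:

import argparse
import sys

arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d', '--drug', help='drug name', default = None)
arg_parser.add_argument('-g', '--gene', help='gene name', default = None)  # case sensitive
args = arg_parser.parse_args()

# sketch: exit with a clear message rather than a TypeError downstream
if args.drug is None or args.gene is None:
    sys.exit('ERROR: both --drug and --gene must be supplied')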
@@ -101,178 +99,178 @@ print('Output filename:', out_filename
#%% function/method to combine 4 dfs

def combine_dfs(dssp_csv, kd_csv, rd_csv, mcsm_csv, out_combined_csv):
    """
    Combine 4 dfs

    @param dssp_df: csv file (output from dssp_df.py)
    @type dssp_df: string

    @param kd_df: csv file (output from kd_df.py)
    @type kd_df: string

    @param rd_df: csv file (output from rd_df.py)
    @type rd_df: string

    # FIXME
    @param mcsm_df: csv file (output of mcsm pipeline)CHECK}
    @type mcsm_df: string

    @param out_combined_csv: csv file output
    @type out_combined_csv: string

    @return: none, writes combined df as csv
    """
    #========================
    # read input csv files to combine
    #========================
    dssp_df = pd.read_csv(dssp_csv, sep = ',')
    kd_df = pd.read_csv(kd_csv, sep = ',')
    rd_df = pd.read_csv(rd_csv, sep = ',')
    mcsm_df = pd.read_csv(mcsm_csv, sep = ',')

    print('Reading input files:'
          , '\ndssp file:', dssp_csv
          , '\nNo. of rows:', len(dssp_df)
          , '\nNo. of cols:', len(dssp_df.columns)
          , '\nColumn names:', dssp_df.columns
          , '\n==================================================================='
          , '\nkd file:', kd_csv
          , '\nNo. of rows:', len(kd_df)
          , '\nNo. of cols:', len(kd_df.columns)
          , '\nColumn names:', kd_df.columns
          , '\n==================================================================='
          , '\nrd file:', rd_csv
          , '\nNo. of rows:', len(rd_df)
          , '\nNo. of cols:', len(rd_df.columns)
          , '\nColumn names:', rd_df.columns
          , '\n==================================================================='
          , '\nmcsm file:', mcsm_csv
          , '\nNo. of rows:', len(mcsm_df)
          , '\nNo. of cols:', len(mcsm_df.columns)
          , '\nColumn names:', mcsm_df.columns
          , '\n===================================================================')

    #========================
    # merge 1 (combined_df1)
    # concatenating 3 dfs:
    # dssp_df, kd_df, rd_df
    #========================
    print('starting first merge...\n')

    # checking no. of rows
    print('Checking if no. of rows of the 3 dfs are equal:\n'
          , len(dssp_df) == len(kd_df) == len(rd_df)
          , '\nReason: fasta files and pdb files vary since not all pos are part of the structure'
          , '\n===================================================================')

    # variables for sanity checks
    expected_rows_df1 = max(len(dssp_df), len(kd_df), len(rd_df))
    # beware of hardcoding! used for sanity check
    ndfs = 3
    ncol_merge = 1
    offset = ndfs - ncol_merge
    expected_cols_df1 = len(dssp_df.columns) + len(kd_df.columns) + len(rd_df.columns) - offset

    print('Merge 1:'
          , '\ncombining 3 dfs by common col: position'
          , '\nExpected nrows in combined_df:', expected_rows_df1
          , '\nExpected ncols in combined_df:', expected_cols_df1
          , '\nResetting the common col as the index'
          , '\n===================================================================')

    #dssp_df.set_index('position', inplace = True)
    #kd_df.set_index('position', inplace = True)
    #rd_df.set_index('position', inplace = True)

    #combined_df = pd.concat([dssp_df, kd_df, rd_df], axis = 1, sort = False).reset_index()
    #combined_df.rename(columns = {'index':'position'})

    combined_df1 = pd.concat(
        (my_index.set_index('position') for my_index in [dssp_df, kd_df, rd_df])
        , axis = 1, join = 'outer').reset_index()

    # sanity check
    print('Checking dimensions of concatenated df1...')
    if len(combined_df1) == expected_rows_df1 and len(combined_df1.columns) == expected_cols_df1:
        print('PASS: combined df has expected dimensions'
              , '\nNo. of rows in combined df:', len(combined_df1)
              , '\nNo. of cols in combined df:', len(combined_df1.columns)
              , '\n===============================================================')
    else:
        print('FAIL: combined df does not have expected dimensions'
              , '\nNo. of rows in combined df:', len(combined_df1)
              , '\nNo. of cols in combined df:', len(combined_df1.columns)
              , '\n===============================================================')

    #========================
    # merge 2 (combined_df2)
    # concatenating 2 dfs:
    # mcsm_df, combined_df1 (result of merge 1)
    # sort the cols
    #========================
    print('starting second merge...\n')

    # rename col 'Position' in mcsm_df to lowercase 'position'
    # so it matches the combined_df1 colname to perform the merge

    #mcsm_df.columns
    #mcsm_df.rename(columns = {'Position':'position'}) # not working!
    # copy 'Position' column with the correct colname
    print('Firstly, copying \'Position\' col and renaming \'position\' to allow merging'
          , '\nNo. of cols before copying: ', len(mcsm_df.columns))

    mcsm_df['position'] = mcsm_df['Position']
    print('No. of cols after copying: ', len(mcsm_df.columns))

    # sanity check
    if mcsm_df['position'].equals(mcsm_df['Position']):
        print('PASS: Copying worked correctly'
              , '\ncopied col matches original column'
              , '\n===============================================================')
    else:
        print('FAIL: copied col does not match original column'
              , '\n================================================================')

    # variables for sanity checks
    expected_rows_df2 = len(mcsm_df)
    # beware of hardcoding! used for sanity check
    ndfs = 2
    ncol_merge = 1
    offset = ndfs - ncol_merge
    expected_cols_df2 = len(mcsm_df.columns) + len(combined_df1.columns) - offset

    print('Merge 2:'
          , '\ncombining 2 dfs by common col: position'
          , '\nExpected nrows in combined_df:', expected_rows_df2
          , '\nExpected ncols in combined_df:', expected_cols_df2
          , '\n===================================================================')

    combined_df2 = mcsm_df.merge(combined_df1, on = 'position')

    # sanity check
    print('Checking dimensions of concatenated df2...')
    if len(combined_df2) == expected_rows_df2 and len(combined_df2.columns) == expected_cols_df2:
        print('PASS: combined df2 has expected dimensions'
              , '\nNo. of rows in combined df:', len(combined_df2)
              , '\nNo. of cols in combined df:', len(combined_df2.columns)
              , '\n===============================================================')
    else:
        print('FAIL: combined df2 does not have expected dimensions'
              , '\nNo. of rows in combined df:', len(combined_df2)
              , '\nNo. of cols in combined df:', len(combined_df2.columns)
              , '\n===============================================================')

    #===============
    # writing file
    #===============
    print('Writing file:'
          , '\nFilename:', out_combined_csv
#         , '\nPath:', outdir
          , '\nExpected no. of rows:', len(combined_df2)
          , '\nExpected no. of cols:', len(combined_df2.columns)
          , '\n=========================================================')

    combined_df2.to_csv(out_combined_csv, header = True, index = False)

#%% end of function
#=======================================================================
@@ -280,19 +278,18 @@ def combine_dfs(dssp_csv, kd_csv, rd_csv, mcsm_csv, out_combined_csv):
#combine_dfs(infile1, infile2, infile3, infile4, outfile)
#=======================================================================
def main():
    print('Combining 4 dfs:\n'
          , in_filename1, '\n'
          , in_filename2, '\n'
          , in_filename3, '\n'
          , in_filename4, '\n'
          , 'output csv:', out_filename)
    combine_dfs(infile1, infile2, infile3, infile4, outfile)
    print('Finished Writing file:'
-         , '\nFilename:', out_filename
+         , '\nFilename:', outfile
-         , '\nPath:', outdir
##        , '\nNo. of rows:', ''
##        , '\nNo. of cols:', ''
          , '\n===========================================================')

if __name__ == '__main__':
    main()
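The function above is a two-step combine: an index-aligned outer concat of the three per-position tables, then a key-based merge with the mCSM table. A self-contained sketch of the same pattern on invented toy data (real column names differ):

import pandas as pd

# toy stand-ins for the dssp/kd/rd csvs; values are made up
dssp_df = pd.DataFrame({'position': [1, 2, 3], 'asa': [10.0, 20.0, 30.0]})
kd_df   = pd.DataFrame({'position': [2, 3, 4], 'kd_values': [0.5, -1.2, 2.0]})
rd_df   = pd.DataFrame({'position': [1, 3, 4], 'rd_values': [3.1, 2.2, 1.9]})

# merge 1: outer concat on the shared 'position' index; NaN appears where a
# table has no value for that position
combined_df1 = pd.concat(
    (df.set_index('position') for df in (dssp_df, kd_df, rd_df)),
    axis = 1, join = 'outer').reset_index()

# merge 2: key-based merge on the common column, as with mcsm_df
mcsm_df = pd.DataFrame({'position': [1, 2, 3, 4], 'duet': [-0.8, 0.1, 0.3, -1.5]})
combined_df2 = mcsm_df.merge(combined_df1, on = 'position')
print(combined_df2)

One caveat the sanity check glosses over: an outer join yields the union of positions, which can exceed max(len(...)) when the three tables cover different position sets (here 4 rows from three tables of 3).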
@@ -57,8 +57,8 @@ args = arg_parser.parse_args()

drug = args.drug
gene = args.gene

gene_match = gene + '_p.'

# building cols to extract
dr_muts_col = 'dr_mutations_' + drug
other_muts_col = 'other_mutations_' + drug
@@ -80,8 +80,8 @@ datadir = homedir + '/' + 'git/Data'
#=======
# input
#=======
-#in_filename = 'original_tanushree_data_v2.csv'
+in_filename = 'original_tanushree_data_v2.csv'
-in_filename = 'mtb_gwas_v3.csv'
+#in_filename = 'mtb_gwas_v3.csv'
infile = datadir + '/' + in_filename
print('Input file: ', infile
      , '\n============================================================')
@@ -1028,25 +1028,25 @@ del(k, v, wt, mut, lookup_dict)

########
# combine the wild_type+position+mutant_type columns to generate
-# Mutationinformation (matches mCSM output field)
+# mutationinformation (matches mCSM output field)
# Remember to use .map(str) for int col types to allow string concatenation
#########
-gene_LF1['Mutationinformation'] = gene_LF1['wild_type'] + gene_LF1.position.map(str) + gene_LF1['mutant_type']
+gene_LF1['mutationinformation'] = gene_LF1['wild_type'] + gene_LF1.position.map(str) + gene_LF1['mutant_type']
-print('Created column: Mutationinformation'
+print('Created column: mutationinformation'
      , '\n====================================================================='
-     , gene_LF1.Mutationinformation.head(10))
+     , gene_LF1.mutationinformation.head(10))

#%% Write file: mCSM muts
-snps_only = pd.DataFrame(gene_LF1['Mutationinformation'].unique())
+snps_only = pd.DataFrame(gene_LF1['mutationinformation'].unique())
snps_only.head()
# assign column name
-snps_only.columns = ['Mutationinformation']
+snps_only.columns = ['mutationinformation']

# count how many positions this corresponds to
pos_only = pd.DataFrame(gene_LF1['position'].unique())

print('Checking NA in snps...') # should be 0
-if snps_only.Mutationinformation.isna().sum() == 0:
+if snps_only.mutationinformation.isna().sum() == 0:
    print('PASS: NO NAs/missing entries for SNPs'
          , '\n===============================================================')
else:
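The string concatenation above only works if every operand is a string; position is typically int64, hence the .map(str) reminder in the comment. A standalone illustration with invented data:

import pandas as pd

gene_LF1 = pd.DataFrame({'wild_type':   ['L', 'M', 'F'],
                         'position':    [4, 1, 13],
                         'mutant_type': ['S', 'T', 'L']})

# position must be stringified before concatenation
gene_LF1['mutationinformation'] = (gene_LF1['wild_type']
                                   + gene_LF1['position'].map(str)
                                   + gene_LF1['mutant_type'])
print(gene_LF1['mutationinformation'].tolist())  # ['L4S', 'M1T', 'F13L']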
@@ -1090,27 +1090,27 @@ print('Finished writing:', out_filename3
del(out_filename3)

#%% write file: mCSM style but with repetitions for MSA and logo plots
-all_muts_msa = pd.DataFrame(gene_LF1['Mutationinformation'])
+all_muts_msa = pd.DataFrame(gene_LF1['mutationinformation'])
all_muts_msa.head()
# assign column name
-all_muts_msa.columns = ['Mutationinformation']
+all_muts_msa.columns = ['mutationinformation']

# make sure it is string
all_muts_msa.columns.dtype

# sort the column
-all_muts_msa_sorted = all_muts_msa.sort_values(by = 'Mutationinformation')
+all_muts_msa_sorted = all_muts_msa.sort_values(by = 'mutationinformation')

# create an extra column with protein name
all_muts_msa_sorted = all_muts_msa_sorted.assign(fasta_name = '3PL1')
all_muts_msa_sorted.head()

# rearrange columns so the fasta name is the first column (required for mutate.script)
-all_muts_msa_sorted = all_muts_msa_sorted[['fasta_name', 'Mutationinformation']]
+all_muts_msa_sorted = all_muts_msa_sorted[['fasta_name', 'mutationinformation']]
all_muts_msa_sorted.head()

print('Checking NA in snps...') # should be 0
-if all_muts_msa.Mutationinformation.isna().sum() == 0:
+if all_muts_msa.mutationinformation.isna().sum() == 0:
    print('PASS: NO NAs/missing entries for SNPs'
          , '\n===============================================================')
else:
@@ -30,10 +30,8 @@ os.getcwd()
#=======================================================================
#%% command line args
arg_parser = argparse.ArgumentParser()
-#arg_parser.add_argument('-d', '--drug', help='drug name', default = 'pyrazinamide')
+arg_parser.add_argument('-d', '--drug', help='drug name', default = None)
-#arg_parser.add_argument('-g', '--gene', help='gene name', default = 'pncA') # case sensitive
+arg_parser.add_argument('-g', '--gene', help='gene name (case sensitive)', default = None) # case sensitive
-arg_parser.add_argument('-d', '--drug', help='drug name', default = 'TESTDRUG')
-arg_parser.add_argument('-g', '--gene', help='gene name (case sensitive)', default = 'testGene') # case sensitive
args = arg_parser.parse_args()
#=======================================================================
#%% variable assignment: input and output
@@ -49,6 +47,8 @@ args = arg_parser.parse_args()

drug = args.drug
gene = args.gene
+
+gene_match = gene + '_p.'

#==========
# data dir
#==========
@@ -147,7 +147,7 @@ def extract_chain_dssp(inputpdbfile):
    return pdbchainlist
#=======================================================================
#%% write csv of processed dssp output
-def dssp_to_csv(inputdsspfile, outfile, pdbchainlist):
+def dssp_to_csv(inputdsspfile, outfile, pdbchainlist = ['A']):
    """
    Create a df from a dssp file containing ASA, RSA, SS for all chains
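One caveat on the new signature: pdbchainlist = ['A'] is a mutable default, evaluated once at definition time, so any in-place modification inside the function would persist across calls. The usual None-sentinel idiom avoids this; an editorial sketch, not what the commit does:

def dssp_to_csv(inputdsspfile, outfile, pdbchainlist = None):
    if pdbchainlist is None:
        pdbchainlist = ['A']  # fresh list on every call
    ...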
scripts/kd_df.py | 233
@@ -39,10 +39,8 @@ os.getcwd()
#=======================================================================
#%% command line args
arg_parser = argparse.ArgumentParser()
-#arg_parser.add_argument('-d', '--drug', help='drug name', default = 'pyrazinamide')
+arg_parser.add_argument('-d', '--drug', help='drug name', default = None)
-#arg_parser.add_argument('-g', '--gene', help='gene name', default = 'pncA') # case sensitive
+arg_parser.add_argument('-g', '--gene', help='gene name', default = None)
-arg_parser.add_argument('-d', '--drug', help='drug name', default = 'DRUGNAME')
-arg_parser.add_argument('-g', '--gene', help='gene name', default = 'geneName')
#arg_parser.add_argument('-p', '--plot', help='show plot', action='store_true')
args = arg_parser.parse_args()
#=======================================================================
@@ -81,8 +79,8 @@ print('Output filename:', out_filename
#%% end of variable assignment for input and output files
#=======================================================================
#%% kd values from fasta file and output csv
-def kd_to_csv(inputfasta, outputkdcsv, windowsize):
+def kd_to_csv(inputfasta, outputkdcsv, windowsize = 3):
    """
    Calculate kd (hydropathy values) from input fasta file

    @param inputfasta: fasta file
@@ -96,138 +94,137 @@ def kd_to_csv(inputfasta, outputkdcsv, windowsize):

    @return: none, writes kd values df as csv
    """
    #========================
    # read input fasta file
    #========================
    fh = open(inputfasta)

    for record in SeqIO.parse(fh, 'fasta'):
        id = record.id
        seq = record.seq
        num_residues = len(seq)
    fh.close()

    sequence = str(seq)
    X = ProteinAnalysis(sequence)

    #===================
    # calculate KD values: same as the expasy server
    #===================
    my_window = windowsize
    offset = round((my_window/2) - 0.5)
    # edge weight is set to default (100%)

    kd_values = (X.protein_scale(ProtParamData.kd, window = my_window))
    # sanity checks
    print('Sequence Length:', num_residues)
    print('kd_values Length:', len(kd_values))
    print('Window Length:', my_window)
    print('Window Offset:', offset)
    print('=================================================================')
    print('Checking: len(kd_values) is as expected for the given window size & offset...')
    expected_length = num_residues - (my_window - offset)
    if len(kd_values) == expected_length:
        print('PASS: expected and actual length of kd values match')
    else:
        print('FAIL: length mismatch'
              , '\nExpected length:', expected_length
              , '\nActual length:', len(kd_values)
              , '\n=========================================================')

    #===================
    # creating two dfs
    #===================
    # 1) aa sequence and 2) kd_values. Then reset index for each df
    # which will allow easy merging of the two dfs.

    # df1: df of aa seq with index reset to start from 1
    # (reflective of the actual aa position in a sequence)
    # Name column of wt as 'wild_type' to be the same name used
    # in the file required for merging later.
    dfSeq = pd.DataFrame({'wild_type_kd':list(sequence)})
    dfSeq.index = np.arange(1, len(dfSeq) + 1) # python is not inclusive

    # df2: df of kd_values with index reset to start from offset + 1 and
    # subsequent matched length of the kd_values
    dfVals = pd.DataFrame({'kd_values':kd_values})
    dfVals.index = np.arange(offset + 1, len(dfVals) + 1 + offset)

    # sanity checks
    max(dfVals['kd_values'])
    min(dfVals['kd_values'])

    #===================
    # concatenating dfs
    #===================
    # Merge the two on index
    # (as these are now reflective of the aa position numbers): df1 and df2
    # This will introduce NaN where there are missing values. In our case this
    # will be 2 (first and last ones based on window size and offset)

    kd_df = pd.concat([dfSeq, dfVals], axis = 1)

    #============================
    # renaming index to position
    #============================
    kd_df = kd_df.rename_axis('position')
    kd_df.head

    print('Checking: position col i.e. index should be numeric')
    if kd_df.index.dtype == 'int64':
        print('PASS: position col is numeric'
              , '\ndtype is:', kd_df.index.dtype)
    else:
        print('FAIL: position col is not numeric'
              , '\nConverting to numeric')
        kd_df.index.astype('int64')
        print('Checking dtype after conversion:\n'
              , '\ndtype is:', kd_df.index.dtype
              , '\n=========================================================')

    #===============
    # writing file
    #===============
    print('Writing file:'
          , '\nFilename:', outputkdcsv
#         , '\nPath:', outdir
          , '\nExpected no. of rows:', len(kd_df)
          , '\nExpected no. of cols:', len(kd_df.columns)
          , '\n=============================================================')

    kd_df.to_csv(outputkdcsv, header = True, index = True)

    #===============
    # plot: optional!
    #===============
    # http://www.dalkescientific.com/writings/NBN/plotting.html

    # FIXME: save fig
    # extract just pdb id from 'id' to pass to title of plot
    # foo = re.match(r'(^[0-9]{1}\w{3})', id).groups(1)
    # if doplot:
    plot(kd_values, linewidth = 1.0)
    #axis(xmin = 1, xmax = num_residues)
    xlabel('Residue Number')
    ylabel('Hydrophobicity')
    title('K&D Hydrophobicity for ' + id)
    show()

#%% end of function
#=======================================================================
#%% call function
#kd_to_csv(infile, outfile, windowsize = 3)
#=======================================================================
def main():
    print('Running hydropathy calcs with following params\n'
          , in_filename
          , '\noutfile:', out_filename)
    kd_to_csv(infile, outfile, 3)
    print('Finished writing file:'
-         , '\nFilename:', out_filename
+         , '\nFilename:', outfile
-         , '\nPath:', outdir
          , '\n=============================================================')

if __name__ == '__main__':
    main()
#%% end of script
#=======================================================================
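For reference, the KD computation in kd_to_csv() is plain Biopython, and the window/offset arithmetic can be checked on a literal sequence without a fasta file. A trimmed sketch (toy sequence, same formulas as the script):

from Bio.SeqUtils import ProtParamData
from Bio.SeqUtils.ProtParam import ProteinAnalysis

sequence = 'MRALIVVDVQNDF'  # invented example sequence
my_window = 3
offset = round((my_window / 2) - 0.5)  # 1 residue lost at each edge

X = ProteinAnalysis(sequence)
kd_values = X.protein_scale(ProtParamData.kd, window = my_window)

# protein_scale returns len(sequence) - window + 1 values; for the default
# window of 3 this coincides with the script's expected_length formula,
# num_residues - (my_window - offset): 13 - (3 - 1) == 11 scored positions,
# the first at residue offset + 1
print(len(sequence), len(kd_values), offset)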
scripts/rd_df.py | 147
@@ -31,10 +31,8 @@ os.getcwd()
#=======================================================================
#%% command line args
arg_parser = argparse.ArgumentParser()
-#arg_parser.add_argument('-d', '--drug', help='drug name', default = 'pyrazinamide')
+arg_parser.add_argument('-d', '--drug', help='drug name', default = None)
-#arg_parser.add_argument('-g', '--gene', help='gene name', default = 'pncA') # case sensitive
+arg_parser.add_argument('-g', '--gene', help='gene name', default = None) # case sensitive
-arg_parser.add_argument('-d', '--drug', help='drug name', default = 'TESTDRUG')
-arg_parser.add_argument('-g', '--gene', help='gene name (case sensitive)', default = 'testGene') # case sensitive
args = arg_parser.parse_args()
#=======================================================================
#%% variable assignment: input and output
@@ -72,7 +70,7 @@ print('Output filename:', out_filename
#=======================================================================
#%% rd values from <gene>_rd.tsv values
def rd_to_csv(inputtsv, outputrdcsv):
    """
    Calculate rd (residue depth) values from input tsv file

    @param inputtsv: tsv file downloaded from {INSERT LINK}
@@ -83,76 +81,76 @@ def rd_to_csv(inputtsv, outputrdcsv):

    @return: none, writes rd values df as csv
    """
    #========================
    # read downloaded tsv file
    #========================
    #%% Read input file
    rd_data = pd.read_csv(inputtsv, sep = '\t')
    print('Reading input file:', inputtsv
          , '\nNo. of rows:', len(rd_data)
          , '\nNo. of cols:', len(rd_data.columns))

    print('Column names:', rd_data.columns
          , '\n===============================================================')
    #========================
    # creating position col
    #========================
    # Extracting residue number from index and assigning
    # the values to a column [position]. Then convert the position col to numeric.
    rd_data['position'] = rd_data.index.str.extract('([0-9]+)').values

    # converting position to numeric
    rd_data['position'] = pd.to_numeric(rd_data['position'])
    rd_data['position'].dtype

    print('Extracted residue num from index and assigned as a column:'
          , '\ncolumn name: position'
          , '\ntotal no. of cols now:', len(rd_data.columns)
          , '\n=============================================================')

    #========================
    # Renaming amino-acid
    # and all-atom cols
    #========================
    print('Renaming columns:'
          , '\ncolname ==> # chain:residue: wt_3letter_caps'
          , '\nYES... the column name *actually* contains a # ..!'
          , '\ncolname ==> all-atom: rd_values'
          , '\n=============================================================')

    rd_data.rename(columns = {'# chain:residue':'wt_3letter_caps', 'all-atom':'rd_values'}, inplace = True)
    print('Column names:', rd_data.columns)

    #========================
    # extracting df with the
    # desired columns
    #========================
    print('Extracting relevant columns for writing df as csv')

    rd_df = rd_data[['position','rd_values','wt_3letter_caps']]

    if len(rd_df) == len(rd_data):
        print('PASS: extracted df has expected no. of rows'
              , '\nExtracted df dim:'
              , '\nNo. of rows:', len(rd_df)
              , '\nNo. of cols:', len(rd_df.columns))
    else:
        print('FAIL: no. of rows mismatch'
              , '\nExpected no. of rows:', len(rd_data)
              , '\nGot no. of rows:', len(rd_df)
              , '\n=====================================================')

    #===============
    # writing file
    #===============
    print('Writing file:'
          , '\nFilename:', outputrdcsv
#         , '\nPath:', outdir
#         , '\nExpected no. of rows:', len(rd_df)
#         , '\nExpected no. of cols:', len(rd_df.columns)
          , '\n=========================================================')

    rd_df.to_csv(outputrdcsv, header = True, index = False)

#%% end of function
#=======================================================================
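The position column above is parsed out of the tsv's string row labels. A compact reproduction of that step on fabricated index labels (expand = False keeps the extracted result one-dimensional):

import pandas as pd

# invented labels standing in for the '# chain:residue' style index
rd_data = pd.DataFrame({'rd_values': [1.9, 2.4, 3.0]},
                       index = ['A:MET1', 'A:ARG2', 'A:ALA3'])

rd_data['position'] = rd_data.index.str.extract('([0-9]+)', expand = False)
rd_data['position'] = pd.to_numeric(rd_data['position'])
print(rd_data.dtypes)  # position is now int64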
@@ -160,16 +158,15 @@ def rd_to_csv(inputtsv, outputrdcsv):
#rd_to_csv(infile, outfile)
#=======================================================================
def main():
    print('residue depth using the following params\n'
          , in_filename
          , '\noutfile:', out_filename)
    rd_to_csv(infile, outfile)
    print('Finished Writing file:'
-         , '\nFilename:', out_filename
+         , '\nFilename:', outfile
-         , '\nPath:', outdir
          , '\n=============================================================')

if __name__ == '__main__':
    main()
#%% end of script
#=======================================================================