Added foldx_scaled and deepddg_scaled values to combine_df.py, and used that script to merge all the dfs so that merged_df2 and merged_df3 are in fact what we need for downstream processing

This commit is contained in:
Tanushree Tunstall 2021-09-10 16:58:36 +01:00
parent dda5d1ea93
commit 4ba4ff602e
5 changed files with 354 additions and 977 deletions
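For context: the new foldx_scaled and deepddg_scaled columns rescale each ddG column onto [-1, 1] by dividing negative values by |min| and non-negative values by max, so the sign of each value is preserved. A minimal sketch of that idea, assuming a plain numeric pandas Series (the names here are illustrative, not the script's):

import pandas as pd

def scale_signed(series: pd.Series) -> pd.Series:
    # rescale to [-1, 1]: negatives divided by |min|, non-negatives by max
    smin, smax = series.min(), series.max()
    return series.apply(lambda x: x / abs(smin) if x < 0 else x / smax)

ddg = pd.Series([-2.0, -0.5, 0.0, 1.5, 3.0])
print(scale_signed(ddg))   # min maps to -1, max maps to 1, zero stays 0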

View file

@@ -41,6 +41,7 @@ import pandas as pd
from pandas import DataFrame
import numpy as np
import argparse
from functools import reduce
#=======================================================================
#%% specify input and curr dir
homedir = os.path.expanduser('~')
@@ -92,19 +93,6 @@ outdir = args.output_dir
gene_match = gene + '_p.'
print('mut pattern for gene', gene, ':', gene_match)
# !"Redundant, now that improvements have been made!
# See section "REGEX"
# nssnp_match = gene_match +'[A-Za-z]{3}[0-9]+[A-Za-z]{3}'
# print('nsSNP for gene', gene, ':', nssnp_match)
# wt_regex = gene_match.lower()+'([A-Za-z]{3})'
# print('wt regex:', wt_regex)
# mut_regex = r'[0-9]+(\w{3})$'
# print('mt regex:', mut_regex)
# pos_regex = r'([0-9]+)'
# print('position regex:', pos_regex)
#%%=======================================================================
#==============
# directories
@@ -122,49 +110,52 @@ if not outdir:
# input
#=======
#in_filename_mcsm = gene.lower() + '_complex_mcsm_norm.csv'
in_filename_mcsm = gene.lower() + '_complex_mcsm_norm_SAM.csv' # gidb
in_filename_foldx = gene.lower() + '_foldx.csv'
in_filename_deepddg = gene.lower() + '_ni_deepddg.csv' # change to decent filename and put it in the correct dir
in_filename_dssp = gene.lower() + '_dssp.csv'
in_filename_kd = gene.lower() + '_kd.csv'
in_filename_rd = gene.lower() + '_rd.csv'
#in_filename_snpinfo = 'ns' + gene.lower() + '_snp_info_f.csv' # gwas f info
in_filename_afor = gene.lower() + '_af_or.csv'
#in_filename_afor_kin = gene.lower() + '_af_or_kinship.csv'
infilename_dynamut = gene.lower() + '_complex_dynamut_norm.csv'
infilename_dynamut2 = gene.lower() + '_complex_dynamut2_norm.csv'
infilename_mcsm_na = gene.lower() + '_complex_mcsm_na_norm.csv'
infilename_mcsm_f_snps = gene.lower() + '_mcsm_formatted_snps.csv'
infile_mcsm = outdir + in_filename_mcsm
infile_foldx = outdir + in_filename_foldx
infile_deepddg = outdir + in_filename_deepddg
infile_dssp = outdir + in_filename_dssp
infile_kd = outdir + in_filename_kd
infile_rd = outdir + in_filename_rd
#infile_snpinfo = outdir + in_filename_snpinfo
infile_afor = outdir + in_filename_afor
#infile_afor_kin = outdir + in_filename_afor_kin
infile_dynamut = outdir + 'dynamut_results/' + infilename_dynamut
infile_dynamut2 = outdir + 'dynamut_results/dynamut2/' + infilename_dynamut2
infile_mcsm_na = outdir + 'mcsm_na_results/' + infilename_mcsm_na
infile_mcsm_f_snps = outdir + infilename_mcsm_f_snps
print('\nInput path:', indir
, '\nOutput path:', outdir, '\n'
, '\nInput filename mcsm:', infile_mcsm
, '\nInput filename foldx:', infile_foldx, '\n'
, '\nInput filename deepddg:', infile_deepddg, '\n'
, '\nInput filename dssp:', infile_dssp
, '\nInput filename kd:', infile_kd
, '\nInput filename rd:', infile_rd
#, '\nInput filename snp info:', infile_snpinfo, '\n'
, '\nInput filename af or:', infile_afor
#, '\nInput filename afor kinship:', infile_afor_kin
, '\n============================================================')
# read csv
mcsm_df = pd.read_csv(infile_mcsm, sep = ',')
foldx_df = pd.read_csv(infile_foldx , sep = ',')
deepddg_df = pd.read_csv(infile_deepddg, sep = ',')
dssp_df = pd.read_csv(infile_dssp, sep = ',')
kd_df = pd.read_csv(infile_kd, sep = ',')
rd_df = pd.read_csv(infile_rd, sep = ',')
afor_df = pd.read_csv(infile_afor, sep = ',')
dynamut_df = pd.read_csv(infile_dynamut, sep = ',')
dynamut2_df = pd.read_csv(infile_dynamut2, sep = ',')
mcsm_na_df = pd.read_csv(infile_mcsm_na, sep = ',')
mcsm_f_snps = pd.read_csv(infile_mcsm_f_snps, sep = ',', names = ['mutationinformation'], header = None)
#=======
# output
#=======
out_filename_comb = gene.lower() + '_all_params.csv'
outfile_comb = outdir + out_filename_comb
print('Output filename:', outfile_comb
, '\n===================================================================')
@@ -174,12 +165,101 @@ r_join = 'right'
i_join = 'inner'
# end of variable assignment for input and output files
#%%============================================================================
#%%############################################################################
#=====================
# some preprocessing
#=====================
#-------------
# FoldX
#-------------
foldx_df.shape
#=======================
# scale foldx values
#=======================
# Rescale values in the ddg column b/w -1 and 1 so that negative numbers
# stay negative and positive numbers stay positive
foldx_min = foldx_df['ddg'].min()
foldx_max = foldx_df['ddg'].max()
foldx_min
foldx_max
foldx_scale = lambda x : x/abs(foldx_min) if x < 0 else (x/foldx_max if x >= 0 else 'failed') # NaN values fall through to 'failed'
foldx_df['foldx_scaled'] = foldx_df['ddg'].apply(foldx_scale)
print('Raw foldx scores:\n', foldx_df['ddg']
, '\n---------------------------------------------------------------'
, '\nScaled foldx scores:\n', foldx_df['foldx_scaled'])
# additional check added
fsmi = foldx_df['foldx_scaled'].min()
fsma = foldx_df['foldx_scaled'].max()
c = foldx_df[foldx_df['ddg']>=0].count()
foldx_pos = c.get(key = 'ddg')
c2 = foldx_df[foldx_df['foldx_scaled']>=0].count()
foldx_pos2 = c2.get(key = 'foldx_scaled')
if foldx_pos == foldx_pos2 and fsmi == -1 and fsma == 1:
print('\nPASS: Foldx values scaled correctly b/w -1 and 1')
else:
print('\nFAIL: Foldx values scaled numbers MISmatch'
, '\nExpected number:', foldx_pos
, '\nGot:', foldx_pos2
, '\n======================================================')
# rename ddg column to ddg_foldx
foldx_df['ddg']
foldx_df = foldx_df.rename(columns = {'ddg':'ddg_foldx'})
foldx_df['ddg_foldx']
#-------------
# Deepddg
#-------------
deepddg_df.shape
#=======================
# scale Deepddg values
#=======================
# Rescale values in the deepddg column b/w -1 and 1 so that negative numbers
# stay negative and positive numbers stay positive
deepddg_min = deepddg_df['deepddg'].min()
deepddg_max = deepddg_df['deepddg'].max()
deepddg_scale = lambda x : x/abs(deepddg_min) if x < 0 else (x/deepddg_max if x >= 0 else 'failed') # NaN values fall through to 'failed'
deepddg_df['deepddg_scaled'] = deepddg_df['deepddg'].apply(deepddg_scale)
print('Raw deepddg scores:\n', deepddg_df['deepddg']
, '\n---------------------------------------------------------------'
, '\nScaled deepddg scores:\n', deepddg_df['deepddg_scaled'])
# additional check added
dsmi = deepddg_df['deepddg_scaled'].min()
dsma = deepddg_df['deepddg_scaled'].max()
c = deepddg_df[deepddg_df['deepddg']>=0].count()
deepddg_pos = c.get(key = 'deepddg')
c2 = deepddg_df[deepddg_df['deepddg_scaled']>=0].count()
deepddg_pos2 = c2.get(key = 'deepddg_scaled')
if deepddg_pos == deepddg_pos2 and dsmi == -1 and dsma == 1:
print('\nPASS: deepddg values scaled correctly b/w -1 and 1')
else:
print('\nFAIL: deepddg values scaled numbers MISmatch'
, '\nExpected number:', deepddg_pos
, '\nGot:', deepddg_pos2
, '\n======================================================')
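The foldx and deepddg blocks above repeat the same transformation and the same positive-count sanity check; if more predictors need scaling, a small helper along these lines (hypothetical, not part of this commit) would avoid the duplication:

import numpy as np
import pandas as pd

def add_scaled_col(df: pd.DataFrame, raw_col: str, scaled_col: str) -> pd.DataFrame:
    # add <scaled_col>: <raw_col> rescaled to [-1, 1] with sign preserved;
    # NaNs stay NaN here instead of being flagged with the string 'failed'
    vmin, vmax = df[raw_col].min(), df[raw_col].max()
    df[scaled_col] = np.where(df[raw_col] < 0,
                              df[raw_col] / abs(vmin),
                              df[raw_col] / vmax)
    # same check as above: the no. of non-negative values must be unchanged
    assert (df[raw_col] >= 0).sum() == (df[scaled_col] >= 0).sum()
    return df

# e.g. foldx_df = add_scaled_col(foldx_df, 'ddg', 'foldx_scaled')
#      deepddg_df = add_scaled_col(deepddg_df, 'deepddg', 'deepddg_scaled')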
#%%=============================================================================
# Now merges begin
#%%=============================================================================
print('==================================='
, '\nFirst merge: mcsm + foldx'
, '\n===================================')
mcsm_df.shape
# add 3 lowercase aa code for wt and mutant
get_aa_3lower(df = mcsm_df
@@ -189,7 +269,7 @@ get_aa_3lower(df = mcsm_df
, col_mut = 'mut_aa_3lower')
#mcsm_df.columns = mcsm_df.columns.str.lower()
# foldx_df.shape
#mcsm_foldx_dfs = combine_dfs_with_checks(mcsm_df, foldx_df, my_join = o_join)
merging_cols_m1 = detect_common_cols(mcsm_df, foldx_df)
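detect_common_cols() is a project helper defined elsewhere in the repo; judging by how its result is passed to pd.merge(on = ...) and later pruned with .remove(), it presumably returns the column names the two frames share, roughly equivalent to:

import pandas as pd

def common_cols(df1: pd.DataFrame, df2: pd.DataFrame) -> list:
    # columns present in both frames, in df1's order (the repo's own helper may differ)
    return [c for c in df1.columns if c in df2.columns]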
@@ -205,8 +285,8 @@ print('==================================='
, '\nSecond merge: mcsm_foldx_dfs + deepddg'
, '\n===================================')
#deepddg_df = pd.read_csv(infile_deepddg, sep = ',')
#deepddg_df.columns
# merge with mcsm_foldx_dfs and deepddg_df
mcsm_foldx_deepddg_dfs = pd.merge(mcsm_foldx_dfs, deepddg_df, on = 'mutationinformation', how = l_join)
@@ -218,9 +298,9 @@ print('==================================='
, '\nThird merge: dssp + kd'
, '\n===================================')
dssp_df.shape
kd_df.shape
rd_df.shape
#dssp_kd_dfs = combine_dfs_with_checks(dssp_df, kd_df, my_join = o_join)
merging_cols_m2 = detect_common_cols(dssp_df, kd_df)
@@ -308,8 +388,8 @@ print('\n======================================='
, '\ncombined_df_clean + afor_df '
, '\n=======================================')
afor_cols = afor_df.columns
afor_df.shape
# create a mapping from the gwas mutation column i.e <gene_match>_abcXXXrst
#----------------------
@@ -360,16 +440,60 @@ else:
sys.exit('\nFAIL: merge unsuccessful for af and or')
#%%============================================================================
# Output columns: when dynamut, dynamut2 and others weren't being combined
out_filename_comb_afor = gene.lower() + '_comb_afor.csv'
outfile_comb_afor = outdir + '/' + out_filename_comb_afor
print('Output filename:', outfile_comb_afor
, '\n===================================================================')
# write csv
print('Writing file: combined stability and afor')
combined_stab_afor.to_csv(outfile_comb_afor, index = False)
print('\nFinished writing file:'
, '\nNo. of rows:', combined_stab_afor.shape[0]
, '\nNo. of cols:', combined_stab_afor.shape[1])
#%% end of script
#%%============================================================================
# combine dynamut, dynamut2, and mcsm_na
dfs_list = [dynamut_df, dynamut2_df, mcsm_na_df]
dfs_merged = reduce(lambda left,right: pd.merge(left
, right
, on = ['mutationinformation']
, how = 'inner')
, dfs_list)
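functools.reduce folds pd.merge pairwise over the list, i.e. merge(merge(dynamut_df, dynamut2_df), mcsm_na_df), each step an inner join on mutationinformation. A self-contained toy example of the same pattern (illustrative frames only):

from functools import reduce
import pandas as pd

a = pd.DataFrame({'mutationinformation': ['A1B', 'C2D'], 'x': [1, 2]})
b = pd.DataFrame({'mutationinformation': ['A1B', 'C2D'], 'y': [3, 4]})
c = pd.DataFrame({'mutationinformation': ['A1B'], 'z': [5]})

merged = reduce(lambda left, right: pd.merge(left, right,
                                             on = 'mutationinformation',
                                             how = 'inner'),
                [a, b, c])
print(merged)   # a single row (A1B) with x, y and z: the inner join keeps the intersection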
# drop excess columns
drop_cols = detect_common_cols(dfs_merged, combined_stab_afor)
drop_cols.remove('mutationinformation')
dfs_merged_clean = dfs_merged.drop(drop_cols, axis = 1)
merging_cols_m6 = detect_common_cols(dfs_merged_clean, combined_stab_afor)
len(dfs_merged_clean.columns)
len(combined_stab_afor.columns)
combined_all_params = pd.merge(combined_stab_afor
, dfs_merged_clean
, on = merging_cols_m6
, how = i_join)
expected_ncols = len(dfs_merged_clean.columns) + len(combined_stab_afor.columns) - len(merging_cols_m6)
expected_nrows = len(combined_stab_afor)
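# e.g. (hypothetical numbers) if dfs_merged_clean has 12 cols and combined_stab_afor has 40,
# sharing only 'mutationinformation', the combined df should have 12 + 40 - 1 = 51 cols
# and the same number of rows as combined_stab_afor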
if len(combined_all_params.columns) == expected_ncols and len(combined_all_params) == expected_nrows:
print('\nPASS: All dfs combined')
else:
print('\nFAIL: lengths mismatch'
, '\nExpected ncols:', expected_ncols
, '\nGot:', len(combined_all_params.columns)
, '\nExpected nrows:', expected_nrows
, '\nGot:', len(combined_all_params) )
#%% Done for gid on 10/09/2021
# write csv
print('Writing file: all params')
combined_all_params.to_csv(outfile_comb, index = False)
print('\nFinished writing file:'
, '\nNo. of rows:', combined_all_params.shape[0]
, '\nNo. of cols:', combined_all_params.shape[1])
#%% end of script

View file

@@ -8,11 +8,11 @@ setwd("~/git/LSHTM_analysis/scripts/plotting")
getwd()
source("Header_TT.R")
source("../functions/my_pairs_panel.R") # with lower panel turned off
source("../functions/plotting_globals.R")
source("../functions/plotting_data.R")
source("../functions/combining_dfs_plotting.R")
source("../functions/bp_subcolours.R")
# source("../functions/my_pairs_panel.R") # with lower panel turned off
# source("../functions/plotting_globals.R")
# source("../functions/plotting_data.R")
# source("../functions/combining_dfs_plotting.R")
# source("../functions/bp_subcolours.R")
#********************
# cmd args passed
@@ -41,8 +41,8 @@ import_dirs(drug, gene)
#---------------------------
if (!exists("infile_params") && exists("gene")){
#if (!is.character(infile_params) && exists("gene")){ # when running as cmd
in_filename_params = paste0(tolower(gene), "_all_params.csv") #for pncA (and for gid finally) 10/09/21
#in_filename_params = paste0(tolower(gene), "_comb_afor.csv") # part combined for gid
infile_params = paste0(outdir, "/", in_filename_params)
cat("\nInput file for mcsm comb data not specified, assuming filename: ", infile_params, "\n")
}
@@ -91,369 +91,139 @@ merged_df3 = all_plot_dfs[[2]]
merged_df2_comp = all_plot_dfs[[3]]
merged_df3_comp = all_plot_dfs[[4]]
#======================================================================
# read other files
infilename_dynamut = paste0("~/git/Data/", drug, "/output/dynamut_results/", gene
, "_complex_dynamut_norm.csv")
#TODO: Think! MOVE TO COMBINE or singular file for deepddg
infilename_dynamut2 = paste0("~/git/Data/", drug, "/output/dynamut_results/dynamut2/", gene
, "_complex_dynamut2_norm.csv")
#============================
# adding deepddg scaled values
# scale data b/w -1 and 1
#============================
n = which(colnames(merged_df3) == "deepddg"); n
infilename_mcsm_na = paste0("~/git/Data/", drug, "/output/mcsm_na_results/", gene
, "_complex_mcsm_na_norm.csv")
infilename_mcsm_f_snps <- paste0("~/git/Data/", drug, "/output/", gene
, "_mcsm_formatted_snps.csv")
dynamut_df = read.csv(infilename_dynamut)
dynamut2_df = read.csv(infilename_dynamut2)
mcsm_na_df = read.csv(infilename_mcsm_na)
mcsm_f_snps = read.csv(infilename_mcsm_f_snps, header = F)
names(mcsm_f_snps) = "mutationinformation"
my_min = min(merged_df3[,n]); my_min
my_max = max(merged_df3[,n]); my_max
####################################################################
# Data for subcols barplot (~heatmap)
####################################################################
# can include: mutation, or_kin, pwald, af_kin
cols_to_select = c("mutationinformation", "drtype"
, "wild_type"
, "position"
, "mutant_type"
, "chain", "ligand_id", "ligand_distance"
, "duet_stability_change", "duet_outcome", "duet_scaled"
, "ligand_affinity_change", "ligand_outcome", "affinity_scaled"
, "ddg_foldx", "foldx_scaled", "foldx_outcome"
, "deepddg", "deepddg_outcome" # comment out as not available for pnca
, "asa", "rsa", "rd_values", "kd_values"
, "af", "or_mychisq", "pval_fisher"
, "or_fisher", "or_logistic", "pval_logistic"
, "wt_prop_water", "mut_prop_water", "wt_prop_polarity", "mut_prop_polarity"
, "wt_calcprop", "mut_calcprop")
merged_df3$deepddg_scaled = ifelse(merged_df3[,n] < 0
, merged_df3[,n]/abs(my_min)
, merged_df3[,n]/my_max)
# sanity check
my_min = min(merged_df3$deepddg_scaled); my_min
my_max = max(merged_df3$deepddg_scaled); my_max
#=======================
# Data for sub colours
# barplot: PS
#=======================
cat("\nNo. of cols to select:", length(cols_to_select))
subcols_df_ps = merged_df3[, cols_to_select]
cat("\nNo of unique positions for ps:"
, length(unique(subcols_df_ps$position)))
# add count_pos col that counts the no. of nsSNPS at a position
setDT(subcols_df_ps)[, pos_count := .N, by = .(position)]
# should be a factor
if (is.factor(subcols_df_ps$duet_outcome)){
cat("\nDuet_outcome is factor")
table(subcols_df_ps$duet_outcome)
if (my_min == -1 && my_max == 1){
cat("\nPASS: DeepDDG successfully scaled b/w -1 and 1"
#, "\nProceeding with assigning deep outcome category")
, "\n")
}else{
cat("\nConverting duet_outcome to factor")
subcols_df_ps$duet_outcome = as.factor(subcols_df_ps$duet_outcome)
table(subcols_df_ps$duet_outcome)
cat("\nFAIL: could not scale DeepDDG ddg values"
, "Aborting!")
}
# should be -1 and 1
min(subcols_df_ps$duet_scaled)
max(subcols_df_ps$duet_scaled)
tapply(subcols_df_ps$duet_scaled, subcols_df_ps$duet_outcome, min)
tapply(subcols_df_ps$duet_scaled, subcols_df_ps$duet_outcome, max)
####################################################################
# Data for combining other dfs
####################################################################
# check unique values in normalised data
cat("\nNo. of unique values in duet scaled, no rounding:"
, length(unique(subcols_df_ps$duet_scaled)))
source("other_dfs_data.R")
# No rounding
my_grp = subcols_df_ps$duet_scaled; length(my_grp)
####################################################################
# Data for subcols barplot (~heatmap)
####################################################################
# If rounding is to be used
n = 3
subcols_df_ps$duet_scaledR = round(subcols_df_ps$duet_scaled, n)
cat("\nNo. of unique values in duet scaled", n, "places rounding:"
, length(unique(subcols_df_ps$duet_scaledR)))
my_grp_r = subcols_df_ps$duet_scaledR # rounding
# Add grp cols
subcols_df_ps$group <- paste0(subcols_df_ps$duet_outcome, "_", my_grp, sep = "")
subcols_df_ps$groupR <- paste0(subcols_df_ps$duet_outcome, "_", my_grp_r, sep = "")
# Call the function to create the palette based on the group defined above
subcols_ps <- ColourPalleteMulti(subcols_df_ps, "duet_outcome", "my_grp")
subcolsR_ps <- ColourPalleteMulti(subcols_df_ps, "duet_outcome", "my_grp_r")
print(paste0("Colour palette generated for my_grp: ", length(subcols_ps), " colours"))
print(paste0("Colour palette generated for my_grp_r: ", length(subcolsR_ps), " colours"))
source("coloured_bp_data.R")
####################################################################
# Data for logoplots
####################################################################
#-------------------------
# choose df for logoplot
#-------------------------
logo_data = merged_df3
#logo_data = merged_df3_comp
# quick checks
colnames(logo_data)
str(logo_data)
source("logo_data.R")
c1 = unique(logo_data$position)
nrow(logo_data)
cat("No. of rows in my_data:", nrow(logo_data)
, "\nDistinct positions corresponding to snps:", length(c1)
, "\n===========================================================")
#=======================================================================
#==================
# logo data: OR
#==================
foo = logo_data[, c("position"
, "mutant_type","duet_scaled", "or_mychisq"
, "mut_prop_polarity", "mut_prop_water")]
s1 = c("\nSuccessfully sourced logo_data.R")
cat(s1)
logo_data$log10or = log10(logo_data$or_mychisq)
logo_data_plot = logo_data[, c("position"
, "mutant_type", "or_mychisq", "log10or")]
logo_data_plot_or = logo_data[, c("position", "mutant_type", "or_mychisq")]
wide_df_or <- logo_data_plot_or %>% spread(position, or_mychisq, fill = 0.0)
wide_df_or = as.matrix(wide_df_or)
rownames(wide_df_or) = wide_df_or[,1]
dim(wide_df_or)
wide_df_or = wide_df_or[,-1]
str(wide_df_or)
position_or = as.numeric(colnames(wide_df_or))
#==================
# logo data: logOR
#==================
logo_data_plot_logor = logo_data[, c("position", "mutant_type", "log10or")]
wide_df_logor <- logo_data_plot_logor %>% spread(position, log10or, fill = 0.0)
wide_df_logor = as.matrix(wide_df_logor)
rownames(wide_df_logor) = wide_df_logor[,1]
wide_df_logor = subset(wide_df_logor, select = -c(1) )
colnames(wide_df_logor)
wide_df_logor_m = data.matrix(wide_df_logor)
rownames(wide_df_logor_m)
colnames(wide_df_logor_m)
position_logor = as.numeric(colnames(wide_df_logor_m))
#===============================
# logo data: multiple nsSNPs (>1)
#=================================
#require(data.table)
# get freq count of positions so you can subset freq<1
setDT(logo_data)[, mut_pos_occurrence := .N, by = .(position)]
table(logo_data$position)
table(logo_data$mut_pos_occurrence)
max_mut = max(table(logo_data$position))
# extract freq_pos > 1
my_data_snp = logo_data[logo_data$mut_pos_occurrence!=1,]
u = unique(my_data_snp$position)
max_mult_mut = max(table(my_data_snp$position))
if (nrow(my_data_snp) == nrow(logo_data) - table(logo_data$mut_pos_occurrence)[[1]] ){
cat("PASS: positions with multiple muts extracted"
, "\nNo. of mutations:", nrow(my_data_snp)
, "\nNo. of positions:", length(u)
, "\nMax no. of muts at any position", max_mult_mut)
}else{
cat("FAIL: positions with multiple muts could NOT be extracted"
, "\nExpected:",nrow(logo_data) - table(logo_data$mut_pos_occurrence)[[1]]
, "\nGot:", nrow(my_data_snp) )
}
cat("\nNo. of sites with only 1 mutations:", table(logo_data$mut_pos_occurrence)[[1]])
#--------------------------------------
# matrix for_mychisq mutant type
# frequency of mutant type by position
#---------------------------------------
table(my_data_snp$mutant_type, my_data_snp$position)
tab_mt = table(my_data_snp$mutant_type, my_data_snp$position)
class(tab_mt)
# unclass to convert to matrix
tab_mt = unclass(tab_mt)
tab_mt = as.matrix(tab_mt, rownames = T)
# should be TRUE
is.matrix(tab_mt)
rownames(tab_mt) #aa
colnames(tab_mt) #pos
#-------------------------------------
# matrix for wild type
# frequency of wild type by position
#-------------------------------------
tab_wt = table(my_data_snp$wild_type, my_data_snp$position); tab_wt
tab_wt = unclass(tab_wt)
# remove wt duplicates
wt = my_data_snp[, c("position", "wild_type")]
wt = wt[!duplicated(wt),]
tab_wt = table(wt$wild_type, wt$position); tab_wt # should all be 1
rownames(tab_wt)
rownames(tab_wt)
identical(colnames(tab_mt), colnames(tab_wt))
identical(ncol(tab_mt), ncol(tab_wt))
#----------------------------------
# logo data OR: multiple nsSNPs (>1)
#----------------------------------
logo_data_or_mult = my_data_snp[, c("position", "mutant_type", "or_mychisq")]
#wide_df_or <- logo_data_or %>% spread(position, or_mychisq, fill = 0.0)
wide_df_or_mult <- logo_data_or_mult %>% spread(position, or_mychisq, fill = NA)
wide_df_or_mult = as.matrix(wide_df_or_mult)
rownames(wide_df_or_mult) = wide_df_or_mult[,1]
wide_df_or_mult = wide_df_or_mult[,-1]
str(wide_df_or_mult)
position_or_mult = as.numeric(colnames(wide_df_or_mult))
####################################################################
# Data for Corrplots
####################################################################
cat("\n=========================================="
, "\nCORR PLOTS data: PS"
, "\n===========================================")
df_ps = merged_df2
#--------------------
# adding log cols : NEW UNCOMMENT
#--------------------
#df_ps$log10_or_mychisq = log10(df_ps$or_mychisq)
#df_ps$neglog_pval_fisher = -log10(df_ps$pval_fisher)
##df_ps$log10_or_kin = log10(df_ps$or_kin)
##df_ps$neglog_pwald_kin = -log10(df_ps$pwald_kin)
#df_ps$mutation_info_labels = ifelse(df_ps$mutation_info == dr_muts_col, 1, 0)
#----------------------------
# columns for corr plots:PS
#----------------------------
# subset data to generate pairwise correlations
cols_to_select = c("mutationinformation"
, "duet_scaled"
, "foldx_scaled"
#, "mutation_info_labels"
, "asa"
, "rsa"
, "rd_values"
, "kd_values"
, "log10_or_mychisq"
, "neglog_pval_fisher"
##, "or_kin"
##, "neglog_pwald_kin"
, "af"
##, "af_kin"
, "duet_outcome"
, drug)
corr_data_ps = df_ps[cols_to_select]
dim(corr_data_ps)
#--------------------------------------
# assign nice colnames (for display)
#--------------------------------------
my_corr_colnames = c("Mutation"
, "DUET"
, "FoldX"
#, "Mutation class"
, "ASA"
, "RSA"
, "RD"
, "KD"
, "Log (OR)"
, "-Log (P)"
##, "Adjusted (OR)"
##, "-Log (P wald)"
, "MAF"
##, "AF_kin"
, "duet_outcome"
, drug)
length(my_corr_colnames)
colnames(corr_data_ps)
colnames(corr_data_ps) <- my_corr_colnames
colnames(corr_data_ps)
start = 1
end = which(colnames(corr_data_ps) == drug); end # should be the last column
offset = 1
#===========================
# Corr data for plots: PS
# big_df ps: ~ merged_df2
#===========================
#corr_ps_df2 = corr_data_ps[start:(end-offset)] # without drug
corr_ps_df2 = corr_data_ps[start:end]
head(corr_ps_df2)
#===========================
# Corr data for plots: PS
# short_df ps: ~merged_df3
#===========================
corr_ps_df3 = corr_ps_df2[!duplicated(corr_ps_df2$Mutation),]
na_or = sum(is.na(corr_ps_df3$`Log (OR)`))
check1 = nrow(corr_ps_df3) - na_or
##na_adj_or = sum(is.na(corr_ps_df3$`adjusted (OR)`))
##check2 = nrow(corr_ps_df3) - na_adj_or
if (nrow(corr_ps_df3) == nrow(merged_df3) && nrow(merged_df3_comp) == check1) {
cat( "\nPASS: No. of rows for corr_ps_df3 match"
, "\nPASS: No. of OR values checked: " , check1)
} else {
cat("\nFAIL: Numbers mismatch:"
, "\nExpected nrows: ", nrow(merged_df3)
, "\nGot: ", nrow(corr_ps_df3)
, "\nExpected OR values: ", nrow(merged_df3_comp)
, "\nGot: ", check1)
}
rm(foo)
####################################################################
# Data for DM OM Plots: Long format dfs
####################################################################
source("other_plots_data.R")
#source("other_plots_data.R")
source("dm_om_data.R")
s2 = c("\nSuccessfully sourced dm_om_data.R")
cat(s2)
####################################################################
# Data for Lineage barplots: WF and LF dfs
####################################################################
source("lineage_bp_data.R")
source("lineage_data.R")
s3 = c("\nSuccessfully sourced lineage_data.R")
cat(s3)
####################################################################
# Data for corr plots:
####################################################################
# make sure the above script works because merged_df2_combined is needed
source("corr_data.R")
s4 = c("\nSuccessfully sourced corr_data.R")
cat(s4)
########################################################################
# End of script
########################################################################
if ( all( c(length(s1), length(s2), length(s3), length(s4)) > 0 ) ){
cat(
"\n##################################################"
, "\nSuccessful: get_plotting_dfs.R worked!"
, "\n###################################################\n")
} else {
cat(
"\n#################################################"
, "\nFAIL: get_plotting_dfs.R didn't complete fully!Please check"
, "\n###################################################\n" )
}
########################################################################
# clear excess variables
rm(c1, c2, c3, c4, check1
, curr_count, curr_total
, cols_check
, cols_to_select
, cols_to_select_deepddg
, cols_to_select_duet
, cols_to_select_dynamut
, cols_to_select_dynamut2
, cols_to_select_encomddg
, cols_to_select_encomdds
, cols_to_select_mcsm
, cols_to_select_mcsm_na
, cols_to_select_sdm
, infile_metadata
, infile_params
#, infilename_dynamut
#, infilename_dynamut2
#, infilename_mcsm_f_snps
#, infilename_mcsm_na
)
cat("\n######################################################\n"
, "\nSuccessful: get_plotting_dfs.R worked!"
, "\n###################################################\n")
rm(pivot_cols
, pivot_cols_deepddg
, pivot_cols_duet
, pivot_cols_dynamut
, pivot_cols_dynamut2
, pivot_cols_encomddg
, pivot_cols_encomdds
, pivot_cols_foldx
, pivot_cols_mcsm
, pivot_cols_mcsm_na
, pivot_cols_n
, pivot_cols_sdm)
rm(expected_cols
, expected_ncols
, expected_rows
, expected_rows_lf
, fact_cols)

View file

@@ -4,21 +4,10 @@
# WF and LF data with lineage sample, and snp counts
# sourced by get_plotting_dfs.R
#########################################################
# working dir and loading libraries
# getwd()
# setwd("~/git/LSHTM_analysis/scripts/plotting")
# getwd()
# make cmd
# globals
# drug = "streptomycin"
# gene = "gid"
# source("get_plotting_dfs.R")
#=======================================================================
#################################################
#=================================================
# Get data with lineage count, and snp diversity
#################################################
#=================================================
table(merged_df2$lineage)
if (table(merged_df2$lineage == "")[[2]]) {
@@ -30,12 +19,12 @@ cat("\nMissing samples with lineage classification:", table(merged_df2$lineage =
table(merged_df2$lineage_labels)
class(merged_df2$lineage_labels); nlevels(merged_df2$lineage_labels)
##################################
#==========================================
# WF data: lineages with
# snp count
# total_samples
# snp diversity (perc)
##################################
#==========================================
sel_lineages = levels(merged_df2$lineage_labels)
lin_wf = data.frame(sel_lineages) #4, 1
@@ -67,9 +56,9 @@ lin_wf
lin_wf$snp_diversity = lin_wf$num_snps_u/lin_wf$total_samples
lin_wf
#=====================
#----------------------
# Add some formatting
#=====================
#----------------------
# SNP diversity
lin_wf$snp_diversity_f = round( (lin_wf$snp_diversity * 100), digits = 0)
lin_wf$snp_diversity_f = paste0(lin_wf$snp_diversity_f, "%")
@@ -100,12 +89,12 @@ lin_wf$sel_lineages = factor(lin_wf$sel_lineages, c("L1"
levels(lin_wf$sel_lineages)
##################################
#=================================
# LF data: lineages with
# snp count
# total_samples
# snp diversity (perc)
##################################
#=================================
names(lin_wf)
tot_cols = ncol(lin_wf)
pivot_cols = c("sel_lineages", "snp_diversity", "snp_diversity_f")
@@ -153,3 +142,6 @@ lin_lf$sel_lineages = factor(lin_lf$sel_lineages, c("L1"
, ""))
levels(lin_lf$sel_lineages)
################################################################

View file

@@ -16,9 +16,9 @@ source("Header_TT.R") # also loads all my functions
#===========
# input
#===========
#drug = "streptomycin"
#gene = "gid"
source("get_plotting_dfs.R")
drug = "streptomycin"
gene = "gid"
#source("get_plotting_dfs.R")
spec = matrix(c(
"drug" , "d", 1, "character",
@@ -47,7 +47,7 @@ plot_lineage_dist_dm_om_ps = paste0(plotdir,"/", lineage_dist_dm_om_ps)
###########################
# Data for plots
# you need merged_df2_combined or merged_df2_combined_comp
# since this is one-many relationship
# i.e the same SNP can belong to multiple lineages
# using the _comp dataset means
@@ -59,10 +59,12 @@ plot_lineage_dist_dm_om_ps = paste0(plotdir,"/", lineage_dist_dm_om_ps)
# Data for plots
#===================
# quick checks
table(merged_df2_combined$mutation_info_labels); levels(merged_df2_combined$lineage_labels)
table(merged_df2_combined$lineage_labels); levels(merged_df2_combined$mutation_info_labels)
sel_lineages = c("L1", "L2", "L3", "L4")
lin_dist_plot = merged_df2_combined[merged_df2_combined$lineage_labels%in%sel_lineages,]
table(lin_dist_plot$lineage_labels); nlevels(lin_dist_plot$lineage_labels)
# refactor
@@ -79,29 +81,55 @@ table(lin_dist_plot$lineage_labels)#{RESULT: No of samples within lineage}
length(unique(lin_dist_plot$mutationinformation))#{Result: No. of unique mutations selected lineages contribute to}
length(lin_dist_plot$mutationinformation)
u2 = unique(merged_df2_combined$mutationinformation)
u = unique(lin_dist_plot$mutationinformation)
check = u2[!u2%in%u]; print(check) #{Muts not present within selected lineages}
#-----------------------------------------------------------------------
# without facet
my_x_and_t = c("duet_scaled", "mCSM-DUET")
my_x_and_t = c("foldx_scaled", "FoldX")
#my_x_and_t = c("deepddg_scaled", "DeepDDG")
my_x_and_t = c("ddg_dynamut2_scaled", "Dynamut2")
my_x_and_t = c("ddg_dynamut_scaled", "Dynamut")
my_x_and_t = c("ddg_mcsm_scaled", "mCSM")
my_x_and_t = c("ddg_sdm_scaled", "SDM")
my_x_and_t = c("ddg_duet_scaled", "DUET-d")
my_x_and_t = c("ddg_encom_scaled", "EnCOM-Stability")
my_x_and_t = c("dds_encom_scaled", "EnCOM-Flexibility")
my_x_and_t = c("mcsm_na_scaled", "mCSM-NA")
# TO DO
my_x_and_t = c("affinity_scaled", "mCSM-Lig") #ligdist< 10
#=====================
# Plot: without facet
#=====================
linP_dm_om = lineage_distP(lin_dist_plot
, x_axis = my_x_and_t[1]
, x_lab = my_x_and_t[2]
, y_axis = "lineage_labels"
, x_lab = "DeepDDG"
, leg_label = "Mutation Class"
, with_facet = F)
linP_dm_om
# with facet
#=====================
# Plot: with facet
#=====================
linP_dm_om_facet = lineage_distP(lin_dist_plot
, x_axis = my_x_and_t[1]
, x_lab = my_x_and_t[2]
, y_axis = "lineage_labels"
, with_facet = T
, facet_wrap_var = "mutation_info_labels"
, leg_label = "Mutation Class"
, leg_pos_wf = "none"
, leg_dir_wf = "horizontal")
linP_dm_om_facet
#=================
@@ -109,6 +137,7 @@ linP_dm_om_facet
# without facet
#=================
svg(plot_lineage_dist_dm_om_ps)
linP_dm_om
dev.off()

View file

@@ -1,538 +0,0 @@
#!/usr/bin/env Rscript
#########################################################
# TASK: Script to format data for dm om plots:
# generating LF data
# sourced by get_plotting_dfs.R
#########################################################
# working dir and loading libraries
# getwd()
# setwd("~/git/LSHTM_analysis/scripts/plotting")
# getwd()
# make cmd
# globals
# drug = "streptomycin"
# gene = "gid"
# source("get_plotting_dfs.R")
#=======================================================================
# MOVE TO COMBINE or singular file for deepddg
#
# cols_to_select = c("mutation", "mutationinformation"
# , "wild_type", "position", "mutant_type"
# , "mutation_info")
#
# merged_df3_short = merged_df3[, cols_to_select]
# infilename_mcsm_f_snps <- paste0("~/git/Data/", drug, "/output/", gene
# , "_mcsm_formatted_snps.csv")
#
# mcsm_f_snps<- read.csv(infilename_mcsm_f_snps, header = F)
# names(mcsm_f_snps) <- "mutationinformation"
# write merged_df3 to generate structural figure on chimera
#write.csv(merged_df3_short, "merged_df3_short.csv")
#========================================================================
# MOVE TO COMBINE or singular file for deepddg
#============================
# adding deepddg scaled values
# scale data b/w -1 and 1
#============================
n = which(colnames(merged_df3) == "deepddg"); n
my_min = min(merged_df3[,n]); my_min
my_max = max(merged_df3[,n]); my_max
merged_df3$deepddg_scaled = ifelse(merged_df3[,n] < 0
, merged_df3[,n]/abs(my_min)
, merged_df3[,n]/my_max)
# sanity check
my_min = min(merged_df3$deepddg_scaled); my_min
my_max = max(merged_df3$deepddg_scaled); my_max
if (my_min == -1 && my_max == 1){
cat("\nPASS: DeepDDG successfully scaled b/w -1 and 1"
#, "\nProceeding with assigning deep outcome category")
, "\n")
}else{
cat("\nFAIL: could not scale DeepDDG ddg values"
, "Aborting!")
}
#========================================================================
# cols to select
cols_mcsm_df <- merged_df3[, c("mutationinformation", "mutation"
, "mutation_info", "position"
, LigDist_colname
, "duet_stability_change", "duet_scaled", "duet_outcome"
, "ligand_affinity_change", "affinity_scaled", "ligand_outcome"
, "ddg_foldx", "foldx_scaled", "foldx_outcome"
, "deepddg", "deepddg_scaled", "deepddg_outcome"
, "asa", "rsa"
, "rd_values", "kd_values"
, "log10_or_mychisq", "neglog_pval_fisher", "af")]
cols_mcsm_na_df <- mcsm_na_df[, c("mutationinformation"
, "mcsm_na_affinity", "mcsm_na_scaled"
, "mcsm_na_outcome")]
# entire dynamut_df
cols_dynamut2_df <- dynamut2_df[, c("mutationinformation"
, "ddg_dynamut2", "ddg_dynamut2_scaled"
, "ddg_dynamut2_outcome")]
n_comb_cols = length(cols_mcsm_df) + length(cols_mcsm_na_df) +
length(dynamut_df) + length(cols_dynamut2_df); n_comb_cols
i1<- intersect(names(cols_mcsm_df), names(cols_mcsm_na_df))
i2<- intersect(names(dynamut_df), names(cols_dynamut2_df))
merging_cols <- intersect(i1, i2)
cat("\nmerging_cols:", merging_cols)
if (merging_cols == "mutationinformation") {
cat("\nStage 1: Found common col between dfs, checking values in it...")
c1 <- all(mcsm_f_snps[[merging_cols]]%in%cols_mcsm_df[[merging_cols]])
c2 <- all(mcsm_f_snps[[merging_cols]]%in%cols_mcsm_na_df[[merging_cols]])
c3 <- all(mcsm_f_snps[[merging_cols]]%in%dynamut_df[[merging_cols]])
c4 <- all(mcsm_f_snps[[merging_cols]]%in%cols_dynamut2_df[[merging_cols]])
cols_check <- c(c1, c2, c3, c4)
expected_cols = n_comb_cols - ( length(cols_check) - 1)
if (all(cols_check)){
cat("\nStage 2: Proceeding with merging dfs:\n")
comb_df <- Reduce(inner_join, list(cols_mcsm_df
, cols_mcsm_na_df
, dynamut_df
, cols_dynamut2_df))
comb_df_s = arrange(comb_df, position)
# if ( nrow(comb_df_s) == nrow(mcsm_f_snps) && ncol(comb_df_s) == expected_cols) {
# cat("\Stage3, PASS: dfs merged sucessfully"
# , "\nnrow of merged_df: ", nrow(comb_df_s)
# , "\nncol of merged_df:", ncol(comb_df_s))
# }
}
}
#names(comb_df_s)
cat("\n!!!IT GOT TO HERE!!!!")
#=======================================================================
fact_cols = colnames(comb_df_s)[grepl( "_outcome|_info", colnames(comb_df_s) )]
fact_cols
lapply(comb_df_s[, fact_cols], class)
comb_df_s[, fact_cols] <- lapply(comb_df_s[, fact_cols], as.factor)
if (any(lapply(comb_df_s[, fact_cols], class) == "character")){
cat("\nChanging cols to factor")
comb_df_s[, fact_cols] <- lapply(comb_df_s[, fact_cols],as.factor)
if (all(lapply(comb_df_s[, fact_cols], class) == "factor")){
cat("\nSuccessful: cols changed to factor")
}
}
lapply(comb_df_s[, fact_cols], class)
#=======================================================================
table(comb_df_s$mutation_info)
# further checks to make sure dr and other muts are indeed unique
dr_muts = comb_df_s[comb_df_s$mutation_info == dr_muts_col,]
dr_muts_names = unique(dr_muts$mutation)
other_muts = comb_df_s[comb_df_s$mutation_info == other_muts_col,]
other_muts_names = unique(other_muts$mutation)
if ( table(dr_muts_names%in%other_muts_names)[[1]] == length(dr_muts_names) &&
table(other_muts_names%in%dr_muts_names)[[1]] == length(other_muts_names) ){
cat("PASS: dr and other muts are indeed unique")
}else{
cat("FAIL: dr and others muts are NOT unique!")
quit()
}
# pretty display names i.e. labels to reduce major code duplication later
foo_cnames = data.frame(colnames(comb_df_s))
names(foo_cnames) <- "old_name"
stability_suffix <- paste0(delta_symbol, delta_symbol, "G")
flexibility_suffix <- paste0(delta_symbol, delta_symbol, "S")
lig_dn = paste0("Ligand distance (", angstroms_symbol, ")"); lig_dn
duet_dn = paste0("DUET ", stability_suffix); duet_dn
foldx_dn = paste0("FoldX ", stability_suffix); foldx_dn
deepddg_dn = paste0("Deepddg " , stability_suffix); deepddg_dn
mcsm_na_dn = paste0("mCSM-NA affinity ", stability_suffix); mcsm_na_dn
dynamut_dn = paste0("Dynamut ", stability_suffix); dynamut_dn
dynamut2_dn = paste0("Dynamut2 " , stability_suffix); dynamut2_dn
encom_ddg_dn = paste0("EnCOM " , stability_suffix); encom_ddg_dn
encom_dds_dn = paste0("EnCOM " , flexibility_suffix ); encom_dds_dn
sdm_dn = paste0("SDM " , stability_suffix); sdm_dn
mcsm_dn = paste0("mCSM " , stability_suffix ); mcsm_dn
# Change colnames of some columns using datatable
comb_df_sl = comb_df_s
names(comb_df_sl)
setnames(comb_df_sl
, old = c("asa", "rsa", "rd_values", "kd_values"
, "log10_or_mychisq", "neglog_pval_fisher", "af"
, LigDist_colname
, "duet_scaled"
, "foldx_scaled"
, "deepddg_scaled"
, "mcsm_na_scaled"
, "ddg_dynamut_scaled"
, "ddg_dynamut2_scaled"
, "ddg_encom_scaled"
, "dds_encom_scaled"
, "ddg_sdm"
, "ddg_mcsm")
, new = c("ASA", "RSA", "RD", "KD"
, "Log10 (OR)", "-Log (P)", "MAF"
, lig_dn
, duet_dn
, foldx_dn
, deepddg_dn
, mcsm_na_dn
, dynamut_dn
, dynamut2_dn
, encom_ddg_dn
, encom_dds_dn
, sdm_dn
, mcsm_dn)
)
foo_cnames <- cbind(foo_cnames, colnames(comb_df_sl))
# some more pretty labels
table(comb_df_sl$mutation_info)
levels(comb_df_sl$mutation_info)[levels(comb_df_sl$mutation_info)==dr_muts_col] <- "DM"
levels(comb_df_sl$mutation_info)[levels(comb_df_sl$mutation_info)==other_muts_col] <- "OM"
table(comb_df_sl$mutation_info)
#######################################################################
#======================
# Selecting dfs
# with appropriate cols
#=======================
static_cols_start = c("mutationinformation"
, "position"
, "mutation"
, "mutation_info")
static_cols_end = c(lig_dn
, "ASA"
, "RSA"
, "RD"
, "KD")
# ordering is important!
#########################################################################
#==============
# DUET: LF
#==============
cols_to_select_duet = c(static_cols_start, c("duet_outcome", duet_dn), static_cols_end)
wf_duet = comb_df_sl[, cols_to_select_duet]
#pivot_cols_ps = cols_to_select_ps[1:5]; pivot_cols_ps
pivot_cols_duet = cols_to_select_duet[1: (length(static_cols_start) + 1)]; pivot_cols_duet
expected_rows_lf = nrow(wf_duet) * (length(wf_duet) - length(pivot_cols_duet))
expected_rows_lf
# LF data: duet
lf_duet = gather(wf_duet
, key = param_type
, value = param_value
, all_of(duet_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_duet) == expected_rows_lf){
cat("\nPASS: long format data created for ", duet_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# FoldX: LF
#==============
cols_to_select_foldx= c(static_cols_start, c("foldx_outcome", foldx_dn), static_cols_end)
wf_foldx = comb_df_sl[, cols_to_select_foldx]
pivot_cols_foldx = cols_to_select_foldx[1: (length(static_cols_start) + 1)]; pivot_cols_foldx
expected_rows_lf = nrow(wf_foldx) * (length(wf_foldx) - length(pivot_cols_foldx))
expected_rows_lf
# LF data: foldx
print("TESTXXXXXXXXXXXXXXXXXXXXX---------------------->>>>")
lf_foldx <<- gather(wf_foldx
, key = param_type
, value = param_value
, all_of(foldx_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_foldx) == expected_rows_lf){
cat("\nPASS: long format data created for ", foldx_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# Deepddg: LF
#==============
cols_to_select_deepddg = c(static_cols_start, c("deepddg_outcome", deepddg_dn), static_cols_end)
wf_deepddg = comb_df_sl[, cols_to_select_deepddg]
pivot_cols_deepddg = cols_to_select_deepddg[1: (length(static_cols_start) + 1)]; pivot_cols_deepddg
expected_rows_lf = nrow(wf_deepddg) * (length(wf_deepddg) - length(pivot_cols_deepddg))
expected_rows_lf
# LF data: deepddg
lf_deepddg = gather(wf_deepddg
, key = param_type
, value = param_value
, all_of(deepddg_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_deepddg) == expected_rows_lf){
cat("\nPASS: long format data created for ", deepddg_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# mCSM-NA: LF
#==============
cols_to_select_mcsm_na = c(static_cols_start, c("mcsm_na_outcome", mcsm_na_dn), static_cols_end)
wf_mcsm_na = comb_df_sl[, cols_to_select_mcsm_na]
pivot_cols_mcsm_na = cols_to_select_mcsm_na[1: (length(static_cols_start) + 1)]; pivot_cols_mcsm_na
expected_rows_lf = nrow(wf_mcsm_na) * (length(wf_mcsm_na) - length(pivot_cols_mcsm_na))
expected_rows_lf
# LF data: mcsm_na
lf_mcsm_na = gather(wf_mcsm_na
, key = param_type
, value = param_value
, all_of(mcsm_na_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_mcsm_na) == expected_rows_lf){
cat("\nPASS: long format data created for ", mcsm_na_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# Dynamut: LF
#==============
cols_to_select_dynamut = c(static_cols_start, c("ddg_dynamut_outcome", dynamut_dn), static_cols_end)
wf_dynamut = comb_df_sl[, cols_to_select_dynamut]
pivot_cols_dynamut = cols_to_select_dynamut[1: (length(static_cols_start) + 1)]; pivot_cols_dynamut
expected_rows_lf = nrow(wf_dynamut) * (length(wf_dynamut) - length(pivot_cols_dynamut))
expected_rows_lf
# LF data: dynamut
lf_dynamut = gather(wf_dynamut
, key = param_type
, value = param_value
, all_of(dynamut_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_dynamut) == expected_rows_lf){
cat("\nPASS: long format data created for ", dynamut_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# Dynamut2: LF
#==============
cols_to_select_dynamut2 = c(static_cols_start, c("ddg_dynamut2_outcome", dynamut2_dn), static_cols_end)
wf_dynamut2 = comb_df_sl[, cols_to_select_dynamut2]
pivot_cols_dynamut2 = cols_to_select_dynamut2[1: (length(static_cols_start) + 1)]; pivot_cols_dynamut2
expected_rows_lf = nrow(wf_dynamut2) * (length(wf_dynamut2) - length(pivot_cols_dynamut2))
expected_rows_lf
# LF data: dynamut2
lf_dynamut2 = gather(wf_dynamut2
, key = param_type
, value = param_value
, all_of(dynamut2_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_dynamut2) == expected_rows_lf){
cat("\nPASS: long format data created for ", dynamut2_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# EnCOM ddg: LF
#==============
cols_to_select_encomddg = c(static_cols_start, c("ddg_encom_outcome", encom_ddg_dn), static_cols_end)
wf_encomddg = comb_df_sl[, cols_to_select_encomddg]
pivot_cols_encomddg = cols_to_select_encomddg[1: (length(static_cols_start) + 1)]; pivot_cols_encomddg
expected_rows_lf = nrow(wf_encomddg ) * (length(wf_encomddg ) - length(pivot_cols_encomddg))
expected_rows_lf
# LF data: encomddg
lf_encomddg = gather(wf_encomddg
, key = param_type
, value = param_value
, all_of(encom_ddg_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_encomddg) == expected_rows_lf){
cat("\nPASS: long format data created for ", encom_ddg_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# EnCOM dds: LF
#==============
cols_to_select_encomdds = c(static_cols_start, c("dds_encom_outcome", encom_dds_dn), static_cols_end)
wf_encomdds = comb_df_sl[, cols_to_select_encomdds]
pivot_cols_encomdds = cols_to_select_encomdds[1: (length(static_cols_start) + 1)]; pivot_cols_encomdds
expected_rows_lf = nrow(wf_encomdds) * (length(wf_encomdds) - length(pivot_cols_encomdds))
expected_rows_lf
# LF data: encomdds
lf_encomdds = gather(wf_encomdds
, key = param_type
, value = param_value
, all_of(encom_dds_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_encomdds) == expected_rows_lf){
cat("\nPASS: long format data created for", encom_dds_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# SDM: LF
#==============
cols_to_select_sdm = c(static_cols_start, c("ddg_sdm_outcome", sdm_dn), static_cols_end)
wf_sdm = comb_df_sl[, cols_to_select_sdm]
pivot_cols_sdm = cols_to_select_sdm[1: (length(static_cols_start) + 1)]; pivot_cols_sdm
expected_rows_lf = nrow(wf_sdm) * (length(wf_sdm) - length(pivot_cols_sdm))
expected_rows_lf
# LF data: sdm
lf_sdm = gather(wf_sdm
, key = param_type
, value = param_value
, all_of(sdm_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_sdm) == expected_rows_lf){
cat("\nPASS: long format data created for", sdm_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
#==============
# mCSM: LF
#==============
cols_to_select_mcsm = c(static_cols_start, c("ddg_mcsm_outcome", mcsm_dn), static_cols_end)
wf_mcsm = comb_df_sl[, cols_to_select_mcsm]
pivot_cols_mcsm = cols_to_select_mcsm[1: (length(static_cols_start) + 1)]; pivot_cols_mcsm
expected_rows_lf = nrow(wf_mcsm) * (length(wf_mcsm) - length(pivot_cols_mcsm))
expected_rows_lf
# LF data: mcsm
lf_mcsm = gather(wf_mcsm
, key = param_type
, value = param_value
, all_of(mcsm_dn):tail(static_cols_end,1)
, factor_key = TRUE)
if (nrow(lf_mcsm) == expected_rows_lf){
cat("\nPASS: long format data created for", mcsm_dn)
}else{
cat("\nFAIL: long format data could not be created for duet")
quit()
}
############################################################################
# clear excess variables
rm(all_plot_dfs
, cols_dynamut2_df
, cols_mcsm_df
, cols_mcsm_na_df
, comb_df
, corr_data_ps
, corr_ps_df3
, df_lf_ps
, foo
, foo_cnames
, gene_metadata
, logo_data
, logo_data_or_mult
, logo_data_plot
, logo_data_plot_logor
, logo_data_plot_or
, my_data_snp
, my_df
, my_df_u
, other_muts
, pd_df
, subcols_df_ps
, tab_mt
, wide_df_logor
, wide_df_logor_m
, wide_df_or
, wide_df_or_mult
, wt)
rm(c3, c4, check1
, cols_check
, cols_to_select
, cols_to_select_deepddg
, cols_to_select_duet
, cols_to_select_dynamut
, cols_to_select_dynamut2
, cols_to_select_encomddg
, cols_to_select_encomdds
, cols_to_select_mcsm
, cols_to_select_mcsm_na
, cols_to_select_sdm)