changed df to adults df to extract relevant info
parent 9e5b202f5d
commit bbdd2d12e5
5 changed files with 78 additions and 1007 deletions
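In brief: read_data.R now builds an adult-only data frame (adult_df, selected by age) right after loading, and the extraction script reads its sam/serum/npa columns from adult_df instead of all_df. A minimal, self-contained sketch of that idea on toy data, using the same names as the diff below (illustrative only, not the committed hunks verbatim):

# Illustrative sketch of the change (toy data; not the committed code verbatim)
all_df = data.frame(
  mosaic = c("A1", "A2", "C1"),
  age    = c(34, 52, 9),       # one child on purpose
  adult  = c(1, 1, 0)
)

# read_data.R now selects adult patients by age, up front
adult_df = all_df[all_df$age >= 18, ]

# and cross-checks the age-based selection against the existing 'adult' flag
if (all(adult_df$adult == 1)) {
  cat("PASS: adult df extracted successfully\n")
} else {
  cat("FAIL: adult df number mismatch!\n")
}

# downstream, the *_sam / *_serum / *_npa column extraction reads from adult_df
# instead of all_df, so the sam_df / serum_df / npa_df subsets contain adults only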
|
@@ -3,7 +3,7 @@ getwd()
|
||||||
setwd('~/git/mosaic_2020/')
|
setwd('~/git/mosaic_2020/')
|
||||||
getwd()
|
getwd()
|
||||||
########################################################################
|
########################################################################
|
||||||
# TASK: Extract relevant columns from mosaic data
|
# TASK: Extract relevant columns from mosaic adults data
|
||||||
# sam
|
# sam
|
||||||
# serum
|
# serum
|
||||||
# npa
|
# npa
|
||||||
|
@@ -14,17 +14,17 @@ getwd()
|
||||||
source("read_data.R")
|
source("read_data.R")
|
||||||
|
|
||||||
# clear unnecessary variables
|
# clear unnecessary variables
|
||||||
#rm()
|
rm(all_df)
|
||||||
########################################################################
|
########################################################################
|
||||||
|
|
||||||
#=========
|
#=========
|
||||||
# sam
|
# sam
|
||||||
#=========
|
#=========
|
||||||
sam_regex = regex(".*_sam[1-3]{1}$", ignore_case = T)
|
sam_regex = regex(".*_sam[1-3]{1}$", ignore_case = T)
|
||||||
sam_cols_i = str_extract(colnames(all_df), sam_regex) # not boolean
|
sam_cols_i = str_extract(colnames(adult_df), sam_regex) # not boolean
|
||||||
#sam_cols_b = colnames(all_df)%in%sam_cols_i # boolean
|
#sam_cols_b = colnames(adult_df)%in%sam_cols_i # boolean
|
||||||
|
|
||||||
sam_cols = colnames(all_df)[colnames(all_df)%in%sam_cols_i]
|
sam_cols = colnames(adult_df)[colnames(adult_df)%in%sam_cols_i]
|
||||||
|
|
||||||
# this contains log columns + daysamp_samXX: omitting these
|
# this contains log columns + daysamp_samXX: omitting these
|
||||||
sam_regex_log_days = regex("log|day.*_sam[1-3]{1}$", ignore_case = T, perl = T)
|
sam_regex_log_days = regex("log|day.*_sam[1-3]{1}$", ignore_case = T, perl = T)
|
||||||
|
@@ -48,7 +48,7 @@ cat("Extracting SAM cols + metadata_cols")
|
||||||
|
|
||||||
if ( length(sam_cols_to_extract) == length(meta_data_cols) + length(sam_cols_clean) ){
|
if ( length(sam_cols_to_extract) == length(meta_data_cols) + length(sam_cols_clean) ){
|
||||||
cat("Extracing", length(sam_cols_to_extract), "columns for sam")
|
cat("Extracing", length(sam_cols_to_extract), "columns for sam")
|
||||||
sam_df = all_df[, sam_cols_to_extract]
|
sam_df = adult_df[, sam_cols_to_extract]
|
||||||
}else{
|
}else{
|
||||||
cat("FAIL: length mismatch"
|
cat("FAIL: length mismatch"
|
||||||
, "Expeceted to extract:", length(meta_data_cols) + length(sam_cols_clean), "columns"
|
, "Expeceted to extract:", length(meta_data_cols) + length(sam_cols_clean), "columns"
|
||||||
|
@@ -61,10 +61,10 @@ colnames_sam_df = colnames(sam_df); colnames_sam_df
|
||||||
# serum
|
# serum
|
||||||
#=========
|
#=========
|
||||||
serum_regex = regex(".*_serum[1-3]{1}$", ignore_case = T)
|
serum_regex = regex(".*_serum[1-3]{1}$", ignore_case = T)
|
||||||
serum_cols_i = str_extract(colnames(all_df), serum_regex) # not boolean
|
serum_cols_i = str_extract(colnames(adult_df), serum_regex) # not boolean
|
||||||
#serum_cols_b = colnames(all_df)%in%serum_cols_i # boolean
|
#serum_cols_b = colnames(adult_df)%in%serum_cols_i # boolean
|
||||||
|
|
||||||
serum_cols = colnames(all_df)[colnames(all_df)%in%serum_cols_i]
|
serum_cols = colnames(adult_df)[colnames(adult_df)%in%serum_cols_i]
|
||||||
|
|
||||||
# this contains log columns + dayserump_serumXX: omitting these
|
# this contains log columns + dayserump_serumXX: omitting these
|
||||||
serum_regex_log_days = regex("log|day.*_serum[1-3]{1}$", ignore_case = T, perl = T)
|
serum_regex_log_days = regex("log|day.*_serum[1-3]{1}$", ignore_case = T, perl = T)
|
||||||
|
@@ -88,7 +88,7 @@ cat("Extracting SERUM cols + metadata_cols")
|
||||||
|
|
||||||
if ( length(serum_cols_to_extract) == length(meta_data_cols) + length(serum_cols_clean) ){
|
if ( length(serum_cols_to_extract) == length(meta_data_cols) + length(serum_cols_clean) ){
|
||||||
cat("Extracing", length(serum_cols_to_extract), "columns for serum")
|
cat("Extracing", length(serum_cols_to_extract), "columns for serum")
|
||||||
serum_df = all_df[, serum_cols_to_extract]
|
serum_df = adult_df[, serum_cols_to_extract]
|
||||||
}else{
|
}else{
|
||||||
cat("FAIL: length mismatch"
|
cat("FAIL: length mismatch"
|
||||||
, "Expeceted to extract:", length(meta_data_cols) + length(serum_cols_clean), "columns"
|
, "Expeceted to extract:", length(meta_data_cols) + length(serum_cols_clean), "columns"
|
||||||
|
@@ -101,10 +101,10 @@ colnames_serum_df = colnames(serum_df); colnames_serum_df
|
||||||
# npa
|
# npa
|
||||||
#=========
|
#=========
|
||||||
npa_regex = regex(".*_npa[1-3]{1}$", ignore_case = T)
|
npa_regex = regex(".*_npa[1-3]{1}$", ignore_case = T)
|
||||||
npa_cols_i = str_extract(colnames(all_df), npa_regex) # not boolean
|
npa_cols_i = str_extract(colnames(adult_df), npa_regex) # not boolean
|
||||||
#npa_cols_b = colnames(all_df)%in%npa_cols_i # boolean
|
#npa_cols_b = colnames(adult_df)%in%npa_cols_i # boolean
|
||||||
|
|
||||||
npa_cols = colnames(all_df)[colnames(all_df)%in%npa_cols_i]
|
npa_cols = colnames(adult_df)[colnames(adult_df)%in%npa_cols_i]
|
||||||
|
|
||||||
# this contains log columns + daynpap_npaXX: omitting these
|
# this contains log columns + daynpap_npaXX: omitting these
|
||||||
npa_regex_log_days = regex("log|day|vl_samptime|ct.*_npa[1-3]{1}$", ignore_case = T, perl = T)
|
npa_regex_log_days = regex("log|day|vl_samptime|ct.*_npa[1-3]{1}$", ignore_case = T, perl = T)
|
||||||
|
@@ -128,7 +128,7 @@ cat("Extracting NPA cols + metadata_cols")
|
||||||
|
|
||||||
if ( length(npa_cols_to_extract) == length(meta_data_cols) + length(npa_cols_clean) ){
|
if ( length(npa_cols_to_extract) == length(meta_data_cols) + length(npa_cols_clean) ){
|
||||||
cat("Extracing", length(npa_cols_to_extract), "columns for npa")
|
cat("Extracing", length(npa_cols_to_extract), "columns for npa")
|
||||||
npa_df = all_df[, npa_cols_to_extract]
|
npa_df = adult_df[, npa_cols_to_extract]
|
||||||
}else{
|
}else{
|
||||||
cat("FAIL: length mismatch"
|
cat("FAIL: length mismatch"
|
||||||
, "Expeceted to extract:", length(meta_data_cols) + length(npa_cols_clean), "columns"
|
, "Expeceted to extract:", length(meta_data_cols) + length(npa_cols_clean), "columns"
|
||||||
|
@@ -137,8 +137,11 @@ if ( length(npa_cols_to_extract) == length(meta_data_cols) + length(npa_cols_cle
|
||||||
|
|
||||||
colnames_npa_df = colnames(npa_df); colnames_npa_df
|
colnames_npa_df = colnames(npa_df); colnames_npa_df
|
||||||
|
|
||||||
|
#==============
|
||||||
|
# quick checks
|
||||||
|
#==============
|
||||||
colnames_check = as.data.frame(cbind(colnames_sam_df, colnames_serum_df, colnames_npa_df))
|
colnames_check = as.data.frame(cbind(colnames_sam_df, colnames_serum_df, colnames_npa_df))
|
||||||
tail(colnames_check)
|
tail(colnames_check) # gives a warning message due to differing no. of rows for cbind!
|
||||||
|
|
||||||
# put NA where a match doesn't exist
|
# put NA where a match doesn't exist
|
||||||
# unmatched lengths
|
# unmatched lengths
|
||||||
|
@@ -168,12 +171,16 @@ quick_check = as.data.frame(cbind(metadata_all$mosaic
|
||||||
, metadata_all$adult
|
, metadata_all$adult
|
||||||
, metadata_all$age
|
, metadata_all$age
|
||||||
, metadata_all$obesity
|
, metadata_all$obesity
|
||||||
, metadata_all$obese2))
|
, metadata_all$obese2
|
||||||
|
))
|
||||||
colnames(quick_check) = c("mosaic", "adult", "age", "obesity", "obese2")
|
colnames(quick_check) = c("mosaic", "adult", "age", "obesity", "obese2")
|
||||||
|
|
||||||
##########################################################################
|
##########################################################################
|
||||||
# LF data
|
# LF data
|
||||||
##########################################################################
|
##########################################################################
|
||||||
|
cols_to_omit = c("adult", "obese2"
|
||||||
|
, "height", "height_unit", "weight"
|
||||||
|
, "weight_unit", "visual_est_bmi", "bmi_rating")
|
||||||
|
|
||||||
#==============
|
#==============
|
||||||
# lf data: sam
|
# lf data: sam
|
||||||
|
@@ -181,19 +188,11 @@ colnames(quick_check) = c("mosaic", "adult", "age", "obesity", "obese2")
|
||||||
str(sam_df)
|
str(sam_df)
|
||||||
table(sam_df$obesity); table(sam_df$obese2)
|
table(sam_df$obesity); table(sam_df$obese2)
|
||||||
|
|
||||||
sam_df_adults = sam_df[sam_df$adult == 1,]
|
#sam_df_adults = sam_df[sam_df$adult == 1,] # resolved at source; only dealing with age to define adults
|
||||||
|
sam_df_adults = sam_df
|
||||||
cols_to_omit = c("type"
|
|
||||||
#, "flustat"
|
|
||||||
#, "obesity"
|
|
||||||
#, "obese2"
|
|
||||||
, "height", "height_unit", "weight"
|
|
||||||
, "weight_unit", "visual_est_bmi", "bmi_rating")
|
|
||||||
|
|
||||||
#sam_df_adults_clean = sam_df_adults[!cols_to_omit]
|
|
||||||
|
|
||||||
wf_cols = colnames(sam_df_adults)[!colnames(sam_df_adults)%in%cols_to_omit]
|
wf_cols = colnames(sam_df_adults)[!colnames(sam_df_adults)%in%cols_to_omit]
|
||||||
sam_df_adults_clean = sam_df_adults[wf_cols]
|
sam_wf = sam_df_adults[wf_cols]
|
||||||
|
|
||||||
pivot_cols = meta_data_cols
|
pivot_cols = meta_data_cols
|
||||||
# subselect pivot_cols
|
# subselect pivot_cols
|
||||||
|
@@ -208,25 +207,25 @@ if (length(pivot_cols) == length(meta_data_cols) - length(cols_to_omit)){
|
||||||
quit()
|
quit()
|
||||||
}
|
}
|
||||||
|
|
||||||
expected_rows_sam_lf = nrow(sam_df_adults_clean) * (length(sam_df_adults_clean) - length(pivot_cols)); expected_rows_sam_lf
|
expected_rows_sam_lf = nrow(sam_wf) * (length(sam_wf) - length(pivot_cols)); expected_rows_sam_lf
|
||||||
|
|
||||||
# using regex:
|
# using regex:
|
||||||
sam_adults_lf = sam_df_adults_clean %>%
|
sam_lf = sam_wf %>%
|
||||||
tidyr::pivot_longer(-all_of(pivot_cols)
|
tidyr::pivot_longer(-all_of(pivot_cols)
|
||||||
, names_to = c("mediator", "sample_type", "timepoint")
|
, names_to = c("mediator", "sample_type", "timepoint")
|
||||||
, names_pattern = "(.*)_(.*)([1-3]{1})"
|
, names_pattern = "(.*)_(.*)([1-3]{1})"
|
||||||
, values_to = "value")
|
, values_to = "value")
|
||||||
|
|
||||||
if (
|
if (
|
||||||
(nrow(sam_adults_lf) == expected_rows_sam_lf) & (sum(table(is.na(sam_adults_lf$mediator))) == expected_rows_sam_lf)
|
(nrow(sam_lf) == expected_rows_sam_lf) & (sum(table(is.na(sam_lf$mediator))) == expected_rows_sam_lf)
|
||||||
) {
|
) {
|
||||||
cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
|
cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
|
||||||
, "\nNo. of rows: ", nrow(sam_adults_lf)
|
, "\nNo. of rows: ", nrow(sam_lf)
|
||||||
, "\nNo. of cols: ", ncol(sam_adults_lf)))
|
, "\nNo. of cols: ", ncol(sam_lf)))
|
||||||
} else{
|
} else{
|
||||||
cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
|
cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
|
||||||
, "\nExpected no. of rows: ", expected_rows_sam_lf
|
, "\nExpected no. of rows: ", expected_rows_sam_lf
|
||||||
, "\nGot: ", nrow(sam_adults_lf)
|
, "\nGot: ", nrow(sam_lf)
|
||||||
, "\ncheck expected rows calculation!"))
|
, "\ncheck expected rows calculation!"))
|
||||||
quit()
|
quit()
|
||||||
}
|
}
|
||||||
|
@@ -241,11 +240,11 @@ if (
|
||||||
str(serum_df)
|
str(serum_df)
|
||||||
table(serum_df$obesity); table(serum_df$obese2)
|
table(serum_df$obesity); table(serum_df$obese2)
|
||||||
|
|
||||||
serum_df_adults = serum_df[serum_df$adult == 1,]
|
#serum_df_adults = serum_df[serum_df$adult == 1,] # extract based on age
|
||||||
|
serum_df_adults = serum_df
|
||||||
|
|
||||||
#serum_df_adults_clean = serum_df_adults[!cols_to_omit]
|
|
||||||
wf_cols = colnames(serum_df_adults)[!colnames(serum_df_adults)%in%cols_to_omit]
|
wf_cols = colnames(serum_df_adults)[!colnames(serum_df_adults)%in%cols_to_omit]
|
||||||
serum_df_adults_clean = serum_df_adults[wf_cols]
|
serum_wf = serum_df_adults[wf_cols]
|
||||||
|
|
||||||
pivot_cols = meta_data_cols
|
pivot_cols = meta_data_cols
|
||||||
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit];pivot_cols
|
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit];pivot_cols
|
||||||
|
@@ -259,25 +258,25 @@ if (length(pivot_cols) == length(meta_data_cols) - length(cols_to_omit)){
|
||||||
quit()
|
quit()
|
||||||
}
|
}
|
||||||
|
|
||||||
expected_rows_serum_lf = nrow(serum_df_adults_clean) * (length(serum_df_adults_clean) - length(pivot_cols)); expected_rows_serum_lf
|
expected_rows_serum_lf = nrow(serum_wf) * (length(serum_wf) - length(pivot_cols)); expected_rows_serum_lf
|
||||||
|
|
||||||
# using regex:
|
# using regex:
|
||||||
serum_adults_lf = serum_df_adults_clean %>%
|
serum_lf = serum_wf %>%
|
||||||
tidyr::pivot_longer(-all_of(pivot_cols)
|
tidyr::pivot_longer(-all_of(pivot_cols)
|
||||||
, names_to = c("mediator", "sample_type", "timepoint")
|
, names_to = c("mediator", "sample_type", "timepoint")
|
||||||
, names_pattern = "(.*)_(.*)([1-3]{1})"
|
, names_pattern = "(.*)_(.*)([1-3]{1})"
|
||||||
, values_to = "value")
|
, values_to = "value")
|
||||||
|
|
||||||
if (
|
if (
|
||||||
(nrow(serum_adults_lf) == expected_rows_serum_lf) & (sum(table(is.na(serum_adults_lf$mediator))) == expected_rows_serum_lf)
|
(nrow(serum_lf) == expected_rows_serum_lf) & (sum(table(is.na(serum_lf$mediator))) == expected_rows_serum_lf)
|
||||||
) {
|
) {
|
||||||
cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
|
cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
|
||||||
, "\nNo. of rows: ", nrow(serum_adults_lf)
|
, "\nNo. of rows: ", nrow(serum_lf)
|
||||||
, "\nNo. of cols: ", ncol(serum_adults_lf)))
|
, "\nNo. of cols: ", ncol(serum_lf)))
|
||||||
} else{
|
} else{
|
||||||
cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
|
cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
|
||||||
, "\nExpected no. of rows: ", expected_rows_serum_lf
|
, "\nExpected no. of rows: ", expected_rows_serum_lf
|
||||||
, "\nGot: ", nrow(serum_adults_lf)
|
, "\nGot: ", nrow(serum_lf)
|
||||||
, "\ncheck expected rows calculation!"))
|
, "\ncheck expected rows calculation!"))
|
||||||
quit()
|
quit()
|
||||||
}
|
}
|
||||||
|
@@ -288,11 +287,11 @@ if (
|
||||||
str(npa_df)
|
str(npa_df)
|
||||||
table(npa_df$obesity); table(npa_df$obese2)
|
table(npa_df$obesity); table(npa_df$obese2)
|
||||||
|
|
||||||
npa_df_adults = npa_df[npa_df$adult == 1,]
|
#npa_df_adults = npa_df[npa_df$adult == 1,] # extract based on age
|
||||||
#npa_df_adults_clean = npa_df_adults[!cols_to_omit]
|
npa_df_adults = npa_df
|
||||||
|
|
||||||
wf_cols = colnames(npa_df_adults)[!colnames(npa_df_adults)%in%cols_to_omit]
|
wf_cols = colnames(npa_df_adults)[!colnames(npa_df_adults)%in%cols_to_omit]
|
||||||
npa_df_adults_clean = npa_df_adults[wf_cols]
|
npa_wf = npa_df_adults[wf_cols]
|
||||||
|
|
||||||
pivot_cols = meta_data_cols
|
pivot_cols = meta_data_cols
|
||||||
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit];pivot_cols
|
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit];pivot_cols
|
||||||
|
@@ -306,25 +305,25 @@ if (length(pivot_cols) == length(meta_data_cols) - length(cols_to_omit)){
|
||||||
quit()
|
quit()
|
||||||
}
|
}
|
||||||
|
|
||||||
expected_rows_npa_lf = nrow(npa_df_adults_clean) * (length(npa_df_adults_clean) - length(pivot_cols)); expected_rows_npa_lf
|
expected_rows_npa_lf = nrow(npa_wf) * (length(npa_wf) - length(pivot_cols)); expected_rows_npa_lf
|
||||||
|
|
||||||
# using regex:
|
# using regex:
|
||||||
npa_adults_lf = npa_df_adults_clean %>%
|
npa_lf = npa_wf %>%
|
||||||
tidyr::pivot_longer(-all_of(pivot_cols)
|
tidyr::pivot_longer(-all_of(pivot_cols)
|
||||||
, names_to = c("mediator", "sample_type", "timepoint")
|
, names_to = c("mediator", "sample_type", "timepoint")
|
||||||
, names_pattern = "(.*)_(.*)([1-3]{1})"
|
, names_pattern = "(.*)_(.*)([1-3]{1})"
|
||||||
, values_to = "value")
|
, values_to = "value")
|
||||||
|
|
||||||
if (
|
if (
|
||||||
(nrow(npa_adults_lf) == expected_rows_npa_lf) & (sum(table(is.na(npa_adults_lf$mediator))) == expected_rows_npa_lf)
|
(nrow(npa_lf) == expected_rows_npa_lf) & (sum(table(is.na(npa_lf$mediator))) == expected_rows_npa_lf)
|
||||||
) {
|
) {
|
||||||
cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
|
cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
|
||||||
, "\nNo. of rows: ", nrow(npa_adults_lf)
|
, "\nNo. of rows: ", nrow(npa_lf)
|
||||||
, "\nNo. of cols: ", ncol(npa_adults_lf)))
|
, "\nNo. of cols: ", ncol(npa_lf)))
|
||||||
} else{
|
} else{
|
||||||
cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
|
cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
|
||||||
, "\nExpected no. of rows: ", expected_rows_npa_lf
|
, "\nExpected no. of rows: ", expected_rows_npa_lf
|
||||||
, "\nGot: ", nrow(npa_adults_lf)
|
, "\nGot: ", nrow(npa_lf)
|
||||||
, "\ncheck expected rows calculation!"))
|
, "\ncheck expected rows calculation!"))
|
||||||
quit()
|
quit()
|
||||||
}
|
}
|
||||||
|
@@ -334,12 +333,14 @@ if (
|
||||||
rm(sam_regex, sam_regex_log_days, sam_cols, sam_cols_clean, sam_cols_i, sam_cols_to_extract, sam_cols_to_omit)
|
rm(sam_regex, sam_regex_log_days, sam_cols, sam_cols_clean, sam_cols_i, sam_cols_to_extract, sam_cols_to_omit)
|
||||||
rm(serum_regex, serum_regex_log_days, serum_cols, serum_cols_clean, serum_cols_i, serum_cols_to_extract, serum_cols_to_omit)
|
rm(serum_regex, serum_regex_log_days, serum_cols, serum_cols_clean, serum_cols_i, serum_cols_to_extract, serum_cols_to_omit)
|
||||||
rm(npa_regex, npa_regex_log_days, npa_cols, npa_cols_clean, npa_cols_i, npa_cols_to_extract, npa_cols_to_omit)
|
rm(npa_regex, npa_regex_log_days, npa_cols, npa_cols_clean, npa_cols_i, npa_cols_to_extract, npa_cols_to_omit)
|
||||||
rm(all_df)
|
rm(adult_df)
|
||||||
rm(colnames_check)
|
rm(colnames_check)
|
||||||
rm(i, j, expected_cols, start, wf_cols, extra_cols, cols_to_omit)
|
rm(i, j
|
||||||
|
#, expected_cols
|
||||||
|
, start, wf_cols, extra_cols, cols_to_omit)
|
||||||
|
|
||||||
# rm not_clean dfs
|
# rm not_clean dfs
|
||||||
rm(sam_df_adults, serum_df_adults, npa_df_adults)
|
rm(sam_df_adults, serum_df_adults, npa_df_adults)
|
||||||
|
|
||||||
# rm df containing non-adults
|
# rm df
|
||||||
rm(sam_df, serum_df, npa_df)
|
rm(sam_df, serum_df, npa_df)
|
||||||
|
|
read_data.R (26 changes)
|
@@ -26,9 +26,16 @@ all_df <- read.csv("/home/backup/MOSAIC/MEDIATOR_Data/master_file/Mosaic_master_
|
||||||
, fileEncoding = 'latin1')
|
, fileEncoding = 'latin1')
|
||||||
|
|
||||||
# meta data columns
|
# meta data columns
|
||||||
meta_data_cols = c("mosaic", "gender", "age", "adult", "flustat", "type"
|
meta_data_cols = c("mosaic", "gender", "age"
|
||||||
, "obesity", "obese2", "height", "height_unit", "weight"
|
, "adult"
|
||||||
, "weight_unit", "visual_est_bmi", "bmi_rating")
|
, "flustat", "type"
|
||||||
|
, "obesity"
|
||||||
|
, "obese2"
|
||||||
|
, "height", "height_unit"
|
||||||
|
, "weight", "weight_unit"
|
||||||
|
, "ia_height_ftin", "ia_height_m", "ia_weight"
|
||||||
|
, "visual_est_bmi", "bmi_rating"
|
||||||
|
)
|
||||||
|
|
||||||
# check if these columns to select are present in the data
|
# check if these columns to select are present in the data
|
||||||
meta_data_cols%in%colnames(all_df)
|
meta_data_cols%in%colnames(all_df)
|
||||||
|
@@ -36,6 +43,19 @@ all(meta_data_cols%in%colnames(all_df))
|
||||||
|
|
||||||
metadata_all = all_df[, meta_data_cols]
|
metadata_all = all_df[, meta_data_cols]
|
||||||
|
|
||||||
|
|
||||||
|
#==============
|
||||||
|
# adult patients
|
||||||
|
#==============
|
||||||
|
|
||||||
|
adult_df = all_df[all_df$age>=18,]
|
||||||
|
|
||||||
|
if (table(adult_df$adult == 1)[[1]] == nrow(adult_df) ){
|
||||||
|
cat ("PASS: adult df extracted successfully")
|
||||||
|
} else{
|
||||||
|
cat ("FAIL: adult df number mismatch!")
|
||||||
|
}
|
||||||
|
|
||||||
#============
|
#============
|
||||||
# hc
|
# hc
|
||||||
#============
|
#============
|
||||||
|
|
|
@@ -1,315 +0,0 @@
|
||||||
#!/usr/bin/Rscript
|
|
||||||
getwd()
|
|
||||||
setwd("~/git/mosaic_2020/")
|
|
||||||
getwd()
|
|
||||||
############################################################
|
|
||||||
# TASK: unpaired (time) analysis of mediators: NPA
|
|
||||||
############################################################
|
|
||||||
#=============
|
|
||||||
# Input
|
|
||||||
#=============
|
|
||||||
source("data_extraction_formatting.R")
|
|
||||||
|
|
||||||
# clear variables
|
|
||||||
rm(sam_adults_lf, sam_df_adults_clean
|
|
||||||
, serum_adults_lf, serum_df_adults_clean)
|
|
||||||
rm(colnames_sam_df, expected_rows_sam_lf
|
|
||||||
, colnames_serum_df, expected_rows_serum_lf)
|
|
||||||
|
|
||||||
rm(pivot_cols)
|
|
||||||
|
|
||||||
my_sample_type = "npa"
|
|
||||||
#=============
|
|
||||||
# Output: unpaired analysis of time for npa
|
|
||||||
#=============
|
|
||||||
outfile_name = paste0("stats_time_unpaired_", my_sample_type, ".csv")
|
|
||||||
stats_time_unpaired = paste0(outdir_stats, outfile_name)
|
|
||||||
#%%========================================================
|
|
||||||
# data assignment for stats
|
|
||||||
wf = npa_df_adults_clean
|
|
||||||
lf = npa_adults_lf
|
|
||||||
#%%========================================================
|
|
||||||
table(lf$timepoint)
|
|
||||||
lf$timepoint = paste0("t", lf$timepoint)
|
|
||||||
|
|
||||||
########################################################################
|
|
||||||
# Unpaired stats at each timepoint b/w groups: wilcoxon UNpaired analysis with correction
|
|
||||||
#######################################################################
|
|
||||||
# with adjustment: fdr and BH are identical
|
|
||||||
my_adjust_method = "BH"
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t1
|
|
||||||
#==============
|
|
||||||
lf_t1 = lf[lf$timepoint == "t1",]
|
|
||||||
sum(is.na(lf_t1$value))
|
|
||||||
|
|
||||||
foo = lf_t1[which(is.na(lf_t1$value)),]
|
|
||||||
ci = which(is.na(lf_t1$value))
|
|
||||||
|
|
||||||
#lf_t1_comp = lf_t1[-ci,]
|
|
||||||
lf_t1_comp = lf_t1[-which(is.na(lf_t1$value)),]
|
|
||||||
stats_un_t1 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t1
|
|
||||||
, data = lf_t1_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
|
|
||||||
foo$mosaic[!unique(foo$mosaic)%in%unique(lf_t1_comp$mosaic)]
|
|
||||||
|
|
||||||
stats_un_t1$timepoint = "t1"
|
|
||||||
|
|
||||||
stats_un_t1 = as.data.frame(stats_un_t1)
|
|
||||||
class(stats_un_t1)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t1 = data.frame(table(lf_t1_comp$mediator))
|
|
||||||
colnames(n_t1) = c("mediator", "n_obs")
|
|
||||||
n_t1$mediator = as.character(n_t1$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t1), names(n_t1)); merging_cols
|
|
||||||
if (all(n_t1$mediator%in%stats_un_t1$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t1 = merge(stats_un_t1, n_t1, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t1)
|
|
||||||
, "\nncol:", ncol(stats_un_t1))
|
|
||||||
}else{
|
|
||||||
nf = n_t1$mediator[!n_t1$mediator%in%stats_un_t1$mediator]
|
|
||||||
stats_un_t1 = merge(stats_un_t1, n_t1, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t1)
|
|
||||||
, "\nncol:", ncol(stats_un_t1))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t1$p_adj_bonferroni = p.adjust(stats_un_t1$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t1)
|
|
||||||
rm(lf_t1_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t2
|
|
||||||
#==============
|
|
||||||
lf_t2 = lf[lf$timepoint == "t2",]
|
|
||||||
lf_t2_comp = lf_t2[-which(is.na(lf_t2$value)),]
|
|
||||||
|
|
||||||
stats_un_t2 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t2
|
|
||||||
, data = lf_t2_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
stats_un_t2$timepoint = "t2"
|
|
||||||
|
|
||||||
stats_un_t2 = as.data.frame(stats_un_t2)
|
|
||||||
class(stats_un_t2)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t2 = data.frame(table(lf_t2_comp$mediator))
|
|
||||||
colnames(n_t2) = c("mediator", "n_obs")
|
|
||||||
n_t2$mediator = as.character(n_t2$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t2), names(n_t2)); merging_cols
|
|
||||||
if (all(n_t2$mediator%in%stats_un_t2$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t2 = merge(stats_un_t2, n_t2, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t2)
|
|
||||||
, "\nncol:", ncol(stats_un_t2))
|
|
||||||
}else{
|
|
||||||
nf = n_t2$mediator[!n_t2$mediator%in%stats_un_t2$mediator]
|
|
||||||
stats_un_t2 = merge(stats_un_t2, n_t2, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t2)
|
|
||||||
, "\nncol:", ncol(stats_un_t2))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t2$p_adj_bonferroni = p.adjust(stats_un_t2$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t2)
|
|
||||||
rm(lf_t2_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t3
|
|
||||||
#==============
|
|
||||||
lf_t3 = lf[lf$timepoint == "t3",]
|
|
||||||
lf_t3_comp = lf_t3[-which(is.na(lf_t3$value)),]
|
|
||||||
|
|
||||||
stats_un_t3 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t3
|
|
||||||
, data = lf_t3_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
|
|
||||||
stats_un_t3$timepoint = "t3"
|
|
||||||
|
|
||||||
stats_un_t3 = as.data.frame(stats_un_t3)
|
|
||||||
class(stats_un_t3)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t3 = data.frame(table(lf_t3_comp$mediator))
|
|
||||||
colnames(n_t3) = c("mediator", "n_obs")
|
|
||||||
n_t3$mediator = as.character(n_t3$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t3), names(n_t3)); merging_cols
|
|
||||||
if (all(n_t3$mediator%in%stats_un_t3$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t3 = merge(stats_un_t3, n_t3, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t3)
|
|
||||||
, "\nncol:", ncol(stats_un_t3))
|
|
||||||
}else{
|
|
||||||
nf = n_t3$mediator[!n_t3$mediator%in%stats_un_t3$mediator]
|
|
||||||
stats_un_t3 = merge(stats_un_t3, n_t3, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t3)
|
|
||||||
, "\nncol:", ncol(stats_un_t3))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t3$p_adj_bonferroni = p.adjust(stats_un_t3$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t3)
|
|
||||||
rm(lf_t3_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# Rbind these dfs
|
|
||||||
#==============
|
|
||||||
str(stats_un_t1);str(stats_un_t2); str(stats_un_t3)
|
|
||||||
|
|
||||||
n_dfs = 3
|
|
||||||
|
|
||||||
if ( all.equal(nrow(stats_un_t1), nrow(stats_un_t2), nrow(stats_un_t3)) &&
|
|
||||||
all.equal(ncol(stats_un_t1), ncol(stats_un_t2), ncol(stats_un_t3)) ) {
|
|
||||||
expected_rows = nrow(stats_un_t1) * n_dfs
|
|
||||||
expected_cols = ncol(stats_un_t1)
|
|
||||||
print("PASS: expected_rows and cols variables generated for downstream sanity checks")
|
|
||||||
}else{
|
|
||||||
cat("FAIL: dfs have different no. of rows and cols"
|
|
||||||
, "\nCheck harcoded value of n_dfs"
|
|
||||||
, "\nexpected_rows and cols could not be generated")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( all.equal(colnames(stats_un_t1), colnames(stats_un_t2), colnames(stats_un_t3)) ){
|
|
||||||
print("PASS: colnames match. Rbind the 3 dfs...")
|
|
||||||
combined_unpaired_stats = rbind(stats_un_t1, stats_un_t2, stats_un_t3)
|
|
||||||
} else{
|
|
||||||
cat("FAIL: cannot combined dfs. Colnames don't match!")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( nrow(combined_unpaired_stats) == expected_rows && ncol(combined_unpaired_stats) == expected_cols ){
|
|
||||||
cat("PASS: combined_df has expected dimension"
|
|
||||||
, "\nNo. of rows in combined_df:", nrow(combined_unpaired_stats)
|
|
||||||
, "\nNo. of cols in combined_df:", ncol(combined_unpaired_stats) )
|
|
||||||
}else{
|
|
||||||
cat("FAIL: combined_df dimension mismatch")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
#######################################################################
|
|
||||||
#=================
|
|
||||||
# formatting df
|
|
||||||
#=================
|
|
||||||
# delete: unnecessary column
|
|
||||||
combined_unpaired_stats = subset(combined_unpaired_stats, select = -c(.y.))
|
|
||||||
|
|
||||||
# add sample_type
|
|
||||||
cat("Adding sample type info as a column", my_sample_type, "...")
|
|
||||||
combined_unpaired_stats$sample_type = my_sample_type
|
|
||||||
|
|
||||||
# add: reflect stats method correctly i.e paired or unpaired
|
|
||||||
# incase there are NA due to LLODs, the gsub won't work!
|
|
||||||
#combined_unpaired_stats$method = gsub("Wilcoxon", "Wilcoxon_unpaired", combined_unpaired_stats$method)
|
|
||||||
combined_unpaired_stats$method = "wilcoxon unpaired"
|
|
||||||
combined_unpaired_stats$method
|
|
||||||
|
|
||||||
# add an extra column for padjust_signif: my_adjust_method
|
|
||||||
combined_unpaired_stats$padjust_signif = combined_unpaired_stats$p.adj
|
|
||||||
# add appropriate symbols for padjust_signif: my_adjust_method
|
|
||||||
combined_unpaired_stats = dplyr::mutate(combined_unpaired_stats, padjust_signif = case_when(padjust_signif == 0.05 ~ "."
|
|
||||||
, padjust_signif <=0.0001 ~ '****'
|
|
||||||
, padjust_signif <=0.001 ~ '***'
|
|
||||||
, padjust_signif <=0.01 ~ '**'
|
|
||||||
, padjust_signif <0.05 ~ '*'
|
|
||||||
, TRUE ~ 'ns'))
|
|
||||||
# add an extra column for p_bon_signif
|
|
||||||
combined_unpaired_stats$p_bon_signif = combined_unpaired_stats$p_adj_bonferroni
|
|
||||||
# add appropriate symbols for p_bon_signif
|
|
||||||
combined_unpaired_stats = dplyr::mutate(combined_unpaired_stats, p_bon_signif = case_when(p_bon_signif == 0.05 ~ "."
|
|
||||||
, p_bon_signif <=0.0001 ~ '****'
|
|
||||||
, p_bon_signif <=0.001 ~ '***'
|
|
||||||
, p_bon_signif <=0.01 ~ '**'
|
|
||||||
, p_bon_signif <0.05 ~ '*'
|
|
||||||
, TRUE ~ 'ns'))
|
|
||||||
# reorder columns
|
|
||||||
print("preparing to reorder columns...")
|
|
||||||
colnames(combined_unpaired_stats)
|
|
||||||
my_col_order2 = c("mediator"
|
|
||||||
, "timepoint"
|
|
||||||
, "sample_type"
|
|
||||||
, "n_obs"
|
|
||||||
, "group1"
|
|
||||||
, "group2"
|
|
||||||
, "method"
|
|
||||||
, "p"
|
|
||||||
, "p.format"
|
|
||||||
, "p.signif"
|
|
||||||
, "p.adj"
|
|
||||||
, "padjust_signif"
|
|
||||||
, "p_adj_bonferroni"
|
|
||||||
, "p_bon_signif")
|
|
||||||
|
|
||||||
if( length(my_col_order2) == ncol(combined_unpaired_stats) && (all(my_col_order2%in%colnames(combined_unpaired_stats))) ){
|
|
||||||
print("PASS: Reordering columns...")
|
|
||||||
combined_unpaired_stats_f = combined_unpaired_stats[, my_col_order2]
|
|
||||||
print("Successful: column reordering")
|
|
||||||
print("formatted df called:'combined_unpaired_stats_f'")
|
|
||||||
cat('\nformatted df has the following dimensions\n')
|
|
||||||
print(dim(combined_unpaired_stats_f ))
|
|
||||||
} else{
|
|
||||||
cat(paste0("FAIL:Cannot reorder columns, length mismatch"
|
|
||||||
, "\nExpected column order for: ", ncol(combined_unpaired_stats)
|
|
||||||
, "\nGot:", length(my_col_order2)))
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
# assign nice column names like replace "." with "_"
|
|
||||||
colnames(combined_unpaired_stats_f) = c("mediator"
|
|
||||||
, "timepoint"
|
|
||||||
, "sample_type"
|
|
||||||
, "n_obs"
|
|
||||||
, "group1"
|
|
||||||
, "group2"
|
|
||||||
, "method"
|
|
||||||
, "p"
|
|
||||||
, "p_format"
|
|
||||||
, "p_signif"
|
|
||||||
, paste0("p_adj_fdr_", my_adjust_method)
|
|
||||||
, paste0("p_", my_adjust_method, "_signif")
|
|
||||||
, "p_adj_bonferroni"
|
|
||||||
, "p_bon_signif")
|
|
||||||
|
|
||||||
colnames(combined_unpaired_stats_f)
|
|
||||||
|
|
||||||
#******************
|
|
||||||
# write output file
|
|
||||||
#******************
|
|
||||||
cat("UNpaired stats for groups will be:", stats_time_unpaired)
|
|
||||||
write.csv(combined_unpaired_stats_f, stats_time_unpaired, row.names = FALSE)
|
|
|
@@ -1,319 +0,0 @@
|
||||||
#!/usr/bin/Rscript
|
|
||||||
getwd()
|
|
||||||
setwd("~/git/mosaic_2020/")
|
|
||||||
getwd()
|
|
||||||
############################################################
|
|
||||||
# TASK: unpaired (time) analysis of mediators: SAM
|
|
||||||
############################################################
|
|
||||||
#=============
|
|
||||||
# Input
|
|
||||||
#=============
|
|
||||||
source("data_extraction_formatting.R")
|
|
||||||
|
|
||||||
# clear variables
|
|
||||||
rm(npa_adults_lf, npa_df_adults_clean
|
|
||||||
, serum_adults_lf, serum_df_adults_clean)
|
|
||||||
rm(colnames_npa_df, expected_rows_npa_lf
|
|
||||||
, colnames_serum_df, expected_rows_serum_lf)
|
|
||||||
|
|
||||||
rm(pivot_cols)
|
|
||||||
|
|
||||||
my_sample_type = "sam"
|
|
||||||
#=============
|
|
||||||
# Output: unpaired analysis of time for sam
|
|
||||||
#=============
|
|
||||||
outfile_name = paste0("stats_time_unpaired_", my_sample_type, ".csv")
|
|
||||||
stats_time_unpaired = paste0(outdir_stats, outfile_name)
|
|
||||||
#%%========================================================
|
|
||||||
# data assignment for stats
|
|
||||||
wf = sam_df_adults_clean
|
|
||||||
lf = sam_adults_lf
|
|
||||||
#%%========================================================
|
|
||||||
table(lf$timepoint)
|
|
||||||
lf$timepoint = paste0("t", lf$timepoint)
|
|
||||||
|
|
||||||
########################################################################
|
|
||||||
# Unpaired stats at each timepoint b/w groups: wilcoxon UNpaired analysis with correction
|
|
||||||
#######################################################################
|
|
||||||
# with adjustment: fdr and BH are identical
|
|
||||||
my_adjust_method = "BH"
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t1
|
|
||||||
#==============
|
|
||||||
lf_t1 = lf[lf$timepoint == "t1",]
|
|
||||||
sum(is.na(lf_t1$value))
|
|
||||||
|
|
||||||
foo = lf_t1[which(is.na(lf_t1$value)),]
|
|
||||||
ci = which(is.na(lf_t1$value))
|
|
||||||
|
|
||||||
#lf_t1_comp = lf_t1[-ci,]
|
|
||||||
lf_t1_comp = lf_t1[-which(is.na(lf_t1$value)),]
|
|
||||||
stats_un_t1 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t1
|
|
||||||
, data = lf_t1_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
|
|
||||||
foo$mosaic[!unique(foo$mosaic)%in%unique(lf_t1_comp$mosaic)]
|
|
||||||
|
|
||||||
stats_un_t1$timepoint = "t1"
|
|
||||||
|
|
||||||
stats_un_t1 = as.data.frame(stats_un_t1)
|
|
||||||
class(stats_un_t1)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t1 = data.frame(table(lf_t1_comp$mediator))
|
|
||||||
colnames(n_t1) = c("mediator", "n_obs")
|
|
||||||
n_t1$mediator = as.character(n_t1$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t1), names(n_t1)); merging_cols
|
|
||||||
if (all(n_t1$mediator%in%stats_un_t1$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t1 = merge(stats_un_t1, n_t1, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t1)
|
|
||||||
, "\nncol:", ncol(stats_un_t1))
|
|
||||||
}else{
|
|
||||||
nf = n_t1$mediator[!n_t1$mediator%in%stats_un_t1$mediator]
|
|
||||||
stats_un_t1 = merge(stats_un_t1, n_t1, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t1)
|
|
||||||
, "\nncol:", ncol(stats_un_t1))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t1$p_adj_bonferroni = p.adjust(stats_un_t1$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t1)
|
|
||||||
rm(lf_t1_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t2
|
|
||||||
#==============
|
|
||||||
lf_t2 = lf[lf$timepoint == "t2",]
|
|
||||||
lf_t2_comp = lf_t2[-which(is.na(lf_t2$value)),]
|
|
||||||
|
|
||||||
stats_un_t2 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t2
|
|
||||||
, data = lf_t2_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
stats_un_t2$timepoint = "t2"
|
|
||||||
|
|
||||||
stats_un_t2 = as.data.frame(stats_un_t2)
|
|
||||||
class(stats_un_t2)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t2 = data.frame(table(lf_t2_comp$mediator))
|
|
||||||
colnames(n_t2) = c("mediator", "n_obs")
|
|
||||||
n_t2$mediator = as.character(n_t2$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t2), names(n_t2)); merging_cols
|
|
||||||
if (all(n_t2$mediator%in%stats_un_t2$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t2 = merge(stats_un_t2, n_t2, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t2)
|
|
||||||
, "\nncol:", ncol(stats_un_t2))
|
|
||||||
}else{
|
|
||||||
nf = n_t2$mediator[!n_t2$mediator%in%stats_un_t2$mediator]
|
|
||||||
stats_un_t2 = merge(stats_un_t2, n_t2, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t2)
|
|
||||||
, "\nncol:", ncol(stats_un_t2))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t2$p_adj_bonferroni = p.adjust(stats_un_t2$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t2)
|
|
||||||
rm(lf_t2_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t3
|
|
||||||
#==============
|
|
||||||
lf_t3 = lf[lf$timepoint == "t3",]
|
|
||||||
lf_t3_comp = lf_t3[-which(is.na(lf_t3$value)),]
|
|
||||||
|
|
||||||
stats_un_t3 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t3
|
|
||||||
, data = lf_t3_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
|
|
||||||
stats_un_t3$timepoint = "t3"
|
|
||||||
|
|
||||||
stats_un_t3 = as.data.frame(stats_un_t3)
|
|
||||||
class(stats_un_t3)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t3 = data.frame(table(lf_t3_comp$mediator))
|
|
||||||
colnames(n_t3) = c("mediator", "n_obs")
|
|
||||||
n_t3$mediator = as.character(n_t3$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t3), names(n_t3)); merging_cols
|
|
||||||
if (all(n_t3$mediator%in%stats_un_t3$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t3 = merge(stats_un_t3, n_t3, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t3)
|
|
||||||
, "\nncol:", ncol(stats_un_t3))
|
|
||||||
}else{
|
|
||||||
nf = n_t3$mediator[!n_t3$mediator%in%stats_un_t3$mediator]
|
|
||||||
stats_un_t3 = merge(stats_un_t3, n_t3, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t3)
|
|
||||||
, "\nncol:", ncol(stats_un_t3))
|
|
||||||
}
|
|
||||||
|
|
||||||
# check: satisfied!!!!
|
|
||||||
# FIXME: supply the col name automatically?
|
|
||||||
wilcox.test(wf$ifna2a_sam3[wf$obesity == 1], wf$ifna2a_sam3[wf$obesity == 0])
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t3$p_adj_bonferroni = p.adjust(stats_un_t3$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t3)
|
|
||||||
rm(lf_t3_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# Rbind these dfs
|
|
||||||
#==============
|
|
||||||
str(stats_un_t1);str(stats_un_t2); str(stats_un_t3)
|
|
||||||
|
|
||||||
n_dfs = 3
|
|
||||||
|
|
||||||
if ( all.equal(nrow(stats_un_t1), nrow(stats_un_t2), nrow(stats_un_t3)) &&
|
|
||||||
all.equal(ncol(stats_un_t1), ncol(stats_un_t2), ncol(stats_un_t3)) ) {
|
|
||||||
expected_rows = nrow(stats_un_t1) * n_dfs
|
|
||||||
expected_cols = ncol(stats_un_t1)
|
|
||||||
print("PASS: expected_rows and cols variables generated for downstream sanity checks")
|
|
||||||
}else{
|
|
||||||
cat("FAIL: dfs have different no. of rows and cols"
|
|
||||||
, "\nCheck harcoded value of n_dfs"
|
|
||||||
, "\nexpected_rows and cols could not be generated")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( all.equal(colnames(stats_un_t1), colnames(stats_un_t2), colnames(stats_un_t3)) ){
|
|
||||||
print("PASS: colnames match. Rbind the 3 dfs...")
|
|
||||||
combined_unpaired_stats = rbind(stats_un_t1, stats_un_t2, stats_un_t3)
|
|
||||||
} else{
|
|
||||||
cat("FAIL: cannot combined dfs. Colnames don't match!")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( nrow(combined_unpaired_stats) == expected_rows && ncol(combined_unpaired_stats) == expected_cols ){
|
|
||||||
cat("PASS: combined_df has expected dimension"
|
|
||||||
, "\nNo. of rows in combined_df:", nrow(combined_unpaired_stats)
|
|
||||||
, "\nNo. of cols in combined_df:", ncol(combined_unpaired_stats) )
|
|
||||||
}else{
|
|
||||||
cat("FAIL: combined_df dimension mismatch")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
#######################################################################
|
|
||||||
#=================
|
|
||||||
# formatting df
|
|
||||||
#=================
|
|
||||||
# delete: unnecessary column
|
|
||||||
combined_unpaired_stats = subset(combined_unpaired_stats, select = -c(.y.))
|
|
||||||
|
|
||||||
# add sample_type
|
|
||||||
cat("Adding sample type info as a column", my_sample_type, "...")
|
|
||||||
combined_unpaired_stats$sample_type = my_sample_type
|
|
||||||
|
|
||||||
# add: reflect stats method correctly i.e paired or unpaired
|
|
||||||
# incase there are NA due to LLODs, the gsub won't work!
|
|
||||||
#combined_unpaired_stats$method = gsub("Wilcoxon", "Wilcoxon_unpaired", combined_unpaired_stats$method)
|
|
||||||
combined_unpaired_stats$method = "wilcoxon unpaired"
|
|
||||||
combined_unpaired_stats$method
|
|
||||||
|
|
||||||
# add an extra column for padjust_signif: my_adjust_method
|
|
||||||
combined_unpaired_stats$padjust_signif = combined_unpaired_stats$p.adj
|
|
||||||
# add appropriate symbols for padjust_signif: my_adjust_method
|
|
||||||
combined_unpaired_stats = dplyr::mutate(combined_unpaired_stats, padjust_signif = case_when(padjust_signif == 0.05 ~ "."
|
|
||||||
, padjust_signif <=0.0001 ~ '****'
|
|
||||||
, padjust_signif <=0.001 ~ '***'
|
|
||||||
, padjust_signif <=0.01 ~ '**'
|
|
||||||
, padjust_signif <0.05 ~ '*'
|
|
||||||
, TRUE ~ 'ns'))
|
|
||||||
# add an extra column for p_bon_signif
|
|
||||||
combined_unpaired_stats$p_bon_signif = combined_unpaired_stats$p_adj_bonferroni
|
|
||||||
# add appropriate symbols for p_bon_signif
|
|
||||||
combined_unpaired_stats = dplyr::mutate(combined_unpaired_stats, p_bon_signif = case_when(p_bon_signif == 0.05 ~ "."
|
|
||||||
, p_bon_signif <=0.0001 ~ '****'
|
|
||||||
, p_bon_signif <=0.001 ~ '***'
|
|
||||||
, p_bon_signif <=0.01 ~ '**'
|
|
||||||
, p_bon_signif <0.05 ~ '*'
|
|
||||||
, TRUE ~ 'ns'))
|
|
||||||
# reorder columns
|
|
||||||
print("preparing to reorder columns...")
|
|
||||||
colnames(combined_unpaired_stats)
|
|
||||||
my_col_order2 = c("mediator"
|
|
||||||
, "timepoint"
|
|
||||||
, "sample_type"
|
|
||||||
, "n_obs"
|
|
||||||
, "group1"
|
|
||||||
, "group2"
|
|
||||||
, "method"
|
|
||||||
, "p"
|
|
||||||
, "p.format"
|
|
||||||
, "p.signif"
|
|
||||||
, "p.adj"
|
|
||||||
, "padjust_signif"
|
|
||||||
, "p_adj_bonferroni"
|
|
||||||
, "p_bon_signif")
|
|
||||||
|
|
||||||
if( length(my_col_order2) == ncol(combined_unpaired_stats) && (all(my_col_order2%in%colnames(combined_unpaired_stats))) ){
|
|
||||||
print("PASS: Reordering columns...")
|
|
||||||
combined_unpaired_stats_f = combined_unpaired_stats[, my_col_order2]
|
|
||||||
print("Successful: column reordering")
|
|
||||||
print("formatted df called:'combined_unpaired_stats_f'")
|
|
||||||
cat('\nformatted df has the following dimensions\n')
|
|
||||||
print(dim(combined_unpaired_stats_f ))
|
|
||||||
} else{
|
|
||||||
cat(paste0("FAIL:Cannot reorder columns, length mismatch"
|
|
||||||
, "\nExpected column order for: ", ncol(combined_unpaired_stats)
|
|
||||||
, "\nGot:", length(my_col_order2)))
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
# assign nice column names like replace "." with "_"
|
|
||||||
colnames(combined_unpaired_stats_f) = c("mediator"
|
|
||||||
, "timepoint"
|
|
||||||
, "sample_type"
|
|
||||||
, "n_obs"
|
|
||||||
, "group1"
|
|
||||||
, "group2"
|
|
||||||
, "method"
|
|
||||||
, "p"
|
|
||||||
, "p_format"
|
|
||||||
, "p_signif"
|
|
||||||
, paste0("p_adj_fdr_", my_adjust_method)
|
|
||||||
, paste0("p_", my_adjust_method, "_signif")
|
|
||||||
, "p_adj_bonferroni"
|
|
||||||
, "p_bon_signif")
|
|
||||||
|
|
||||||
colnames(combined_unpaired_stats_f)
|
|
||||||
|
|
||||||
#******************
|
|
||||||
# write output file
|
|
||||||
#******************
|
|
||||||
cat("UNpaired stats for groups will be:", stats_time_unpaired)
|
|
||||||
write.csv(combined_unpaired_stats_f, stats_time_unpaired, row.names = FALSE)
|
|
|
@@ -1,316 +0,0 @@
|
||||||
#!/usr/bin/Rscript
|
|
||||||
getwd()
|
|
||||||
setwd("~/git/mosaic_2020/")
|
|
||||||
getwd()
|
|
||||||
############################################################
|
|
||||||
# TASK: unpaired (time) analysis of mediators: serum
|
|
||||||
############################################################
|
|
||||||
#=============
|
|
||||||
# Input
|
|
||||||
#=============
|
|
||||||
source("data_extraction_formatting.R")
|
|
||||||
|
|
||||||
# clear variables
|
|
||||||
rm(sam_adults_lf, sam_df_adults_clean
|
|
||||||
, npa_adults_lf, npa_df_adults_clean)
|
|
||||||
rm(colnames_sam_df, expected_rows_sam_lf
|
|
||||||
, colnames_npa_df, expected_rows_npa_lf)
|
|
||||||
|
|
||||||
rm(pivot_cols)
|
|
||||||
|
|
||||||
my_sample_type = "serum"
|
|
||||||
#=============
|
|
||||||
# Output: unpaired analysis of time for serum
|
|
||||||
#=============
|
|
||||||
outfile_name = paste0("stats_time_unpaired_", my_sample_type, ".csv")
|
|
||||||
stats_time_unpaired = paste0(outdir_stats, outfile_name)
|
|
||||||
#%%========================================================
|
|
||||||
# data assignment for stats
|
|
||||||
wf = serum_df_adults_clean
|
|
||||||
lf = serum_adults_lf
|
|
||||||
#%%========================================================
|
|
||||||
table(lf$timepoint)
|
|
||||||
lf$timepoint = paste0("t", lf$timepoint)
|
|
||||||
|
|
||||||
########################################################################
|
|
||||||
# Unpaired stats at each timepoint b/w groups: wilcoxon UNpaired analysis with correction
|
|
||||||
#######################################################################
|
|
||||||
# with adjustment: fdr and BH are identical
|
|
||||||
my_adjust_method = "BH"
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t1
|
|
||||||
#==============
|
|
||||||
lf_t1 = lf[lf$timepoint == "t1",]
|
|
||||||
sum(is.na(lf_t1$value))
|
|
||||||
|
|
||||||
foo = lf_t1[which(is.na(lf_t1$value)),]
|
|
||||||
ci = which(is.na(lf_t1$value))
|
|
||||||
|
|
||||||
#lf_t1_comp = lf_t1[-ci,]
|
|
||||||
lf_t1_comp = lf_t1[-which(is.na(lf_t1$value)),]
|
|
||||||
stats_un_t1 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t1
|
|
||||||
, data = lf_t1_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
|
|
||||||
foo$mosaic[!unique(foo$mosaic)%in%unique(lf_t1_comp$mosaic)]
|
|
||||||
|
|
||||||
stats_un_t1$timepoint = "t1"
|
|
||||||
|
|
||||||
stats_un_t1 = as.data.frame(stats_un_t1)
|
|
||||||
class(stats_un_t1)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t1 = data.frame(table(lf_t1_comp$mediator))
|
|
||||||
colnames(n_t1) = c("mediator", "n_obs")
|
|
||||||
n_t1$mediator = as.character(n_t1$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t1), names(n_t1)); merging_cols
|
|
||||||
if (all(n_t1$mediator%in%stats_un_t1$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t1 = merge(stats_un_t1, n_t1, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t1)
|
|
||||||
, "\nncol:", ncol(stats_un_t1))
|
|
||||||
}else{
|
|
||||||
nf = n_t1$mediator[!n_t1$mediator%in%stats_un_t1$mediator]
|
|
||||||
stats_un_t1 = merge(stats_un_t1, n_t1, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t1)
|
|
||||||
, "\nncol:", ncol(stats_un_t1))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t1$p_adj_bonferroni = p.adjust(stats_un_t1$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t1)
|
|
||||||
rm(lf_t1_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t2
|
|
||||||
#==============
|
|
||||||
lf_t2 = lf[lf$timepoint == "t2",]
|
|
||||||
lf_t2_comp = lf_t2[-which(is.na(lf_t2$value)),]
|
|
||||||
|
|
||||||
stats_un_t2 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t2
|
|
||||||
, data = lf_t2_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
stats_un_t2$timepoint = "t2"
|
|
||||||
|
|
||||||
stats_un_t2 = as.data.frame(stats_un_t2)
|
|
||||||
class(stats_un_t2)
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t2 = data.frame(table(lf_t2_comp$mediator))
|
|
||||||
colnames(n_t2) = c("mediator", "n_obs")
|
|
||||||
n_t2$mediator = as.character(n_t2$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t2), names(n_t2)); merging_cols
|
|
||||||
if (all(n_t2$mediator%in%stats_un_t2$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t2 = merge(stats_un_t2, n_t2, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t2)
|
|
||||||
, "\nncol:", ncol(stats_un_t2))
|
|
||||||
}else{
|
|
||||||
nf = n_t2$mediator[!n_t2$mediator%in%stats_un_t2$mediator]
|
|
||||||
stats_un_t2 = merge(stats_un_t2, n_t2, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t2)
|
|
||||||
, "\nncol:", ncol(stats_un_t2))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t2$p_adj_bonferroni = p.adjust(stats_un_t2$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t2)
|
|
||||||
rm(lf_t2_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# unpaired: t3
|
|
||||||
#==============
|
|
||||||
lf_t3 = lf[lf$timepoint == "t3",]
|
|
||||||
lf_t3_comp = lf_t3[-which(is.na(lf_t3$value)),]
|
|
||||||
|
|
||||||
stats_un_t3 = compare_means(value~obesity
|
|
||||||
, group.by = "mediator"
|
|
||||||
#, data = lf_t3
|
|
||||||
, data = lf_t3_comp
|
|
||||||
, paired = FALSE
|
|
||||||
, p.adjust.method = my_adjust_method)
|
|
||||||
|
|
||||||
stats_un_t3$timepoint = "t3"
|
|
||||||
|
|
||||||
stats_un_t3 = as.data.frame(stats_un_t3)
|
|
||||||
class(stats_un_t3)
|
|
||||||
|
|
||||||
|
|
||||||
# calculate n_obs for each mediator
|
|
||||||
n_t3 = data.frame(table(lf_t3_comp$mediator))
|
|
||||||
colnames(n_t3) = c("mediator", "n_obs")
|
|
||||||
n_t3$mediator = as.character(n_t3$mediator)
|
|
||||||
|
|
||||||
# merge stats + n_obs df
|
|
||||||
merging_cols = intersect(names(stats_un_t3), names(n_t3)); merging_cols
|
|
||||||
if (all(n_t3$mediator%in%stats_un_t3$mediator)) {
|
|
||||||
cat("PASS: merging stats and n_obs on column/s:", merging_cols)
|
|
||||||
stats_un_t3 = merge(stats_un_t3, n_t3, by = merging_cols, all = T)
|
|
||||||
cat("\nsuccessfull merge:"
|
|
||||||
, "\nnrow:", nrow(stats_un_t3)
|
|
||||||
, "\nncol:", ncol(stats_un_t3))
|
|
||||||
}else{
|
|
||||||
nf = n_t3$mediator[!n_t3$mediator%in%stats_un_t3$mediator]
|
|
||||||
stats_un_t3 = merge(stats_un_t3, n_t3, by = merging_cols, all.y = T)
|
|
||||||
cat("\nMerged with caution:"
|
|
||||||
, "\nnrows mismatch:", nf
|
|
||||||
, "not found in stats possibly due to all obs being LLODs"
|
|
||||||
, "\nintroduced NAs for:", nf
|
|
||||||
, "\nnrow:", nrow(stats_un_t3)
|
|
||||||
, "\nncol:", ncol(stats_un_t3))
|
|
||||||
}
|
|
||||||
|
|
||||||
# add bonferroni adjustment as well
|
|
||||||
stats_un_t3$p_adj_bonferroni = p.adjust(stats_un_t3$p, method = "bonferroni")
|
|
||||||
|
|
||||||
rm(n_t3)
|
|
||||||
rm(lf_t3_comp)
|
|
||||||
|
|
||||||
#==============
|
|
||||||
# Rbind these dfs
|
|
||||||
#==============
|
|
||||||
str(stats_un_t1);str(stats_un_t2); str(stats_un_t3)
|
|
||||||
|
|
||||||
n_dfs = 3
|
|
||||||
|
|
||||||
if ( all.equal(nrow(stats_un_t1), nrow(stats_un_t2), nrow(stats_un_t3)) &&
|
|
||||||
all.equal(ncol(stats_un_t1), ncol(stats_un_t2), ncol(stats_un_t3)) ) {
|
|
||||||
expected_rows = nrow(stats_un_t1) * n_dfs
|
|
||||||
expected_cols = ncol(stats_un_t1)
|
|
||||||
print("PASS: expected_rows and cols variables generated for downstream sanity checks")
|
|
||||||
}else{
|
|
||||||
cat("FAIL: dfs have different no. of rows and cols"
|
|
||||||
, "\nCheck harcoded value of n_dfs"
|
|
||||||
, "\nexpected_rows and cols could not be generated")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( all.equal(colnames(stats_un_t1), colnames(stats_un_t2), colnames(stats_un_t3)) ){
|
|
||||||
print("PASS: colnames match. Rbind the 3 dfs...")
|
|
||||||
combined_unpaired_stats = rbind(stats_un_t1, stats_un_t2, stats_un_t3)
|
|
||||||
} else{
|
|
||||||
cat("FAIL: cannot combined dfs. Colnames don't match!")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( nrow(combined_unpaired_stats) == expected_rows && ncol(combined_unpaired_stats) == expected_cols ){
|
|
||||||
cat("PASS: combined_df has expected dimension"
|
|
||||||
, "\nNo. of rows in combined_df:", nrow(combined_unpaired_stats)
|
|
||||||
, "\nNo. of cols in combined_df:", ncol(combined_unpaired_stats) )
|
|
||||||
}else{
|
|
||||||
cat("FAIL: combined_df dimension mismatch")
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
|
|
||||||
#######################################################################
|
|
||||||
#=================
|
|
||||||
# formatting df
|
|
||||||
#=================
|
|
||||||
# delete: unnecessary column
|
|
||||||
combined_unpaired_stats = subset(combined_unpaired_stats, select = -c(.y.))
|
|
||||||
|
|
||||||
# add sample_type
|
|
||||||
cat("Adding sample type info as a column", my_sample_type, "...")
|
|
||||||
combined_unpaired_stats$sample_type = my_sample_type
|
|
||||||
|
|
||||||
# add: reflect stats method correctly i.e paired or unpaired
|
|
||||||
# incase there are NA due to LLODs, the gsub won't work!
|
|
||||||
#combined_unpaired_stats$method = gsub("Wilcoxon", "Wilcoxon_unpaired", combined_unpaired_stats$method)
|
|
||||||
combined_unpaired_stats$method = "wilcoxon unpaired"
|
|
||||||
combined_unpaired_stats$method
|
|
||||||
|
|
||||||
# add an extra column for padjust_signif: my_adjust_method
|
|
||||||
combined_unpaired_stats$padjust_signif = combined_unpaired_stats$p.adj
|
|
||||||
# add appropriate symbols for padjust_signif: my_adjust_method
|
|
||||||
combined_unpaired_stats = dplyr::mutate(combined_unpaired_stats, padjust_signif = case_when(padjust_signif == 0.05 ~ "."
|
|
||||||
, padjust_signif <=0.0001 ~ '****'
|
|
||||||
, padjust_signif <=0.001 ~ '***'
|
|
||||||
, padjust_signif <=0.01 ~ '**'
|
|
||||||
, padjust_signif <0.05 ~ '*'
|
|
||||||
, TRUE ~ 'ns'))
|
|
||||||
# add an extra column for p_bon_signif
|
|
||||||
combined_unpaired_stats$p_bon_signif = combined_unpaired_stats$p_adj_bonferroni
|
|
||||||
# add appropriate symbols for p_bon_signif
|
|
||||||
combined_unpaired_stats = dplyr::mutate(combined_unpaired_stats, p_bon_signif = case_when(p_bon_signif == 0.05 ~ "."
|
|
||||||
, p_bon_signif <=0.0001 ~ '****'
|
|
||||||
, p_bon_signif <=0.001 ~ '***'
|
|
||||||
, p_bon_signif <=0.01 ~ '**'
|
|
||||||
, p_bon_signif <0.05 ~ '*'
|
|
||||||
, TRUE ~ 'ns'))
|
|
||||||
# reorder columns
|
|
||||||
print("preparing to reorder columns...")
|
|
||||||
colnames(combined_unpaired_stats)
|
|
||||||
my_col_order2 = c("mediator"
|
|
||||||
, "timepoint"
|
|
||||||
, "sample_type"
|
|
||||||
, "n_obs"
|
|
||||||
, "group1"
|
|
||||||
, "group2"
|
|
||||||
, "method"
|
|
||||||
, "p"
|
|
||||||
, "p.format"
|
|
||||||
, "p.signif"
|
|
||||||
, "p.adj"
|
|
||||||
, "padjust_signif"
|
|
||||||
, "p_adj_bonferroni"
|
|
||||||
, "p_bon_signif")
|
|
||||||
|
|
||||||
if( length(my_col_order2) == ncol(combined_unpaired_stats) && (all(my_col_order2%in%colnames(combined_unpaired_stats))) ){
|
|
||||||
print("PASS: Reordering columns...")
|
|
||||||
combined_unpaired_stats_f = combined_unpaired_stats[, my_col_order2]
|
|
||||||
print("Successful: column reordering")
|
|
||||||
print("formatted df called:'combined_unpaired_stats_f'")
|
|
||||||
cat('\nformatted df has the following dimensions\n')
|
|
||||||
print(dim(combined_unpaired_stats_f ))
|
|
||||||
} else{
|
|
||||||
cat(paste0("FAIL:Cannot reorder columns, length mismatch"
|
|
||||||
, "\nExpected column order for: ", ncol(combined_unpaired_stats)
|
|
||||||
, "\nGot:", length(my_col_order2)))
|
|
||||||
quit()
|
|
||||||
}
|
|
||||||
# assign nice column names like replace "." with "_"
|
|
||||||
colnames(combined_unpaired_stats_f) = c("mediator"
|
|
||||||
, "timepoint"
|
|
||||||
, "sample_type"
|
|
||||||
, "n_obs"
|
|
||||||
, "group1"
|
|
||||||
, "group2"
|
|
||||||
, "method"
|
|
||||||
, "p"
|
|
||||||
, "p_format"
|
|
||||||
, "p_signif"
|
|
||||||
, paste0("p_adj_fdr_", my_adjust_method)
|
|
||||||
, paste0("p_", my_adjust_method, "_signif")
|
|
||||||
, "p_adj_bonferroni"
|
|
||||||
, "p_bon_signif")
|
|
||||||
|
|
||||||
colnames(combined_unpaired_stats_f)
|
|
||||||
|
|
||||||
#******************
|
|
||||||
# write output file
|
|
||||||
#******************
|
|
||||||
cat("UNpaired stats for groups will be:", stats_time_unpaired)
|
|
||||||
write.csv(combined_unpaired_stats_f, stats_time_unpaired, row.names = FALSE)
|
|