added Header, read_data.R & data_extraction scripts
This commit is contained in:
parent
69e8ac0ea8
commit
de5b07edc7
3 changed files with 383 additions and 0 deletions
9
Header_TT.R
Normal file
@@ -0,0 +1,9 @@
#!/usr/bin/Rscript
#install.packages("stringr")
library(stringr)
library(tidyverse)
library(ggpubr)
library(rstatix)
library(Hmisc)
library(qwraps2)
345
data_extraction_formatting.R
Normal file
@@ -0,0 +1,345 @@
#!/usr/bin/Rscript
getwd()
setwd('~/git/mosaic_2020/')
getwd()
########################################################################
# TASK: Extract relevant columns from mosaic data
#       sam
#       serum
#       npa
########################################################################
#====================
# Input: source data
#====================
source("read_data.R")

# clear unnecessary variables
#rm()
########################################################################

#=========
# sam
#=========
sam_regex = regex(".*_sam[1-3]{1}$", ignore_case = T)
sam_cols_i = str_extract(colnames(all_df), sam_regex) # not boolean
#sam_cols_b = colnames(all_df)%in%sam_cols_i # boolean

sam_cols = colnames(all_df)[colnames(all_df)%in%sam_cols_i]

# this contains log columns + daysamp_samXX: omitting these
sam_regex_log_days = regex("log|day.*_sam[1-3]{1}$", ignore_case = T, perl = T)
sam_cols_to_omit = sam_cols[grepl(sam_regex_log_days, sam_cols)]; sam_cols_to_omit

sam_cols_clean = sam_cols[!sam_cols%in%sam_cols_to_omit]; sam_cols_clean
length(sam_cols_clean)

if( length(sam_cols_clean) == length(sam_cols) - length(sam_cols_to_omit) ){
  cat("PASS: clean cols extracted"
      , "\nNo. of clean SAM cols to extract:", length(sam_cols_clean))
}else{
  cat("FAIL: length mismatch. Aborting further cols extraction"
      , "Expected length:", length(sam_cols) - length(sam_cols_to_omit)
      , "Got:", length(sam_cols_clean) )
}

sam_cols_to_extract = c(meta_data_cols, sam_cols_clean)

cat("Extracting SAM cols + metadata_cols")

if ( length(sam_cols_to_extract) == length(meta_data_cols) + length(sam_cols_clean) ){
  cat("Extracting", length(sam_cols_to_extract), "columns for sam")
  sam_df = all_df[, sam_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(sam_cols_clean), "columns"
      , "Got:", length(sam_cols_to_extract))
}

colnames_sam_df = colnames(sam_df); colnames_sam_df

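# ----------------------------------------------------------------------------
# NOTE (editor's illustrative sketch, not part of the committed script): the
# serum and npa sections below repeat the steps above almost verbatim (npa only
# adds extra terms to its omit regex). A small helper along these lines could
# factor out the shared pattern; the function name and 'suffix' argument are
# hypothetical, and it assumes all_df and meta_data_cols as created by
# read_data.R.
extract_sample_cols = function(df, meta_cols, suffix){
  keep_regex = regex(paste0(".*_", suffix, "[1-3]$"), ignore_case = T)
  omit_regex = regex(paste0("log|day.*_", suffix, "[1-3]$"), ignore_case = T)
  cols       = colnames(df)[str_detect(colnames(df), keep_regex)]
  cols_clean = cols[!str_detect(cols, omit_regex)]
  df[, c(meta_cols, cols_clean), drop = FALSE]
}
#extract_sample_cols(all_df, meta_data_cols, "sam")   # expected to mirror sam_df
# ----------------------------------------------------------------------------
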
#=========
# serum
#=========
serum_regex = regex(".*_serum[1-3]{1}$", ignore_case = T)
serum_cols_i = str_extract(colnames(all_df), serum_regex) # not boolean
#serum_cols_b = colnames(all_df)%in%serum_cols_i # boolean

serum_cols = colnames(all_df)[colnames(all_df)%in%serum_cols_i]

# this contains log columns + day*_serumXX columns: omitting these
serum_regex_log_days = regex("log|day.*_serum[1-3]{1}$", ignore_case = T, perl = T)
serum_cols_to_omit = serum_cols[grepl(serum_regex_log_days, serum_cols)]; serum_cols_to_omit

serum_cols_clean = serum_cols[!serum_cols%in%serum_cols_to_omit]; serum_cols_clean
length(serum_cols_clean)

if( length(serum_cols_clean) == length(serum_cols) - length(serum_cols_to_omit) ){
  cat("PASS: clean cols extracted"
      , "\nNo. of clean serum cols to extract:", length(serum_cols_clean))
}else{
  cat("FAIL: length mismatch. Aborting further cols extraction"
      , "Expected length:", length(serum_cols) - length(serum_cols_to_omit)
      , "Got:", length(serum_cols_clean) )
}

serum_cols_to_extract = c(meta_data_cols, serum_cols_clean)

cat("Extracting SERUM cols + metadata_cols")

if ( length(serum_cols_to_extract) == length(meta_data_cols) + length(serum_cols_clean) ){
  cat("Extracting", length(serum_cols_to_extract), "columns for serum")
  serum_df = all_df[, serum_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(serum_cols_clean), "columns"
      , "Got:", length(serum_cols_to_extract))
}

colnames_serum_df = colnames(serum_df); colnames_serum_df

#=========
# npa
#=========
npa_regex = regex(".*_npa[1-3]{1}$", ignore_case = T)
npa_cols_i = str_extract(colnames(all_df), npa_regex) # not boolean
#npa_cols_b = colnames(all_df)%in%npa_cols_i # boolean

npa_cols = colnames(all_df)[colnames(all_df)%in%npa_cols_i]

# this contains log, day, vl_samptime and ct*_npaXX columns: omitting these
npa_regex_log_days = regex("log|day|vl_samptime|ct.*_npa[1-3]{1}$", ignore_case = T, perl = T)
npa_cols_to_omit = npa_cols[grepl(npa_regex_log_days, npa_cols)]; npa_cols_to_omit

npa_cols_clean = npa_cols[!npa_cols%in%npa_cols_to_omit]; npa_cols_clean
length(npa_cols_clean)

if( length(npa_cols_clean) == length(npa_cols) - length(npa_cols_to_omit) ){
  cat("PASS: clean cols extracted"
      , "\nNo. of clean npa cols to extract:", length(npa_cols_clean))
}else{
  cat("FAIL: length mismatch. Aborting further cols extraction"
      , "Expected length:", length(npa_cols) - length(npa_cols_to_omit)
      , "Got:", length(npa_cols_clean) )
}

npa_cols_to_extract = c(meta_data_cols, npa_cols_clean)

cat("Extracting NPA cols + metadata_cols")

if ( length(npa_cols_to_extract) == length(meta_data_cols) + length(npa_cols_clean) ){
  cat("Extracting", length(npa_cols_to_extract), "columns for npa")
  npa_df = all_df[, npa_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(npa_cols_clean), "columns"
      , "Got:", length(npa_cols_to_extract))
}

colnames_npa_df = colnames(npa_df); colnames_npa_df

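# NOTE (editor's illustration, toy column names only): in the npa omit regex
# above, "|" binds loosely, so it drops any column containing "log", "day" or
# "vl_samptime" anywhere, plus columns matching "ct.*_npa[1-3]$":
grepl("log|day|vl_samptime|ct.*_npa[1-3]{1}$"
      , c("ifn_log_npa1", "daysamp_npa2", "vl_samptime_npa1", "ctvalue_npa3", "il6_npa1"))
# expected: TRUE TRUE TRUE TRUE FALSE
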
colnames_check = as.data.frame(cbind(colnames_sam_df, colnames_serum_df, colnames_npa_df))
tail(colnames_check)

# put NA where a match doesn't exist
# unmatched lengths
#colnames_check[117,1] <- NA
#colnames_check[117,2] <- NA

if ( ncol(sam_df) == ncol(serum_df) ){
  start = ncol(sam_df)+1
  extra_cols = start:ncol(npa_df)
}

colnames_check_f = colnames_check
tail(colnames_check_f)

for (i in extra_cols){
  for (j in 1:2) {
    cat("\ni:", i
        , "\nj:", j)
    colnames_check_f[i,j] <- NA
    #colnames_check_f[i, j]< - NA
  }
}
tail(colnames_check_f)
# write file?
quick_check = as.data.frame(cbind(metadata_all$mosaic
                                  , metadata_all$adult
                                  , metadata_all$age
                                  , metadata_all$obesity
                                  , metadata_all$obese2))
colnames(quick_check) = c("mosaic", "adult", "age", "obesity", "obese2")

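# NOTE (editor's illustrative sketch): an alternative to the recycling + NA
# loop above is to extend each name vector to a common length before building
# the data frame (base R pads a lengthened vector with NA). 'pad_to' and
# 'colnames_check_alt' are hypothetical names, not used elsewhere.
pad_to = max(length(colnames_sam_df), length(colnames_serum_df), length(colnames_npa_df))
colnames_check_alt = as.data.frame(lapply(
  list(sam = colnames_sam_df, serum = colnames_serum_df, npa = colnames_npa_df)
  , function(x){ length(x) <- pad_to; x }))
tail(colnames_check_alt)
# To act on "# write file?" above, one option (file name is an assumption) would be:
#write.csv(colnames_check_f, paste0(outdir_stats, "colnames_check.csv"), row.names = FALSE)
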
##########################################################################
# LF data
##########################################################################

#==============
# lf data: sam
#==============
str(sam_df)
table(sam_df$obesity); table(sam_df$obese2)

sam_df_adults = sam_df[sam_df$adult == 1,]

cols_to_omit = c("type"
                 #, "flustat"
                 #, "obesity"
                 #, "obese2"
                 , "height", "height_unit", "weight"
                 , "weight_unit", "visual_est_bmi", "bmi_rating")

#sam_df_adults_clean = sam_df_adults[!cols_to_omit]

wf_cols = colnames(sam_df_adults)[!colnames(sam_df_adults)%in%cols_to_omit]
sam_df_adults_clean = sam_df_adults[wf_cols]

pivot_cols = meta_data_cols
# subselect pivot_cols
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit]; pivot_cols

if (length(pivot_cols) == length(meta_data_cols) - length(cols_to_omit)){
  cat("PASS: pivot cols successfully extracted")
}else{
  cat("FAIL: length mismatch! pivot cols could not be extracted"
      , "\nExpected length:", length(meta_data_cols) - length(cols_to_omit)
      , "\nGot:", length(pivot_cols) )
  quit()
}

expected_rows_sam_lf = nrow(sam_df_adults_clean) * (length(sam_df_adults_clean) - length(pivot_cols)); expected_rows_sam_lf

# using regex:
sam_adults_lf = sam_df_adults_clean %>%
  tidyr::pivot_longer(-all_of(pivot_cols)
                      , names_to = c("mediator", "sample_type", "timepoint")
                      , names_pattern = "(.*)_(.*)([1-3]{1})"
                      , values_to = "value")

if (
  (nrow(sam_adults_lf) == expected_rows_sam_lf) & (sum(is.na(sam_adults_lf$mediator)) == 0)
) {
  cat(paste0("PASS: long format data has correct no. of rows and no NAs in mediator:"
             , "\nNo. of rows: ", nrow(sam_adults_lf)
             , "\nNo. of cols: ", ncol(sam_adults_lf)))
} else{
  cat(paste0("FAIL: long format data has unexpected no. of rows or NAs in mediator"
             , "\nExpected no. of rows: ", expected_rows_sam_lf
             , "\nGot: ", nrow(sam_adults_lf)
             , "\ncheck expected rows calculation!"))
  quit()
}

#library(data.table)
#foo = sam_df_adults[1:10]
#long <- melt(setDT(sam_df_adults), id.vars = pivot_cols, variable.name = "levels")

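# NOTE (editor's illustration, toy column name only): how the names_pattern
# above splits a wide column name into mediator / sample_type / timepoint:
str_match("il6_sam2", "(.*)_(.*)([1-3]{1})")
# expected capture groups: "il6", "sam", "2" (the mediator keeps any internal underscores)
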
#==============
# lf data: serum
#==============
str(serum_df)
table(serum_df$obesity); table(serum_df$obese2)

serum_df_adults = serum_df[serum_df$adult == 1,]

#serum_df_adults_clean = serum_df_adults[!cols_to_omit]
wf_cols = colnames(serum_df_adults)[!colnames(serum_df_adults)%in%cols_to_omit]
serum_df_adults_clean = serum_df_adults[wf_cols]

pivot_cols = meta_data_cols
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit]; pivot_cols

if (length(pivot_cols) == length(meta_data_cols) - length(cols_to_omit)){
  cat("PASS: pivot cols successfully extracted")
}else{
  cat("FAIL: length mismatch! pivot cols could not be extracted"
      , "\nExpected length:", length(meta_data_cols) - length(cols_to_omit)
      , "\nGot:", length(pivot_cols) )
  quit()
}

expected_rows_serum_lf = nrow(serum_df_adults_clean) * (length(serum_df_adults_clean) - length(pivot_cols)); expected_rows_serum_lf

# using regex:
serum_adults_lf = serum_df_adults_clean %>%
  tidyr::pivot_longer(-all_of(pivot_cols)
                      , names_to = c("mediator", "sample_type", "timepoint")
                      , names_pattern = "(.*)_(.*)([1-3]{1})"
                      , values_to = "value")

if (
  (nrow(serum_adults_lf) == expected_rows_serum_lf) & (sum(is.na(serum_adults_lf$mediator)) == 0)
) {
  cat(paste0("PASS: long format data has correct no. of rows and no NAs in mediator:"
             , "\nNo. of rows: ", nrow(serum_adults_lf)
             , "\nNo. of cols: ", ncol(serum_adults_lf)))
} else{
  cat(paste0("FAIL: long format data has unexpected no. of rows or NAs in mediator"
             , "\nExpected no. of rows: ", expected_rows_serum_lf
             , "\nGot: ", nrow(serum_adults_lf)
             , "\ncheck expected rows calculation!"))
  quit()
}

#==============
# lf data: npa
#==============
str(npa_df)
table(npa_df$obesity); table(npa_df$obese2)

npa_df_adults = npa_df[npa_df$adult == 1,]
#npa_df_adults_clean = npa_df_adults[!cols_to_omit]

wf_cols = colnames(npa_df_adults)[!colnames(npa_df_adults)%in%cols_to_omit]
npa_df_adults_clean = npa_df_adults[wf_cols]

pivot_cols = meta_data_cols
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit]; pivot_cols

if (length(pivot_cols) == length(meta_data_cols) - length(cols_to_omit)){
  cat("PASS: pivot cols successfully extracted")
}else{
  cat("FAIL: length mismatch! pivot cols could not be extracted"
      , "\nExpected length:", length(meta_data_cols) - length(cols_to_omit)
      , "\nGot:", length(pivot_cols) )
  quit()
}

expected_rows_npa_lf = nrow(npa_df_adults_clean) * (length(npa_df_adults_clean) - length(pivot_cols)); expected_rows_npa_lf

# using regex:
npa_adults_lf = npa_df_adults_clean %>%
  tidyr::pivot_longer(-all_of(pivot_cols)
                      , names_to = c("mediator", "sample_type", "timepoint")
                      , names_pattern = "(.*)_(.*)([1-3]{1})"
                      , values_to = "value")

if (
  (nrow(npa_adults_lf) == expected_rows_npa_lf) & (sum(is.na(npa_adults_lf$mediator)) == 0)
) {
  cat(paste0("PASS: long format data has correct no. of rows and no NAs in mediator:"
             , "\nNo. of rows: ", nrow(npa_adults_lf)
             , "\nNo. of cols: ", ncol(npa_adults_lf)))
} else{
  cat(paste0("FAIL: long format data has unexpected no. of rows or NAs in mediator"
             , "\nExpected no. of rows: ", expected_rows_npa_lf
             , "\nGot: ", nrow(npa_adults_lf)
             , "\ncheck expected rows calculation!"))
  quit()
}

###############################################################################
# remove unnecessary variables
rm(sam_regex, sam_regex_log_days, sam_cols, sam_cols_clean, sam_cols_i, sam_cols_to_extract, sam_cols_to_omit)
rm(serum_regex, serum_regex_log_days, serum_cols, serum_cols_clean, serum_cols_i, serum_cols_to_extract, serum_cols_to_omit)
rm(npa_regex, npa_regex_log_days, npa_cols, npa_cols_clean, npa_cols_i, npa_cols_to_extract, npa_cols_to_omit)
rm(all_df)
rm(colnames_check)
rm(i, j, start, wf_cols, extra_cols, cols_to_omit)

# rm not_clean dfs
rm(sam_df_adults, serum_df_adults, npa_df_adults)

# rm dfs that still contain non-adults
rm(sam_df, serum_df, npa_df)
29
read_data.R
Normal file
@@ -0,0 +1,29 @@
#!/usr/bin/Rscript
getwd()
setwd("~/git/mosaic_2020/")
getwd()
########################################################################
# TASK: read data
########################################################################
# load libraries, packages and local imports
source("Header_TT.R")
########################################################################
# TODO: turn this into a repo
all_df <- read.csv("/home/pub/Work/MOSAIC/MOSAIC_from_work/MASTER/MOSAIC_2015_MASTER_Aki_stata_20150721/Mosaic_master_file_from_stata.csv"
                   , fileEncoding = 'latin1')
#hc_data<-
# meta data columns
meta_data_cols = c("mosaic", "gender", "age", "adult", "flustat", "type"
                   , "obesity", "obese2", "height", "height_unit", "weight"
                   , "weight_unit", "visual_est_bmi", "bmi_rating")

# check if these columns to select are present in the data
meta_data_cols%in%colnames(all_df)
all(meta_data_cols%in%colnames(all_df))

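# NOTE (editor's illustrative sketch): the two checks above only print to the
# console; a stricter guard would stop the run and name any missing metadata
# column. 'missing_meta' is a hypothetical variable, not used elsewhere.
missing_meta = setdiff(meta_data_cols, colnames(all_df))
if (length(missing_meta) > 0){
  stop("FAIL: metadata columns missing from source data: "
       , paste(missing_meta, collapse = ", "))
}
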
metadata_all = all_df[, meta_data_cols]
########################################################################
#
#outdir =
#outdir_plots =
outdir_stats = paste0("~/git/mosaic_2020/output/stats/")
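# NOTE (editor's illustrative sketch): outdir_stats is only a path string; a
# guard like this would make sure the directory exists before downstream
# scripts write into it.
if (!dir.exists(outdir_stats)) dir.create(outdir_stats, recursive = TRUE)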