initialised dir with data extraction script

This commit is contained in:
Tanushree Tunstall 2020-10-16 17:43:22 +01:00
commit 0dab1d5097
2 changed files with 394 additions and 0 deletions

View file

@ -0,0 +1,237 @@
# Read the master MOSAIC file (Stata export); latin1 is needed because the
# file contains non-UTF-8 characters.
all_df <- read.csv(
  "/home/pub/Work/MOSAIC/MOSAIC_from_work/MASTER/MOSAIC_2015_MASTER_Aki_stata_20150721/Mosaic_master_file_from_stata.csv",
  fileEncoding = "latin1"
)

# Subject-level metadata columns carried into every extracted subset.
meta_data_cols <- c(
  "mosaic", "gender", "age", "adult", "flustat", "type",
  "obesity", "obese2", "height", "height_unit", "weight",
  "weight_unit", "visual_est_bmi", "bmi_rating"
)

# Sanity check: every requested metadata column must exist in the data.
meta_data_cols %in% colnames(all_df)
all(meta_data_cols %in% colnames(all_df))

metadata_all <- all_df[, meta_data_cols]

# stringr supplies regex() / str_extract() used below.
library(stringr)
#=========
# sam
#=========
# Select all columns whose names end in _sam1.._sam3 (sputum mediators).
sam_regex = regex(".*_sam[1-3]{1}$", ignore_case = T)
sam_cols_i = str_extract(colnames(all_df), sam_regex) # matched names (NA where no match), not boolean
#sam_cols_b = colnames(all_df)%in%sam_cols_i # boolean
sam_cols = colnames(all_df)[colnames(all_df)%in%sam_cols_i]
# this contains log columns + daysamp_samXX: omitting these
# NOTE(review): alternation binds loosely — this matches any name containing
# "log", OR names matching day.*_sam[1-3]$. Confirm that grouping is intended.
sam_regex_log_days = regex("log|day.*_sam[1-3]{1}$", ignore_case = T, perl = T)
sam_cols_to_omit = sam_cols[grepl(sam_regex_log_days, sam_cols)]; sam_cols_to_omit
sam_cols_clean = sam_cols[!sam_cols%in%sam_cols_to_omit]; sam_cols_clean
length(sam_cols_clean)
# Verify the clean set is exactly the raw set minus the omitted set.
if( length(sam_cols_clean) == length(sam_cols) - length(sam_cols_to_omit) ){
  cat("PASS: clean cols extracted"
      , "\nNo. of clean SAM cols to extract:", length(sam_cols_clean))
}else{
  cat("FAIL: length mismatch. Aborting further cols extraction"
      , "Expected length:", length(sam_cols) - length(sam_cols_to_omit)
      , "Got:", length(sam_cols_clean) )
}
# Final column set: metadata + clean sputum mediator columns.
sam_cols_to_extract = c(meta_data_cols, sam_cols_clean)
cat("Extracting SAM cols + metadata_cols")
if ( length(sam_cols_to_extract) == length(meta_data_cols) + length(sam_cols_clean) ){
  cat("Extracting", length(sam_cols_to_extract), "columns for sam") # typo fix: was "Extracing"
  sam_df = all_df[, sam_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(sam_cols_clean), "columns" # typo fix: was "Expeceted"
      , "Got:", length(sam_cols_to_extract))
}
colnames_sam_df = colnames(sam_df); colnames_sam_df
#=========
# serum
#=========
# Select all columns whose names end in _serum1.._serum3.
serum_regex = regex(".*_serum[1-3]{1}$", ignore_case = T)
serum_cols_i = str_extract(colnames(all_df), serum_regex) # matched names (NA where no match), not boolean
#serum_cols_b = colnames(all_df)%in%serum_cols_i # boolean
serum_cols = colnames(all_df)[colnames(all_df)%in%serum_cols_i]
# this contains log columns + day*_serumXX: omitting these
# NOTE(review): alternation binds loosely — matches any name containing "log",
# OR names matching day.*_serum[1-3]$. Confirm that grouping is intended.
serum_regex_log_days = regex("log|day.*_serum[1-3]{1}$", ignore_case = T, perl = T)
serum_cols_to_omit = serum_cols[grepl(serum_regex_log_days, serum_cols)]; serum_cols_to_omit
serum_cols_clean = serum_cols[!serum_cols%in%serum_cols_to_omit]; serum_cols_clean
length(serum_cols_clean)
# Verify the clean set is exactly the raw set minus the omitted set.
if( length(serum_cols_clean) == length(serum_cols) - length(serum_cols_to_omit) ){
  cat("PASS: clean cols extracted"
      , "\nNo. of clean serum cols to extract:", length(serum_cols_clean))
}else{
  cat("FAIL: length mismatch. Aborting further cols extraction"
      , "Expected length:", length(serum_cols) - length(serum_cols_to_omit)
      , "Got:", length(serum_cols_clean) )
}
# Final column set: metadata + clean serum mediator columns.
serum_cols_to_extract = c(meta_data_cols, serum_cols_clean)
cat("Extracting SERUM cols + metadata_cols")
if ( length(serum_cols_to_extract) == length(meta_data_cols) + length(serum_cols_clean) ){
  cat("Extracting", length(serum_cols_to_extract), "columns for serum") # typo fix: was "Extracing"
  serum_df = all_df[, serum_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(serum_cols_clean), "columns" # typo fix: was "Expeceted"
      , "Got:", length(serum_cols_to_extract))
}
colnames_serum_df = colnames(serum_df); colnames_serum_df
#=========
# npa
#=========
# Select all columns whose names end in _npa1.._npa3 (nasopharyngeal aspirate).
npa_regex = regex(".*_npa[1-3]{1}$", ignore_case = T)
npa_cols_i = str_extract(colnames(all_df), npa_regex) # matched names (NA where no match), not boolean
#npa_cols_b = colnames(all_df)%in%npa_cols_i # boolean
npa_cols = colnames(all_df)[colnames(all_df)%in%npa_cols_i]
# this contains log columns + day*_npaXX (and vl_samptime/ct cols): omitting these
# NOTE(review): alternation binds loosely — this matches any name containing
# "log", "day" or "vl_samptime", OR names matching ct.*_npa[1-3]$. If the
# intent was (log|day|vl_samptime|ct).*_npa[1-3]$, the grouping needs fixing.
npa_regex_log_days = regex("log|day|vl_samptime|ct.*_npa[1-3]{1}$", ignore_case = T, perl = T)
npa_cols_to_omit = npa_cols[grepl(npa_regex_log_days, npa_cols)]; npa_cols_to_omit
npa_cols_clean = npa_cols[!npa_cols%in%npa_cols_to_omit]; npa_cols_clean
length(npa_cols_clean)
# Verify the clean set is exactly the raw set minus the omitted set.
if( length(npa_cols_clean) == length(npa_cols) - length(npa_cols_to_omit) ){
  cat("PASS: clean cols extracted"
      , "\nNo. of clean npa cols to extract:", length(npa_cols_clean))
}else{
  cat("FAIL: length mismatch. Aborting further cols extraction"
      , "Expected length:", length(npa_cols) - length(npa_cols_to_omit)
      , "Got:", length(npa_cols_clean) )
}
# Final column set: metadata + clean NPA mediator columns.
npa_cols_to_extract = c(meta_data_cols, npa_cols_clean)
cat("Extracting NPA cols + metadata_cols")
if ( length(npa_cols_to_extract) == length(meta_data_cols) + length(npa_cols_clean) ){
  cat("Extracting", length(npa_cols_to_extract), "columns for npa") # typo fix: was "Extracing"
  npa_df = all_df[, npa_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(npa_cols_clean), "columns" # typo fix: was "Expeceted"
      , "Got:", length(npa_cols_to_extract))
}
colnames_npa_df = colnames(npa_df); colnames_npa_df
# Side-by-side view of the column names of the three extracted data frames,
# to eyeball that sam/serum/npa columns line up.
# NOTE(review): cbind() recycles the shorter name vectors (with a warning)
# when the three data frames have different column counts — the recycled
# tail entries are blanked out with NA in the loop below.
colnames_check = as.data.frame(cbind(colnames_sam_df, colnames_serum_df, colnames_npa_df))
tail(colnames_check)
# unmatched lengths
#colnames_check[117,1] <- NA
#colnames_check[117,2] <- NA
# Rows beyond the sam/serum column count exist only for npa — this assumes
# npa_df has the most columns (TODO confirm).
# NOTE(review): if ncol(sam_df) != ncol(serum_df), extra_cols is never
# defined and the for-loop below errors with "object 'extra_cols' not found".
if ( ncol(sam_df) == ncol(serum_df) ){
start = ncol(sam_df)+1
extra_cols = start:ncol(npa_df)
}
colnames_check_f = colnames_check
tail(colnames_check_f)
# Blank out the recycled sam (j = 1) and serum (j = 2) entries in the
# npa-only rows, leaving the npa column (3) untouched.
for (i in extra_cols){
for (j in 1:2) {
cat("\ni:", i
,"\nj:", j)
colnames_check_f[i,j] <- NA
#colnames_check_f[i, j]< - NA
}
}
tail(colnames_check_f)
# write file?
##########################################################################
# LF data
##########################################################################
#=========
# lf data: sam
#=========
# Inspect the wide-format sputum data before reshaping.
str(sam_df)
table(sam_df$obesity); table(sam_df$obese2)
# Adults only (adult == 1).
sam_df_adults = sam_df[sam_df$adult == 1,]
# Metadata columns NOT used as pivot identifiers.
cols_to_omit = c("adult", "flustat", "type", "obesity"
                 , "height", "height_unit", "weight", "weight_unit","visual_est_bmi", "bmi_rating")
# Identifier columns for pivot_longer: metadata minus the omitted set.
# (Removed a dead `pivot_cols = meta_data_cols` assignment that was
# immediately overwritten by the line below.)
pivot_cols = meta_data_cols[!meta_data_cols%in%cols_to_omit];pivot_cols
if (length(pivot_cols) == length(meta_data_cols) - length(cols_to_omit)){
  cat("PASS: pivot cols successfully extracted")
}else{
  cat("FAIL: length mismatch! pivot cols could not be extracted"
      , "\nExpected length:", length(meta_data_cols) - length(cols_to_omit)
      , "\nGot:",length(pivot_cols) )
  quit()
}
# Expected rows in long format: each non-pivot (mediator) column contributes
# one row per subject. ncol() replaces length() for clarity — identical for
# data frames.
expected_rows_sam_lf = nrow(sam_df_adults) * (ncol(sam_df_adults) - length(pivot_cols)); expected_rows_sam_lf
# using regex:
# Reshape wide mediator columns to long format. Column names like
# "<mediator>_sam<1-3>" are split by names_pattern into mediator,
# sample_type and timepoint.
# FIX: call tidyr::pivot_longer() directly — %>% is not available here
# because neither dplyr nor magrittr is attached in this script.
sam_adults_lf = tidyr::pivot_longer(sam_df_adults
                                    , cols = -all_of(pivot_cols)
                                    , names_to = c("mediator", "sample_type", "timepoint")
                                    , names_pattern = "(.*)_(.*)([1-3]{1})"
                                    , values_to = "value")
# FIX: the checks below referenced an undefined object `sam_lf` (would error
# with "object 'sam_lf' not found"); they now use `sam_adults_lf` throughout.
if ((nrow(sam_adults_lf) == expected_rows_sam_lf) & (sum(table(is.na(sam_adults_lf$mediator))) == expected_rows_sam_lf)) {
  cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
             , "\nNo. of rows: ", nrow(sam_adults_lf)
             , "\nNo. of cols: ", ncol(sam_adults_lf)))
} else{
  cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
             , "\nExpected no. of rows: ", expected_rows_sam_lf
             , "\nGot: ", nrow(sam_adults_lf)
             , "\ncheck expected rows calculation!"))
  quit()
}
# remove unnecessary intermediate objects
# FIX: dropped `sam_cols_b` from the list — its assignment is commented out
# above, so the object never exists and rm() warned "object not found".
rm(sam_regex, sam_regex_log_days, sam_cols, sam_cols_clean, sam_cols_i, sam_cols_to_extract, sam_cols_to_omit)
rm(serum_regex, serum_regex_log_days, serum_cols, serum_cols_clean, serum_cols_i, serum_cols_to_extract, serum_cols_to_omit)
rm(npa_regex, npa_regex_log_days, npa_cols, npa_cols_clean, npa_cols_i, npa_cols_to_extract, npa_cols_to_omit)
rm(all_df)
rm(colnames_check)

157
mosaic_bmi_test.R Normal file
View file

@ -0,0 +1,157 @@
# Read the master file with base read.csv; latin1 handles non-UTF-8 bytes.
df = read.csv("/home/backup/MOSAIC/MEDIATOR_Data/master_file/Mosaic_master_file_from_stata.csv"
              , fileEncoding = "latin1"
              , sep = ",")
# FIX: install readr only when it is missing, instead of unconditionally
# reinstalling on every run of the script.
if (!requireNamespace("readr", quietly = TRUE)) {
  install.packages("readr")
}
library(readr)
# Re-read with readr to compare column name handling/parsing with read.csv.
df2 = read_csv("/home/backup/MOSAIC/MEDIATOR_Data/master_file/Mosaic_master_file_from_stata.csv"
               , col_names = T)
foo = as.data.frame(colnames(df2))
head(foo)
head(df2)
# sam: previously exported sputum-only subset
sam_df = read.csv("/home/backup/MOSAIC/MEDIATOR_Data/mediator_data_analysis/SAM-oct-2015/SAM_adults_publication/SAM_only.csv"
                  , fileEncoding = "latin1")
# extract the 36 HC
# master file for HC: called "Mediators_for_HC.csv" in /home/tanu/MASTERS/Birkbeck/MSc_Project/MOSAIC/MEDIATOR_Data/master_file
# NOTE: file.choose() is interactive — pick "Mediators_for_HC.csv" when prompted.
all_healthy<- read.csv(file.choose())
serum_hc<- subset(all_healthy, Timepoint == "HC" & Sample == "Serum")
length(unique(serum_hc$MOSAIC)) # check: expect 36 unique healthy controls
#=====================
# Comprehensive patient file: "Mosaic_master_file_from_stata.csv".
# latin1 sidesteps the non-UTF-8 characters in the Stata export.
all_df <- read.csv(
  "/home/pub/Work/MOSAIC/MOSAIC_from_work/MASTER/MOSAIC_2015_MASTER_Aki_stata_20150721/Mosaic_master_file_from_stata.csv",
  fileEncoding = "latin1"
)

# Subject-level metadata to keep alongside each mediator subset.
meta_data_cols <- c(
  "mosaic", "gender", "age", "adult", "flustat", "type",
  "obesity", "obese2", "height", "height_unit", "weight",
  "weight_unit", "visual_est_bmi", "bmi_rating"
)

# Confirm every metadata column exists in the master data.
meta_data_cols %in% colnames(all_df)
all(meta_data_cols %in% colnames(all_df))

metadata_all <- all_df[, meta_data_cols]

# Quick look at the timepoint-1 sputum columns.
sam <- all_df[, grepl("_sam1", colnames(all_df))]

# stringr regexes matching column names that end in _serum1..3 / _npa1..3.
library(stringr)
serum_regex <- regex(".*_serum[1-3]{1}$", ignore_case = TRUE)
npa_regex <- regex(".*_npa[1-3]{1}$", ignore_case = TRUE)
#=========
# sam
#=========
# Pull every column ending in _sam1.._sam3 (sputum mediators).
sam_regex = regex(".*_sam[1-3]{1}$", ignore_case = T)
sam_cols_i = str_extract(colnames(all_df), sam_regex) # matched names (NA where no match), not boolean
sam_cols_b = colnames(all_df)%in%sam_cols_i # boolean
sam_cols = colnames(all_df)[colnames(all_df)%in%sam_cols_i]
# this contains log columns as well as daysamp_samXX
# NOTE(review): alternation binds loosely — matches any name containing "log",
# OR names matching day.*_sam[1-3]$. Confirm that grouping is intended.
sam_regex_log_days = regex("log|day.*_sam[1-3]{1}$", ignore_case = T, perl =T)
sam_cols_to_omit = sam_cols[grepl(sam_regex_log_days, sam_cols)]; sam_cols_to_omit
sam_cols_clean = sam_cols[!sam_cols%in%sam_cols_to_omit]; sam_cols_clean
length(sam_cols_clean)
# Verify the clean set is exactly the raw set minus the omitted set.
if( length(sam_cols_clean) == length(sam_cols) - length(sam_cols_to_omit) ){
  cat("PASS: clean cols extracted"
      , "\nNo. of clean cols to extract", length(sam_cols_clean))
}
# Final column set: metadata + clean sputum mediator columns.
sam_cols_to_extract = c(meta_data_cols, sam_cols_clean)
if ( length(sam_cols_to_extract) == length(meta_data_cols) + length(sam_cols_clean) ){
  cat("Extracting", length(sam_cols_to_extract), "columns for sam") # typo fix: was "Extracing"
  sam_df = all_df[, sam_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(sam_cols_clean), "columns" # typo fix: was "Expeceted"
      , "Got:", length(sam_cols_to_extract))
}
colnames_sam_df = colnames(sam_df); colnames_sam_df
#=========
# serum
#=========
# Serum columns are extracted WITHOUT omitting the log/day columns here
# (unlike the main extraction script).
serum_cols_i = str_extract(colnames(all_df), serum_regex)
table(colnames(all_df)%in%serum_cols_i)
serum_cols = colnames(all_df)[colnames(all_df)%in%serum_cols_i]
serum_cols_to_extract = c(meta_data_cols, serum_cols)
if ( length(serum_cols_to_extract) == length(meta_data_cols) + length(serum_cols) ){
  cat("Extracting", length(serum_cols_to_extract), "columns for serum") # typo fix: was "Extracing"
  serum_df = all_df[, serum_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(serum_cols), "columns" # typo fix: was "Expeceted"
      , "Got:", length(serum_cols_to_extract))
}
#=========
# npa
#=========
# NPA columns are extracted WITHOUT omitting the log/day columns here
# (unlike the main extraction script).
npa_cols_i= str_extract(colnames(all_df), npa_regex)
table(colnames(all_df)%in%npa_cols_i)
npa_cols = colnames(all_df)[colnames(all_df)%in%npa_cols_i]
npa_cols_to_extract = c(meta_data_cols, npa_cols)
if ( length(npa_cols_to_extract) == length(meta_data_cols) + length(npa_cols) ){
  cat("Extracting", length(npa_cols_to_extract), "columns for NPA") # typo fix: was "Extracing"
  npa_df = all_df[, npa_cols_to_extract]
}else{
  cat("FAIL: length mismatch"
      , "Expected to extract:", length(meta_data_cols) + length(npa_cols), "columns" # typo fix: was "Expeceted"
      , "Got:", length(npa_cols_to_extract))
}
#################################
#=========
# lf data: sam
#=========
# Keep a full copy, then work on the first 10 rows only (quick pivot test).
sam_df_v2 = sam_df
sam_df = sam_df[1:10, ]
# All metadata columns act as pivot identifiers here.
pivot_cols = meta_data_cols
# Expected long-format rows: each non-pivot (mediator) column contributes one
# row per subject. ncol() replaces length() for clarity — identical for data
# frames.
expected_rows_sam_lf = nrow(sam_df) * (ncol(sam_df) - length(pivot_cols)); expected_rows_sam_lf
# using regex:
# FIX: call tidyr::pivot_longer() directly — %>% is not available because
# neither dplyr nor magrittr is attached in this script (readr does not
# export the pipe).
sam_lf = tidyr::pivot_longer(sam_df
                             , cols = -all_of(pivot_cols)
                             , names_to = c("mediator", "sample_type", "timepoint")
                             , names_pattern = "(.*)_(.*)([1-3]{1})"
                             , values_to = "value")
if ((nrow(sam_lf) == expected_rows_sam_lf) & (sum(table(is.na(sam_lf$mediator))) == expected_rows_sam_lf)) {
  cat(paste0("PASS: long format data has correct no. of rows and NA in mediator:"
             , "\nNo. of rows: ", nrow(sam_lf)
             , "\nNo. of cols: ", ncol(sam_lf)))
} else{
  cat(paste0("FAIL:long format data has unexpected no. of rows or NAs in mediator"
             , "\nExpected no. of rows: ", expected_rows_sam_lf
             , "\nGot: ", nrow(sam_lf)
             , "\ncheck expected rows calculation!"))
  quit()
}