---
title: "Axonal Length"
author: "Sergio Díez Hermano"
date: '`r format(Sys.Date(),"%e de %B, %Y")`'
output:
  html_document:
    highlight: tango
    toc: yes
    toc_depth: 4
    toc_float:
      collapsed: no
---

```{r setup, include=FALSE}
require(knitr)
# include this code chunk as-is to set options
opts_chunk$set(comment = NA, prompt = FALSE, fig.height=5, fig.width=5, dpi=300, fig.align = "center", echo = TRUE, message = FALSE, warning = FALSE, cache=FALSE)
Sys.setlocale("LC_TIME", "C")
```
##Load libraries and functions
```{r}
library(R.matlab)
library(lattice)
library(dplyr)
library(ggplot2)
library(ggridges)
library(hrbrthemes)
library(viridis)
library(ggpubr)
library(mltools)
library(data.table)
library(caret)
library(interactions)
library(performance)
library(MASS)
library(geepack)
library(pstools)
library(MXM)
library(rmcorr)
library(multcomp)
library(Hmisc)

options(digits = 10)

# https://rstudio-pubs-static.s3.amazonaws.com/297778_5fce298898d64c81a4127cf811a9d486.html

#########################################
# Deviation from empirical variance GEE #
#########################################

var.crit <- function(mymodel) sum(abs(mymodel[["geese"]]$vbeta.naiv - mymodel[["geese"]]$vbeta))

############################
# Distances between planes #
############################
distPlan <- function(mydata) {
  # Vector to store distances
  distPlanes <- vector()
  # Estimate distances between planes for each subject
  for (i in unique(mydata$id)) {
    LPvals <- mydata[mydata$id == i, 3]
    distPlanes <- c(distPlanes,
                    (LPvals[1] - LPvals[2])/max(LPvals[1], LPvals[2]),
                    (LPvals[1] - LPvals[3])/max(LPvals[1], LPvals[3]),
                    (LPvals[2] - LPvals[3])/max(LPvals[2], LPvals[3]))
  }
  # Concatenate factor with subtraction information
  numSubjects <- length(unique(mydata$id))
  subsFactor <- rep(c("XY - XZ", "XY - YZ", "YZ - XZ"), times = numSubjects)
  distFrame <- data.frame(id = mydata$id,
                          type = mydata$neurType,
                          distPlanes = distPlanes,
                          substraction = subsFactor)
  return(distFrame)
}

##########################
# Bootstrap coefficients #
##########################

bootCoef <- function(mydata, axontype, projection, corrMat, nreps) {
  # Select data
  modData <- mydata[mydata$neurType == axontype & mydata$Plane == projection, ]
  # Fit the model
  modGee <- geeglm(LR ~ LP, data = modData, id = id, corstr = corrMat)
  # Ensure reproducibility
  set.seed(12345)
  # Set up a matrix to store the results (1 intercept + 1 predictor = 2)
  coefmat <- matrix(NA, nreps, 2)
  # We need the fitted values and the residuals from the model
  resids <- residuals(modGee)
  preds <- fitted(modGee)
  # Repeatedly generate bootstrapped responses
  for (i in 1:nreps) {
    booty <- preds + sample(resids, replace = TRUE)
    modGee2 <- update(modGee, booty ~ .)
    coefmat[i, ] <- coef(modGee2)
  }
  # Create a data frame to store the coefficient values
  colnames(coefmat) <- names(coef(modGee2))
  coefmat <- data.frame(coefmat)
  return(coefmat)
}

##############################
# Bootstrap coefficients GEE #
##############################

# For models with interactions
bootCoefint <- function(mydata, modForm, corrMat, nreps) {
  # Fit model
  modGee <- geeglm(formula(modForm), data = mydata, id = id, corstr = corrMat)
  # Ensure reproducibility
  # set.seed(12345)
  # Set up a matrix to store the results (one column per coefficient)
  coefmat <- matrix(NA, nreps, length(coef(modGee)))
  # We need the fitted values and the residuals from the model
  resids <- residuals(modGee)
  preds <- fitted(modGee)
  # Repeatedly generate bootstrapped responses
  for (i in 1:nreps) {
    booty <- preds + sample(resids, replace = TRUE)
    modGee2 <- update(modGee, booty ~ .)
    coefmat[i, ] <- coef(modGee2)
    print(i)
  }
  # Create a data frame to store the coefficient values
  colnames(coefmat) <- names(coef(modGee2))
  coefmat <- data.frame(coefmat)
  return(coefmat)
}

#############################
# Bootstrap coefficients LM #
#############################

# For models with interactions
bootCoefintLM <- function(mydata, modForm, nreps) {
  # Fit model
  modLM <- lm(formula(modForm), data = mydata)
  # Ensure reproducibility
  # set.seed(12345)
  # Set up a matrix to store the results (one column per coefficient)
  coefmat <- matrix(NA, nreps, length(coef(modLM)))
  # We need the fitted values and the residuals from the model
  resids <- residuals(modLM)
  preds <- fitted(modLM)
  # Repeatedly generate bootstrapped responses
  for (i in 1:nreps) {
    booty <- preds + sample(resids, replace = TRUE)
    modLM2 <- update(modLM, booty ~ .)
    coefmat[i, ] <- coef(modLM2)
    print(i)
  }
  # Create a data frame to store the coefficient values
  colnames(coefmat) <- names(coef(modLM2))
  coefmat <- data.frame(coefmat)
  return(coefmat)
}

################
# Bootstrap PI #
################

the.replication <- function(reg, s, x_Np1 = 0) {
  # Make bootstrap residuals
  ep.star <- sample(s, size = length(reg$residuals), replace = TRUE)
  # Make bootstrap Y
  y.star <- fitted(reg) + ep.star
  # Do bootstrap regression
  lp <- model.frame(reg)[, 2]
  nt <- model.frame(reg)[, 3]
  bs.reg <- lm(y.star ~ lp:nt - 1)
  # Create bootstrapped adjusted residuals
  bs.lev <- influence(bs.reg)$hat
  bs.s <- residuals(bs.reg)/sqrt(1 - bs.lev)
  bs.s <- bs.s - mean(bs.s)
  # Calculate draw on prediction error
  # xb.xb <- coef(reg)["(Intercept)"] - coef(bs.reg)["(Intercept)"]
  xb.xb <- (coef(reg)[1] - coef(bs.reg)[1])*x_Np1
  return(unname(xb.xb + sample(bs.s, size = 1)))
}

############################
# Bootstrap PI generalized #
############################

the.replication.gen <- function(reg, s, axType, x_Np1 = 0) {
  # Make bootstrap residuals
  ep.star <- sample(s, size = length(reg$residuals), replace = TRUE)
  # Make bootstrap Y
  y.star <- fitted(reg) + ep.star
  # Do bootstrap regression
  lp <- model.frame(reg)[, 2]
  nt <- model.frame(reg)[, 3]
  bs.reg <- lm(y.star ~ lp:nt - 1)
  # Create bootstrapped adjusted residuals
  bs.lev <- influence(bs.reg)$hat
  bs.s <- residuals(bs.reg)/sqrt(1 - bs.lev)
  bs.s <- bs.s - mean(bs.s)
  # Calculate draw on prediction error
  # xb.xb <- coef(reg)["(Intercept)"] - coef(bs.reg)["(Intercept)"]
  xb.xb <- (coef(reg)[axType] - coef(bs.reg)[axType])*x_Np1
  return(unname(xb.xb + sample(bs.s, size = 1)))
}
```
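As a usage note for `var.crit` (a minimal sketch on simulated data, not part of the original analysis; the column names and values below are made up): the criterion can be compared across working correlation structures, the preferred structure being the one whose naive variance deviates least from the empirical (robust) one.
```{r, eval=FALSE}
# Minimal sketch: compare GEE working correlation structures with var.crit()
# (simulated data; smaller value = naive and robust variances agree more closely)
set.seed(1)
simData <- data.frame(id = rep(1:50, each = 3), LP = runif(150, 10, 100))
simData$LR <- 1.2*simData$LP + rnorm(150, sd = 5)
fits <- lapply(c("independence", "exchangeable", "ar1"), function(cs)
  geeglm(LR ~ LP, data = simData, id = id, corstr = cs))
sapply(fits, var.crit)
```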
##Load data
```{r}
# Get file paths
axonNames <- list.files(paste("ProyeccionData/", sep=""), full.names=T)
# Load matlab file
axonFiles <- lapply(axonNames, function(x) readMat(x))
names(axonFiles) <- c("Type1", "Type2", "Type3", "Type4")
# VAR NAMES
# REAL_LENGTH, PROYECTION_LENGTH_XY, PROYECTION_LENGTH_XZ, PROYECTION_LENGTH_YZ

# Extract data
realLength <- lapply(axonFiles, function(x) x$REAL.LENGTH)
planeXY <- lapply(axonFiles, function(x) x$PROYECTION.LENGTH.XY)
planeXZ <- lapply(axonFiles, function(x) x$PROYECTION.LENGTH.XZ)
planeYZ <- lapply(axonFiles, function(x) x$PROYECTION.LENGTH.YZ)

# Get number of neurons per type
numberTypes <- unlist(lapply(realLength, function(x) length(x)))

# Store in data frames by plane
xyData <- data.frame(id = 1:length(unlist(realLength)),
                     LR = unlist(realLength), LP = unlist(planeXY), Plane = rep("XY", sum(numberTypes)),
                     neurType = rep(c("Type1", "Type2", "Type3", "Type4"), numberTypes))
xzData <- data.frame(id = 1:length(unlist(realLength)),
                     LR = unlist(realLength), LP = unlist(planeXZ), Plane = rep("XZ", sum(numberTypes)),
                     neurType = rep(c("Type1", "Type2", "Type3", "Type4"), numberTypes))
yzData <- data.frame(id = 1:length(unlist(realLength)),
                     LR = unlist(realLength), LP = unlist(planeYZ), Plane = rep("YZ", sum(numberTypes)),
                     neurType = rep(c("Type1", "Type2", "Type3", "Type4"), numberTypes))

# Merge into a single data frame and sort by id
allData <- do.call("rbind", list(xyData, xzData, yzData))
allData <- allData[order(allData$id), ]

# Add a different number for every neuron type-plane combination
allData$NeurPlane <- c(rep(c(1,2,3), numberTypes[1]), rep(c(4,5,6), numberTypes[2]),
                       rep(c(7,8,9), numberTypes[3]), rep(c(10,11,12), numberTypes[4]))
allData$interact <- interaction(allData$neurType, allData$Plane, lex.order=T)

# Create data frame w/o Type4
dataNo4 <- allData[allData$neurType != "Type4", ]; dataNo4$neurType <- factor(dataNo4$neurType)

# Data in short format
dataShort <- data.frame(id = 1:length(unlist(realLength)),
                        LR = unlist(realLength), XY = unlist(planeXY), XZ = unlist(planeXZ), YZ = unlist(planeYZ),
                        neurType = rep(c("Type1", "Type2", "Type3", "Type4"), numberTypes))
```
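A quick structural check (a sketch added for convenience; it only uses objects created in the chunk above): the long format should contain one row per neuron and plane.
```{r, eval=FALSE}
# Sanity check (sketch): 3 planes per neuron in the long-format data
stopifnot(nrow(allData) == 3*sum(numberTypes))
with(allData, table(neurType, Plane))
```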
##Error distributions for median(beta)
```{r}
# Estimate error per axon type and plane using the median bootstrapped beta
bootNeur <- readRDS("bootCoefs_perNeuron_OLS.rds")
colnames(bootNeur) <- c("Type1", "Type2", "Type3", "Type4")
medianAlpha <- apply(bootNeur, 2, median)

dataError <- allData
dataError$alpha <- dataError$LR/dataError$LP
dataError$neurError <- NA

pos <- 1
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    # Subset alpha and compute the % error relative to the median beta
    alphaVec <- dataError[dataError$neurType == i & dataError$Plane == j, 8]
    dataError[dataError$neurType == i & dataError$Plane == j, 9] <- 100*(1 - medianAlpha[pos]/alphaVec)
  }
  pos <- pos + 1
}
```
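For reference, the `neurError` values computed above are, with $\alpha_i = \mathrm{LR}_i/\mathrm{LP}_i$ and $\tilde{\beta}$ the median bootstrapped per-neuron OLS coefficient of the corresponding axon type:

$$
\text{error}_i \;=\; 100\left(1 - \frac{\tilde{\beta}}{\alpha_i}\right) \;=\; 100\left(1 - \tilde{\beta}\,\frac{\mathrm{LP}_i}{\mathrm{LR}_i}\right).
$$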
###Plot
####Fixed error
```{r}
# Plot, highlighting errors within ±5%
png(filename=paste("errorDistributions_5error_ols.png", sep=""), type="cairo",
    units="in", width=8, height=10, pointsize=12,
    res=300)
ggplot(dataError, aes(x = `neurError`, y = `interact`, fill = ifelse(..x.. >= -5 & ..x.. <= 5, "within ±5%", "outside ±5%"))) +
  scale_fill_manual(values = c("gray70", "coral2"), name = NULL) +
  geom_density_ridges_gradient(alpha = 0.8) +
  theme_ridges() +
  theme(legend.position = "none") +
  xlab("% Error")
dev.off()
```
####Fixed error ribbon
```{r, fig.height=10, fig.width=6}
dataError$x <- dataError$neurError

# Density plot -----------------------------------------------
jj <- ggplot(dataError, aes(x = x, y = interact)) +
  stat_density_ridges(
    geom = "density_ridges_gradient") +
  theme_ridges() +
  theme(
    plot.margin = margin(t = 1, r = 1, b = 0.5, l = 0.5, "cm")
  )

# Build ggplot and extract data
d <- ggplot_build(jj)$data[[1]]

# Add geom_ribbon for shaded area
jj +
  geom_ribbon(
    data = transform(subset(d, x >= -5 & x <= 5), interact = group),
    aes(x, ymin = ymin, ymax = ymax, group = group),
    fill = "red",
    alpha = 0.5)
```
####Fixed quantiles
```{r, fig.height=10, fig.width=8}
# https://stackoverflow.com/questions/49961582/how-shade-area-under-ggridges-curve/49971690#49971690
png(filename=paste("errorDistributions_ols.png", sep=""), type="cairo",
    units="in", width=8, height=10, pointsize=12,
    res=300)
ggplot(dataError, aes(x = neurError, y = interact, fill = factor(stat(quantile)))) +
  stat_density_ridges(
    geom = "density_ridges_gradient",
    calc_ecdf = TRUE,
    quantiles = c(0.125, 0.875),
    quantile_lines = FALSE) +
  scale_fill_manual(
    # name = "Probability", values = c("#A0A0A0A0", "#FF0000A0", "#A0A0A0A0"),
    name = "Probability", values = c("gray70", "coral2", "gray70"),
    labels = c("(0, 0.125]", "(0.125, 0.875]", "(0.875, 1]")) +
  theme_ridges() +
  theme(legend.position = "none") +
  xlab("% Error") +
  ylab("Axon:Projection combination")
dev.off()
```
###Error vs LP
```{r}
gplots::hist2d(dataError[, c(3,9)], nbins=50)
```
##Error distributions for all betas
```{r}
# Estimate error per axon type and plane using every bootstrapped beta
bootNeur <- readRDS("ProyeccionAnalysis/bootCoefs_perNeuron_OLS.rds")
colnames(bootNeur) <- c("Type1", "Type2", "Type3", "Type4")

dataAllbetas <- allData
dataAllbetas$alpha <- dataAllbetas$LR/dataAllbetas$LP

axType <- 1
listPos <- 1
listContour <- list()
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    # Subset alpha
    alphaVec <- dataAllbetas[dataAllbetas$neurType == i & dataAllbetas$Plane == j, 8]
    # Create empty matrix
    matError <- matrix(ncol = 2000, nrow = numberTypes[axType])
    colCount <- 1
    for (h in bootNeur[, axType]) {
      # Estimate errors for every beta
      matError[, colCount] <- 100*(1 - h/alphaVec)
      colCount <- colCount + 1
    }
    colnames(matError) <- bootNeur[, axType]
    listContour[[listPos]] <- matError
    listPos <- listPos + 1
  }
  axType <- axType + 1
}
```
###Reformat data
```{r}
# Add names to list
names(listContour) <- unique(dataAllbetas$interact)

# Store as vectors
fullVec <- lapply(listContour, function(x) as.vector(x))

# Transform into data frames by neurType
alphaType1 <- bootNeur[, 1]
axType1 <- data.frame(errors = unlist(fullVec[c(1,2,3)]), Plane = rep(c("XY", "XZ", "YZ"), each = numberTypes[1]*2000),
                      alphaBoot = rep(rep(alphaType1, each = numberTypes[1]), 3), axType = rep("Type1", 3*numberTypes[1]*2000))
alphaType2 <- bootNeur[, 2]
axType2 <- data.frame(errors = unlist(fullVec[c(4,5,6)]), Plane = rep(c("XY", "XZ", "YZ"), each = numberTypes[2]*2000),
                      alphaBoot = rep(rep(alphaType2, each = numberTypes[2]), 3), axType = rep("Type2", 3*numberTypes[2]*2000))
alphaType3 <- bootNeur[, 3]
axType3 <- data.frame(errors = unlist(fullVec[c(7,8,9)]), Plane = rep(c("XY", "XZ", "YZ"), each = numberTypes[3]*2000),
                      alphaBoot = rep(rep(alphaType3, each = numberTypes[3]), 3), axType = rep("Type3", 3*numberTypes[3]*2000))
alphaType4 <- bootNeur[, 4]
axType4 <- data.frame(errors = unlist(fullVec[c(10,11,12)]), Plane = rep(c("XY", "XZ", "YZ"), each = numberTypes[4]*2000),
                      alphaBoot = rep(rep(alphaType4, each = numberTypes[4]), 3), axType = rep("Type4", 3*numberTypes[4]*2000))

# Single data frame
axData <- rbind(axType1, axType2, axType3, axType4)
axData$axType <- factor(axData$axType)
```
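A dimension check for the reshaped data (sketch): each of the 12 type-plane combinations contributes `numberTypes[t] * 2000` error values, so the stacked data frame should have `3 * 2000 * sum(numberTypes)` rows.
```{r, eval=FALSE}
# Sanity check (sketch): every neuron appears once per plane and per bootstrapped beta
stopifnot(nrow(axData) == 3*2000*sum(numberTypes))
```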
###Plot
```{r, fig.width=15, fig.height=15}
# https://www.r-graph-gallery.com/2d-density-plot-with-ggplot2.html#distr
# Area + contour global
g <- ggplot(axData, aes(x=alphaBoot, y=errors)) +
  stat_density_2d(aes(fill = after_stat(nlevel)), geom = "polygon") +
  theme_bw()
png(filename=paste("errorDistributions_contour_ols.png", sep=""), type="cairo",
    units="in", width=10, height=12, pointsize=12,
    res=300)
g + facet_grid(axType ~ Plane) + scale_fill_viridis_c()
dev.off()

# Area + contour by axType
ax1 <- ggplot(axType1, aes(x=alphaBoot, y=errors)) +
  stat_density_2d(aes(fill = after_stat(nlevel)), geom = "polygon") +
  theme_bw()
png(filename=paste("errorDistributions_contour_ax1_ols.png", sep=""), type="cairo",
    units="in", width=10, height=5, pointsize=12,
    res=300)
ax1 + facet_grid( ~ Plane) + scale_fill_viridis_c()
dev.off()

ax2 <- ggplot(axType2, aes(x=alphaBoot, y=errors)) +
  stat_density_2d(aes(fill = after_stat(nlevel)), geom = "polygon") +
  theme_bw()
png(filename=paste("errorDistributions_contour_ax2_ols.png", sep=""), type="cairo",
    units="in", width=10, height=5, pointsize=12,
    res=300)
ax2 + facet_grid( ~ Plane) + scale_fill_viridis_c()
dev.off()

ax3 <- ggplot(axType3, aes(x=alphaBoot, y=errors)) +
  stat_density_2d(aes(fill = after_stat(nlevel)), geom = "polygon") +
  theme_bw()
png(filename=paste("errorDistributions_contour_ax3_ols.png", sep=""), type="cairo",
    units="in", width=10, height=5, pointsize=12,
    res=300)
ax3 + facet_grid( ~ Plane) + scale_fill_viridis_c()
dev.off()

ax4 <- ggplot(axType4, aes(x=alphaBoot, y=errors)) +
  stat_density_2d(aes(fill = after_stat(nlevel)), geom = "polygon") +
  theme_bw()
png(filename=paste("errorDistributions_contour_ax4_ols.png", sep=""), type="cairo",
    units="in", width=10, height=5, pointsize=12,
    res=300)
ax4 + facet_grid( ~ Plane) + scale_fill_viridis_c()
dev.off()
```
##Bootstrap Pred Interval
###Example with mean LP
```{r}
# OLS
my.reg <- lm(LR ~ LP:neurType - 1, allData)
summary(my.reg)

# Mean LP and LR per axon type
meanLP <- aggregate(LP ~ neurType, allData, mean)
meanLR <- aggregate(LR ~ neurType, allData, mean)

# Predict LR for neurType1
y.p <- coef(my.reg)[1]*meanLP[1,2]
y.p

# Create adjusted residuals
leverage <- influence(my.reg)$hat
my.s.resid <- residuals(my.reg)/sqrt(1-leverage)
my.s.resid <- my.s.resid - mean(my.s.resid)

reg <- my.reg
s <- my.s.resid

# Do bootstrap with 100 replications
set.seed(123456)
ep.draws <- replicate(n=100, the.replication(reg=my.reg, s=my.s.resid, x_Np1 = meanLP[1,2]))

# Create prediction interval
y.p + quantile(ep.draws, probs=c(0.025,0.975))

# Prediction interval using the normality assumption
predict(my.reg,
        newdata=data.frame(neurType = "Type1",
                           LP = meanLP[1,2]), interval="prediction", level=0.95)
```
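To summarize what `the.replication` is doing in this chunk: each bootstrap replicate draws a prediction error

$$
\varepsilon^{*}_{b} \;=\; \left(\hat{\beta} - \hat{\beta}^{*}_{b}\right) x_{N+1} \;+\; e^{*}_{b},
$$

where $\hat{\beta}^{*}_{b}$ is the coefficient of the regression refitted to $y^{*} = \hat{y} + e^{*}$, and $e^{*}_{b}$ is one draw from the centred, leverage-adjusted residuals. The bootstrap prediction interval is then $\hat{y}_{p} + \left[q_{0.025},\, q_{0.975}\right]$ of these $\varepsilon^{*}_{b}$, which is what `y.p + quantile(ep.draws, probs=c(0.025,0.975))` computes above.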
###Example with all LPs
```{r}
# OLS
my.reg <- lm(LR ~ LP:neurType - 1, allData)

# List to store axon types
axList <- list()
# List to store random selection
axRand <- list()
axCount <- 1

# 1/3 random observations per plane, no replacement
numExtract <- floor(1/3*numberTypes)

# Original data frame copy
dataToBoot <- allData

# Ensure reproducibility
set.seed(123456)

for (i in c("Type1", "Type2", "Type3", "Type4")) {
  # List to store planes
  planeList <- list()
  # List to store random selection
  planeRand <- list()
  planeCount <- 1
  for (j in c("XY", "XZ", "YZ")) {
    # Subset plane and axon type
    dataGroup <- dataToBoot[dataToBoot$neurType == i & dataToBoot$Plane == j, ]
    # Random sample of N=numExtract (variable per axon type)
    dataSub <- sample_n(dataGroup, numExtract[axCount], replace = FALSE)
    # Store selected for later
    planeRand[[planeCount]] <- dataSub
    # Remove from data so they can not be selected again
    # dataToBoot <- dataToBoot[! dataToBoot$id %in% dataSub$id, ]
    # List to store errors
    epList <- list()
    epCount <- 1
    for (k in dataSub[, "LP"]) {
      # Predict LR for neurType axCount
      y.p <- coef(my.reg)[axCount]*k
      # Create adjusted residuals
      leverage <- influence(my.reg)$hat
      my.s.resid <- residuals(my.reg)/sqrt(1-leverage)
      my.s.resid <- my.s.resid - mean(my.s.resid)
      # Do bootstrap with 100 replications
      ep.draws <- replicate(n=100, the.replication.gen(reg = my.reg, s = my.s.resid, axCount, x_Np1 = k))
      # Store in list
      epList[[epCount]] <- ep.draws
      epCount <- epCount + 1
      print(epCount)
    }
    # Store in list
    planeList[[planeCount]] <- epList
    planeCount <- planeCount + 1
  }
  names(planeList) <- c("XY", "XZ", "YZ")
  names(planeRand) <- c("XY", "XZ", "YZ")
  # Store in list
  axList[[axCount]] <- planeList
  axRand[[axCount]] <- planeRand
  axCount <- axCount + 1
  # Renew original data for next axon type
  dataToBoot <- allData
}
names(axList) <- c("Type1", "Type2", "Type3", "Type4")
names(axRand) <- c("Type1", "Type2", "Type3", "Type4")

saveRDS(list(axList, axRand), "bootPI_full_Replacement.rds")
```
####Check convergence
```{r}
# Load object
bootCheck <- readRDS("bootPI_full.rds")
# First list is prediction error, second is random subset
axList <- bootCheck[[1]]
axRand <- bootCheck[[2]]

# One example
# OLS
my.reg <- lm(LR ~ LP:neurType - 1, allData)

# Estimate and store in list
lrpList <- list()
lrpCount <- 1
axCount <- 1
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    # Extract LP and LR values
    lp <- axRand[[i]][[j]]$LP
    lr <- axRand[[i]][[j]]$LR
    for (k in 1:length(lp)) {
      # Predicted LR
      lr.p <- coef(my.reg)[axCount]*lp[k]
      # Extract absolute errors
      ae <- axList[[i]][[j]][[k]]
      # Empty vectors for storage
      lrp.Low <- numeric()
      lrp.Up <- numeric()
      # Estimate PI95 for boot n=4...100
      for (l in 4:100) {
        lrp <- lr.p + quantile(ae[1:l], probs = c(0.025,0.975))
        # Store in vectors
        lrp.Low <- c(lrp.Low, lrp[1])
        lrp.Up <- c(lrp.Up, lrp[2])
      }
      # Store in data frame
      lrpFrame <- data.frame(nboot = 4:100, lowLim = lrp.Low, upLim = lrp.Up, LR = rep(lr[k], length(4:100)))
      # Store in list
      lrpList[[lrpCount]] <- lrpFrame
      lrpCount <- lrpCount + 1
    }
  }
  axCount <- axCount + 1
}
```
#####Reformat
```{r}
# 1/3 random observations per plane, no replacement
numExtract <- floor(1/3*numberTypes)

checkFrame <- do.call(rbind, lrpList)
checkFrame$neurType <- rep(c("Type1", "Type2", "Type3", "Type4"), times = numExtract*(length(4:100))*3)
checkFrame$Plane <- c(rep(c("XY", "XZ", "YZ"), each = numExtract[1]*length(4:100)),
                      rep(c("XY", "XZ", "YZ"), each = numExtract[2]*length(4:100)),
                      rep(c("XY", "XZ", "YZ"), each = numExtract[3]*length(4:100)),
                      rep(c("XY", "XZ", "YZ"), each = numExtract[4]*length(4:100)))

lowMean <- aggregate(lowLim ~ neurType + Plane + nboot, checkFrame, mean)
upMean <- aggregate(upLim ~ neurType + Plane + nboot, checkFrame, mean)
LRmean <- aggregate(LR ~ neurType + Plane + nboot, checkFrame, mean)

# Relative change (%) between consecutive mean lower limits (Type1, XY)
cumError <- with(lowMean[lowMean$neurType == "Type1" & lowMean$Plane == "XY", ],
                 abs((lowLim[-1] - lowLim[-length(lowLim)])/lowLim[-length(lowLim)])*100)
```
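The relative-change criterion in `cumError` above only looks at Type1/XY; a sketch of the same check applied to every type-plane combination (the helper `relChange` is ad hoc, not part of the original code):
```{r, eval=FALSE}
# Sketch: step-to-step relative change (%) of the mean lower PI limit, per combination
relChange <- function(x) abs(diff(x)/head(x, -1))*100
convList <- by(lowMean, list(lowMean$neurType, lowMean$Plane),
               function(d) relChange(d$lowLim[order(d$nboot)]))
sapply(convList, max)  # largest remaining fluctuation per type-plane combination
```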
#####Plot
```{r, fig.height=20, fig.width=30}
# Separate plots
png(filename=paste("convergence_bootPI_separate.png", sep=""), type="cairo",
    units="in", width=30, height=20, pointsize=12,
    res=300)
par(mfrow=c(4,6), cex = 1.1)
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    plot(lowLim ~ nboot, lowMean[lowMean$neurType == i & lowMean$Plane == j, ], type = "l", lwd = 2)
    plot(upLim ~ nboot, upMean[upMean$neurType == i & upMean$Plane == j, ], type = "l", lwd = 2)
  }
}
dev.off()

# Same plot
png(filename=paste("convergence_bootPI_together.png", sep=""), type="cairo",
    units="in", width=15, height=20, pointsize=12,
    res=300)
par(mfrow=c(4,3), cex = 1.1)
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    minY <- min(lowMean[lowMean$neurType == i & lowMean$Plane == j, "lowLim"])
    maxY <- max(upMean[upMean$neurType == i & upMean$Plane == j, "upLim"])
    plot(lowLim ~ nboot, lowMean[lowMean$neurType == i & lowMean$Plane == j, ], type = "l", lwd = 2, ylim = c(minY, maxY))
    lines(upLim ~ nboot, upMean[upMean$neurType == i & upMean$Plane == j, ], type = "l", lwd = 2)
    # lines(LR ~ nboot, LRmean[LRmean$neurType == i & LRmean$Plane == j, ], type = "l", lwd = 2, col = "red")
  }
}
dev.off()

# Diff plot
png(filename=paste("convergence_bootPI_Diff.png", sep=""), type="cairo",
    units="in", width=15, height=20, pointsize=12,
    res=300)
par(mfrow=c(4,3), cex = 1.1)
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    minY <- min(lowMean[lowMean$neurType == i & lowMean$Plane == j, "lowLim"])
    maxY <- max(upMean[upMean$neurType == i & upMean$Plane == j, "upLim"])
    diflu <- upMean[upMean$neurType == i & upMean$Plane == j, "upLim"] - lowMean[lowMean$neurType == i & lowMean$Plane == j, "lowLim"]
    plot(4:100, diflu, type = "l", col = "red", lwd = 2)
  }
}
dev.off()

# Same plot random intervals
for (m in 1:4) {
  png(filename=paste("convergence_bootPI_together_example_rand", m, ".png", sep=""), type="cairo",
      units="in", width=15, height=20, pointsize=12,
      res=300)
  par(mfrow=c(4,3), cex = 1.1)
  for (i in c("Type1", "Type2", "Type3", "Type4")) {
    for (j in c("XY", "XZ", "YZ")) {
      subData <- checkFrame[checkFrame$neurType == i & checkFrame$Plane == j, ]
      randLR <- sample(subData[, "LR"], 1)
      # randLR <- sample(subData[which(subData$lowLim > subData$LR), "LR"], 1)
      minY <- min(subData[subData$LR == randLR, "lowLim"])
      maxY <- max(subData[subData$LR == randLR, "upLim"])
      plot(lowLim ~ nboot, subData[subData$LR == randLR, ], type = "l", lwd = 2, ylim = c(minY, maxY))
      lines(upLim ~ nboot, subData[subData$LR == randLR, ], type = "l", lwd = 2)
      lines(LR ~ nboot, subData[subData$LR == randLR, ], type = "l", lwd = 2, col = "red")
    }
  }
  dev.off()
}

# Separate with lines
png(filename=paste("convergence_bootPI_separate_FULL.png", sep=""), type="cairo",
    units="in", width=30, height=20, pointsize=12,
    res=300)
par(mfrow=c(4,6), cex = 1.1)
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    plot(lowLim ~ nboot, checkFrame[checkFrame$neurType == i & checkFrame$Plane == j, ], type = "l", col = "lightgray")
    lines(lowLim ~ nboot, lowMean[lowMean$neurType == i & lowMean$Plane == j, ], type = "l", lwd = 2)
    plot(upLim ~ nboot, checkFrame[checkFrame$neurType == i & checkFrame$Plane == j, ], type = "l", col = "lightgray")
    lines(upLim ~ nboot, upMean[upMean$neurType == i & upMean$Plane == j, ], type = "l", lwd = 2)
  }
}
dev.off()
```
####Estimate percentage errors
```{r, fig.width=8, fig.height=8}
# 1/3 random observations per plane, no replacement
numExtract <- floor(1/3*numberTypes)

# Load object
bootPI <- readRDS("ProyeccionAnalysis/bootPI_full_Replacement_TRUE.rds")
# First list is prediction error, second is random subset
axList <- bootPI[[1]]
axRand <- bootPI[[2]]

# Estimate and store in list
peList <- list()
peCount <- 1
for (i in c("Type1", "Type2", "Type3", "Type4")) {
  for (j in c("XY", "XZ", "YZ")) {
    # Extract LR values
    lr <- axRand[[i]][[j]]$LR
    for (k in 1:length(lr)) {
      # Divide absolute errors by LR to get percentage errors
      pe <- axList[[i]][[j]][[k]] / lr[k]
      # Store in list
      peList[[peCount]] <- pe*100
      peCount <- peCount + 1
    }
  }
}

# Reformat as data frame
# for bootPI
# peFrame <- data.frame(pe = unlist(peList),
#                       neurType = rep(c("Type1", "Type2", "Type3", "Type4"), each = 20*100*3),
#                       Plane = rep(rep(c("XY", "XZ", "YZ"), each = 20*100), 4))
# for bootPI_full
peFrame <- data.frame(pe = unlist(peList),
                      neurType = rep(c("Type1", "Type2", "Type3", "Type4"), times = numExtract*100*3),
                      Plane = c(rep(c("XY", "XZ", "YZ"), each = numExtract[1]*100),
                                rep(c("XY", "XZ", "YZ"), each = numExtract[2]*100),
                                rep(c("XY", "XZ", "YZ"), each = numExtract[3]*100),
                                rep(c("XY", "XZ", "YZ"), each = numExtract[4]*100)))
peFrame$interact <- interaction(peFrame$neurType, peFrame$Plane, lex.order=T)
```
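As a numeric companion to the plots below (a sketch, not in the original code): the share of bootstrap prediction errors within ±5% can be tabulated per axon type and projection directly from `peFrame`.
```{r, eval=FALSE}
# Sketch: proportion of |% error| <= 5 per axon type and projection
aggregate(pe ~ neurType + Plane, peFrame, function(x) mean(abs(x) <= 5))
```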
#####Plot density and histogram
```{r}
# Plot, highlighting errors within ±5%
# peGroup <- peFrame %>% group_by(interact)
# peSample <- sample_n(peGroup, 100)
png(filename=paste("prederrorDistributions_ols_FULLsample_Repeat_TRUE.png", sep=""), type="cairo",
    units="in", width=8, height=10, pointsize=12,
    res=300)
(p <- ggplot(peFrame, aes(x = `pe`, y = `interact`, fill = ifelse(..x.. >= -5 & ..x.. <= 5, "within ±5%", "outside ±5%"))) +
   scale_fill_manual(values = c("gray70", "coral2"), name = NULL) +
   geom_density_ridges_gradient(alpha = 0.8) +
   theme_ridges() +
   theme(legend.position = "none") +
   xlab("% Error") +
   xlim(c(-40,40)))
dev.off()

png(filename=paste("prederrorHistogram_ols_FULLsample_Repeat_TRUE.png", sep=""), type="cairo",
    units="in", width=8, height=10, pointsize=12,
    res=300)
# Histogram
# nint 300 for bootPI
# nint 500 for bootPI_full
# nint 400 for bootPI_full_replace
(h <- histogram( ~ pe | Plane + neurType, peFrame, nint = 400, type ="density", xlim = c(-50,50)))
dev.off()
```
#####Plot errors vs cumulative probability
```{r, fig.height=5, fig.width=20}
# Inverse quantile
quantInv <- function(distr, value) ecdf(distr)(value)

# ecdf plot example
# plot(ecdf(peFrame[peFrame$interact == "Type1.XY", "pe"]))
# lines(ecdf(peFrame[peFrame$interact == "Type1.XZ", "pe"]), col = "red")
# lines(ecdf(peFrame[peFrame$interact == "Type1.YZ", "pe"]), col = "blue")

# P(|error| <= j) for a sequence of error thresholds j
probList <- list()
binProb <- 0.5
probSeq <- seq(binProb, 100, binProb)
for (i in unique(peFrame$interact)) {
  dataProb <- peFrame[peFrame$interact == i, "pe"]
  probVec <- numeric()
  for (j in probSeq) {
    errProb <- quantInv(dataProb, j) - quantInv(dataProb, -j)
    probVec <- c(probVec, errProb)
  }
  probList[[i]] <- probVec
}

# Plot with limited axis
setwd("ProyeccionAnalysis/")
png(filename=paste("errorProbabilityCum_bin", binProb, ".png", sep=""), type="cairo",
    units="in", width=20, height=5, pointsize=12,
    res=300)
par(mfrow = c(1,4))
axCount <- 1
for (i in seq(1,10,3)) {
  plot(probSeq, probList[[i]], type = "l", col = "green", lwd = 1.5, ylim = c(0,1), xlim = c(0.5, 40),
       ylab = "Cumulative probability", xlab = "% Error", main = paste("Axon Type", axCount))
  lines(probSeq, probList[[i + 1]], col = "red", lwd = 1.5)
  lines(probSeq, probList[[i + 2]], col = "blue", lwd = 1.5)
  abline(v = 5, lty = "dashed", col = "gray")
  abline(h = 0.95, lty = "dashed", col = "gray")
  legend("bottomright", c("XY", "XZ", "YZ"), lwd = 1.5, col = c("green", "red", "blue"))
  axCount <- axCount + 1
}
dev.off()

# Plot with free axis
png(filename=paste("errorProbabilityCum_bin", binProb, "_freeAxis.png", sep=""), type="cairo",
    units="in", width=20, height=5, pointsize=12,
    res=300)
par(mfrow = c(1,4))
axCount <- 1
for (i in seq(1,10,3)) {
  plot(probSeq, probList[[i]], type = "l", col = "green", lwd = 1.5,
       ylab = "Cumulative probability", xlab = "% Error", main = paste("Axon Type", axCount))
  lines(probSeq, probList[[i + 1]], col = "red", lwd = 1.5)
  lines(probSeq, probList[[i + 2]], col = "blue", lwd = 1.5)
  abline(v = 5, lty = "dashed", col = "gray")
  abline(h = 0.95, lty = "dashed", col = "gray")
  legend("bottomright", c("XY", "XZ", "YZ"), lwd = 1.5, col = c("green", "red", "blue"))
  axCount <- axCount + 1
}
dev.off()
```
#####Plot errors vs inverse cumulative probability
```{r, fig.height=5, fig.width=20}
# Inverse quantile
quantInv <- function(distr, value) ecdf(distr)(value)

# ecdf plot example
# plot(ecdf(peFrame[peFrame$interact == "Type1.XY", "pe"]))
# lines(ecdf(peFrame[peFrame$interact == "Type1.XZ", "pe"]), col = "red")
# lines(ecdf(peFrame[peFrame$interact == "Type1.YZ", "pe"]), col = "blue")

# P(|error| > j) for a sequence of error thresholds j
probList <- list()
binProb <- 0.5
probSeq <- seq(binProb, 100, binProb)
for (i in unique(peFrame$interact)) {
  dataProb <- peFrame[peFrame$interact == i, "pe"]
  probVec <- numeric()
  for (j in probSeq) {
    errProb <- 1 - (quantInv(dataProb, j) - quantInv(dataProb, -j))
    probVec <- c(probVec, errProb)
  }
  probList[[i]] <- probVec
}

# Plot with limited axis
setwd("ProyeccionAnalysis/")
png(filename=paste("errorProbabilityCumInv_bin", binProb, ".png", sep=""), type="cairo",
    units="in", width=20, height=5, pointsize=12,
    res=300)
par(mfrow = c(1,4))
axCount <- 1
for (i in seq(1,10,3)) {
  plot(probSeq, probList[[i]], type = "l", col = "green", lwd = 1.5, ylim = c(0,1), xlim = c(0.5, 40),
       ylab = "Inverse cumulative probability", xlab = "% Error", main = paste("Axon Type", axCount))
  lines(probSeq, probList[[i + 1]], col = "red", lwd = 1.5)
  lines(probSeq, probList[[i + 2]], col = "blue", lwd = 1.5)
  abline(v = 5, lty = "dashed", col = "gray")
  abline(h = 0.05, lty = "dashed", col = "gray")
  legend("bottomright", c("XY", "XZ", "YZ"), lwd = 1.5, col = c("green", "red", "blue"))
  axCount <- axCount + 1
}
dev.off()

# Plot with free axis
png(filename=paste("errorProbabilityCumInv_bin", binProb, "_freeAxis.png", sep=""), type="cairo",
    units="in", width=20, height=5, pointsize=12,
    res=300)
par(mfrow = c(1,4))
axCount <- 1
for (i in seq(1,10,3)) {
  plot(probSeq, probList[[i]], type = "l", col = "green", lwd = 1.5,
       ylab = "Inverse cumulative probability", xlab = "% Error", main = paste("Axon Type", axCount))
  lines(probSeq, probList[[i + 1]], col = "red", lwd = 1.5)
  lines(probSeq, probList[[i + 2]], col = "blue", lwd = 1.5)
  abline(v = 5, lty = "dashed", col = "gray")
  abline(h = 0.05, lty = "dashed", col = "gray")
  legend("bottomright", c("XY", "XZ", "YZ"), lwd = 1.5, col = c("green", "red", "blue"))
  axCount <- axCount + 1
}
dev.off()
```
#####Plot errors vs probability
```{r, fig.height=5, fig.width=20}
# Inverse quantile
quantInv <- function(distr, value) ecdf(distr)(value)

# ecdf plot example
# plot(ecdf(peFrame[peFrame$interact == "Type1.XY", "pe"]))
# lines(ecdf(peFrame[peFrame$interact == "Type1.XZ", "pe"]), col = "red")
# lines(ecdf(peFrame[peFrame$interact == "Type1.YZ", "pe"]), col = "blue")

# Probability of the error lying in a small interval (+/- intProb) around +j or -j
probList <- list()
binProb <- 1
probSeq <- seq(0, 100, binProb)
intProb <- 0.1
for (i in unique(peFrame$interact)) {
  dataProb <- peFrame[peFrame$interact == i, "pe"]
  probVec <- numeric()
  for (j in probSeq) {
    errProbPos <- quantInv(dataProb, j + intProb) - quantInv(dataProb, j - intProb)
    errProbNeg <- quantInv(dataProb, -j + intProb) - quantInv(dataProb, -j - intProb)
    probVec <- c(probVec, errProbPos + errProbNeg)
  }
  probList[[i]] <- probVec
}

# Plot with limited axis
setwd("ProyeccionAnalysis/")
png(filename=paste("errorProbability_bin", binProb, ".png", sep=""), type="cairo",
    units="in", width=20, height=5, pointsize=12,
    res=300)
par(mfrow = c(1,4))
axCount <- 1
for (i in seq(1,10,3)) {
  plot(probSeq, probList[[i]], type = "l", col = "green", lwd = 1.5, ylim = c(0,0.07), xlim = c(0.5, 40),
       ylab = "Probability", xlab = "% Error", main = paste("Axon Type", axCount))
  lines(probSeq, probList[[i + 1]], col = "red", lwd = 1.5)
  lines(probSeq, probList[[i + 2]], col = "blue", lwd = 1.5)
  abline(v = 5, lty = "dashed", col = "gray")
  legend("bottomright", c("XY", "XZ", "YZ"), lwd = 1.5, col = c("green", "red", "blue"))
  axCount <- axCount + 1
}
dev.off()

# Plot with free axis
png(filename=paste("errorProbability_bin", binProb, "_freeAxis.png", sep=""), type="cairo",
    units="in", width=20, height=5, pointsize=12,
    res=300)
par(mfrow = c(1,4))
axCount <- 1
for (i in seq(1,10,3)) {
  plot(probSeq, probList[[i]], type = "l", col = "green", lwd = 1.5,
       ylab = "Probability", xlab = "% Error", main = paste("Axon Type", axCount))
  lines(probSeq, probList[[i + 1]], col = "red", lwd = 1.5)
  lines(probSeq, probList[[i + 2]], col = "blue", lwd = 1.5)
  abline(v = 5, lty = "dashed", col = "gray")
  legend("bottomright", c("XY", "XZ", "YZ"), lwd = 1.5, col = c("green", "red", "blue"))
  axCount <- axCount + 1
}
dev.off()
```