highspeed-analysis-repetition.Rmd

## Repetition trials

### Preparation

#### Initialization

We load the data and relevant functions:

```{r, warning=FALSE, message=FALSE}
# find the path to the root of this project:
if (!requireNamespace("here")) install.packages("here")
if (basename(here::here()) == "highspeed") {
  path_root = here::here("highspeed-analysis")
} else {
  path_root = here::here()
}
# create a list of participants to exclude based on behavioral performance:
sub_exclude <- c("sub-24", "sub-31", "sub-37", "sub-40")
```

The next step is only evaluated during manual execution:

```{r, eval=FALSE}
# source all relevant functions from the setup R script:
source(file.path(path_root, "code", "highspeed-analysis-setup.R"))
```

#### Prepare events and decoding data

We prepare the behavioral events data and the decoding data of repetition trials:

```{r}
# create a subset of the events data table only including repetition task events:
dt_events_rep = dt_events %>%
  filter(condition == "repetition" & trial_type == "stimulus") %>%
  setDT(.)
# create a data table containing all classifier predictions on repetition trials:
dt_pred_rep = dt_pred %>%
  # create a subset of the decoding data only including the repetition task data:
  filter(condition == "repetition" & class != "other" & mask == "cv" & stim != "cue") %>%
  setDT(.) %>%
  # filter excluded participants:
  filter(!(id %in% sub_exclude)) %>%
  setDT(.) %>%
  # add the serial position, change condition and target cue to the repetition data table:
  .[, c("position", "change", "trial_cue") := get_pos(.SD, dt_events_rep),
    by = .(id, trial, class), .SDcols = c("id", "trial", "class")] %>%
  # add a counter for the number of change trials in the repetition data:
  .[, by = .(id, classifier, class, change), ":=" (
    trial_change = rep(1:length(unique(trial)), each = max(seq_tr))
  )] %>%
  # add random position indices for out-of-class stimuli:
  .[is.na(position), by = .(id, classification, trial), ":=" (
    position = rep(permute(3:5), each = max(seq_tr))
  )] %>%
  # check if there are any NA values:
  verify(!is.na(position)) %>%
  # create a new variable containing the position label by copying the position:
  mutate(position_label = position) %>%
  # re-label all positions > 2 to "none":
  transform(position_label = ifelse(position_label > 2, "none", position_label)) %>%
  setDT(.) %>%
  # create a new variable counting the occurrence of each event position:
  .[position != 2, ":=" (occurence = change - 1)] %>%
  .[position == 2 & change <= 9, ":=" (occurence = max(change) + 1 - change)] %>%
  .[position == 2 & change == 16, ":=" (occurence = max(change) + 1 - change)]
```
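
For reference, a quick sketch of what the `occurence` coding above implies (illustration only, derived from the assignment rules in the chunk; `change` marks the serial position at which the stream switches from the first to the second unique stimulus):

```{r, eval=FALSE}
# illustration only: repetitions of the first and second unique stimulus implied
# by the coding rules above, for the regular change conditions 2-9 and the
# special 16-item condition:
change <- c(2:9, 16)
occurence_first <- change - 1                            # first unique stimulus
occurence_second <- ifelse(change <= 9, 10 - change, 1)  # second unique stimulus
data.frame(change, occurence_first, occurence_second)
```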

### Probability time-courses

First, we average across trials for each factor grouping and calculate the mean classification probability for each serial event. Note that this approach averages across the specific stimuli used in any given trial, as the specific stimulus identities are not considered important here. We verify that the correct number of trials was used for the calculations. Next, we average across participants: we calculate the average probability of each serial event as well as the standard error of the mean.
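
For reference, the standard error of the mean computed across participants in the chunk below is

$$
\mathrm{SEM} = \frac{s}{\sqrt{n}},
$$

where $s$ is the standard deviation of the participant-wise mean probabilities and $n = 36$ is the number of included participants.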
```{r}
dt_pred_rep_timecourses = dt_pred_rep %>%
  # calculate the mean probability for every repetition condition and TR
  .[, by = .(classification, id, seq_tr, change, position), .(
    num_events = .N,
    mean_probability = mean(probability * 100)
  )] %>% verify(num_events == 5) %>%
  # average mean probability across participants:
  .[, by = .(classification, seq_tr, change, position), .(
    num_sub = .N,
    mean_probability = mean(mean_probability),
    sem_upper = mean(mean_probability) + (sd(mean_probability)/sqrt(.N)),
    sem_lower = mean(mean_probability) - (sd(mean_probability)/sqrt(.N))
  )] %>%
  verify(all(num_sub == 36))
```

We define the colors used for plotting as well as the facet labels:

```{r}
# define colors for plotting:
colors_inseq = colorRampPalette(c("dodgerblue", "red"), space = "Lab")(2)
colors_outseq = colorRampPalette(c("gray70", "gray90"), alpha = TRUE)(3)
colors = c(colors_inseq, colors_outseq)
# change the facet labels such that it shows the time-point of the change from
# the first to the second unique stimulus:
facet_labels_new = c(sort(unique(paste0(dt_pred_rep_timecourses$change[
  dt_pred_rep_timecourses$change != 16] - 1, "/", 10 - dt_pred_rep_timecourses$change[
  dt_pred_rep_timecourses$change != 16]))), unique(paste0(dt_pred_rep_timecourses$change[
  dt_pred_rep_timecourses$change == 16] - 1, "/1")))
facet_labels_new[1] = paste("short", sprintf('\u2192'), "long")
facet_labels_new[1] = "Forward interference"
facet_labels_new[length(facet_labels_new) - 1] = "Backward interference"
facet_labels_old = as.character(sort(unique(dt_pred_rep_timecourses$change)))
names(facet_labels_new) = facet_labels_old
```
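
As a quick sanity check of the label construction (illustration only, not evaluated when knitting): the condition with the change at serial position 5 should read "4/5" (four presentations of the first stimulus, five of the second), while the two extreme conditions carry the interference labels:

```{r, eval=FALSE}
# illustration only: inspect the facet labels assigned to selected change conditions
facet_labels_new[c("2", "5", "9", "16")]
```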

#### Figure 4a

We plot the probability time courses:

```{r, echo=TRUE}
trs = c(2, 7)
plot_rep_probas <- function(data, xmin = 1, xmax = 13) {
  # reduce the data frame to plot rectangles (see below):
  data_factor_reduced = subset(data, position == 1 & seq_tr == 1)
  ggplot(data, aes(x = as.numeric(seq_tr), y = as.numeric(mean_probability),
                   color = as.factor(position), fill = as.factor(position))) +
    facet_wrap(~ as.factor(change), ncol = 3, labeller = as_labeller(facet_labels_new)) +
    geom_rect(data = data_factor_reduced, fill = "gray", alpha = 0.15, color = NA,
              aes(xmin = xmin, xmax = xmax, ymin = 0, ymax = 60)) +
    geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
    geom_line() +
    xlab("Time from sequence onset (TRs)") + ylab("Probability (%)") +
    annotate("text", x = 13, y = 0, label = "1 TR = 1.25 s",
             hjust = 1, size = rel(2)) +
    scale_colour_manual(name = "Serial event", values = colors) +
    scale_fill_manual(name = "Serial event", values = colors) +
    scale_x_continuous(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
    coord_capped_cart(left = "both", bottom = "both", ylim = c(0, 60)) +
    theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
    guides(color = guide_legend(nrow = 1)) +
    theme(legend.position = "bottom", legend.direction = "horizontal",
          legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
          legend.box.margin = margin(t = -5, r = 0, b = 10, l = 0)) +
    theme(panel.background = element_blank()) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank())
}
plot_data = dt_pred_rep_timecourses %>% filter(classification == "ovr" & change %in% c(2, 9))
fig_a = plot_rep_probas(data = plot_data, xmin = trs[1] - 0.5, xmax = trs[2] + 0.5)
fig_s1_a = plot_rep_probas(data = subset(dt_pred_rep_timecourses, classification == "ovr"),
                           xmin = trs[1] - 0.5, xmax = trs[2] + 0.5)
fig_a; fig_s1_a;
```

```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_timecourse_forward_backward.pdf",
       plot = fig_a, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 4.5, height = 2.5)
ggsave(filename = "highspeed_plot_decoding_repetition_timecourses_supplement.pdf",
       plot = fig_s1_a, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 6, height = 5)
```

### Mean probabilities

#### Figure 4b

We calculate the mean decoding probabilities for each event:

```{r, results="hold"}
# define the subset of selected TRs:
trs = seq(2, 7)
# calculate the mean probability for the event types across conditions:
dt_pred_rep_prob = dt_pred_rep %>%
  # only consider the two extreme conditions (2 and 9):
  filter(change %in% c(2, 9)) %>% verify(length(unique(change)) == 2) %>% setDT(.) %>%
  # verify that the number of repetition trials per condition is correct:
  verify(all(.[, by = .(classification, id, change), .(
    num_trials = length(unique(trial))
  )]$num_trials == 5)) %>%
  # filter specific TRs:
  filter(seq_tr %in% trs) %>%
  transform(probability_z = scale(probability)) %>%
  filter(classification == "ovr") %>% setDT(.)
# calculate the mean probability for each position label:
dt_pred_rep_mean_prob = dt_pred_rep_prob %>%
  # calculate the mean probability for each position label on each trial:
  .[, by = .(id, classification, trial, position_label), .(
    num_events = .N,
    probability = mean(probability * 100)
  )] %>%
  # verify that the number of events for each position label is correct:
  verify(all(num_events %in% c(length(trs), length(trs) * 3))) %>%
  # set the position label as factor and z-score the probability values:
  mutate(position_label = as.factor(position_label)) %>%
  transform(probability_z = scale(probability)) %>%
  setorder(id, classification, trial, position_label)
# average the data for each position label across trials:
dt_pred_rep_mean_prob_plot = dt_pred_rep_mean_prob %>% setDT(.) %>%
  .[, by = .(id, classification, position_label), .(
    num_trials = .N,
    probability = mean(probability)
  )] %>% verify(all(num_trials == 10))
```

```{r}
# define linear mixed effects model with by-participant random intercepts and slopes:
lme_rep_mean_prob = lmer(probability ~ position_label + (1 + position_label | id),
                         data = subset(dt_pred_rep_mean_prob, classification == "ovr"),
                         control = lcctrl)
summary(lme_rep_mean_prob)
anova(lme_rep_mean_prob)
emmeans_results = emmeans(lme_rep_mean_prob, list(pairwise ~ position_label))
print(emmeans_results)
emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
```

```{r}
dt_significance = data.table(
  contrast = emmeans_results$contrast,
  position_label = rep(c("1", "2", "none"), each = 2),
  group = c(1.5, 2.0, 2.5),
  probability = c(55, 75, 65),
  pvalue = emmeans_pvalues
)
```

```{r}
fig_b = ggplot(data = subset(dt_pred_rep_mean_prob_plot, classification == "ovr"), aes(
    x = as.factor(position_label), y = as.numeric(probability),
    fill = as.factor(position_label))) +
  geom_flat_violin(scale = "width", trim = FALSE, aes(color = NA),
                   position = position_nudge(x = 0.2, y = 0), alpha = 0.4) +
  geom_point(stat = "summary", fun = "mean", color = "black", pch = 23, size = 3,
             position = position_nudge(x = 0.125, y = 0)) +
  geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0.0,
                position = position_nudge(x = 0.125, y = 0), color = "black") +
  geom_boxplot(outlier.colour = "black", outlier.shape = NA, alpha = 0.5,
               notch = FALSE, outlier.alpha = 1, width = 0.05, color = "black",
               position = position_nudge(x = 0.25, y = 0)) +
  geom_point(position = position_jitter(width = .05, height = 0, seed = 4),
             pch = 21, alpha = 1, color = "black") +
  xlab("Serial event") + ylab("Probability (%)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  #scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", ylim = c(0, 100)) +
  theme(legend.position = "bottom", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
  theme(panel.background = element_blank()) +
  #theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  geom_line(data = dt_significance, aes(group = group),
            linetype = "solid", position = position_nudge(x = 0.1, y = 0)) +
  geom_text(data = dt_significance %>% distinct(group, probability, .keep_all = TRUE),
            aes(x = group, y = probability + 1.5, label = pvalue), color = "black",
            position = position_nudge(x = 0.125, y = 4), parse = FALSE) +
  theme(axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_blank(),
        axis.ticks.x = element_line(colour = "white"))
fig_b
```

```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_probabilities_mean_all_trs.pdf",
       plot = fig_b, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 4, height = 3)
```

#### Figure 4c

```{r}
trs = seq(2, 7)
# calculate means for each participant:
dt_pred_rep_a2_sub = dt_pred_rep %>%
  # only consider the two extreme conditions (2 and 9):
  #filter(change %in% c(2, 9)) %>%
  filter(seq_tr %in% trs) %>%
  # set the change variable to a factor:
  transform(change = as.factor(change)) %>%
  transform(position_label = as.factor(position_label)) %>% setDT(.) %>%
  # check if the number of change conditions really only equals 2:
  #verify(length(unique(change)) == 2) %>%
  .[, by = .(id, classification, change, position_label), .(
    probability = mean(probability * 100)
  )] %>%
  # standardize the probabilities:
  #mutate(probability_z = scale(probability)) %>% setDT(.) %>%
  setorder(id, classification, change, position_label) %>%
  filter(classification == "ovr") %>% setDT(.)
```

```{r}
lme_data = dt_pred_rep_a2_sub %>% filter(classification == "ovr" & change %in% c(2, 9))
lme_rep_prob_rep = lmer(probability ~ position_label * change + (1 + position_label + change | id),
                        data = lme_data, na.action = na.omit, control = lcctrl)
summary(lme_rep_prob_rep)
anova(lme_rep_prob_rep)
emmeans_results = emmeans(lme_rep_prob_rep, list(pairwise ~ position_label | change))
print(emmeans_results)
emmeans_summary = summary(emmeans_results[[2]])
```

```{r}
dt_significance = data.table(
  change = rep(emmeans_summary$change, each = 2),
  contrast = rep(emmeans_summary$contrast, each = 2),
  pvalue = rep(round_pvalues(emmeans_summary$p.value), each = 2),
  group = rep(c(1.5, 2.0, 2.5), each = 2),
  position_label = c("1", "2", "1", "none", "2", "none"),
  probability = rep(c(60, 80, 70), each = 2)
)
```

```{r}
dt_plot = dt_pred_rep_a2_sub %>% filter(classification == "ovr" & change %in% c(2, 9))
fig_c = ggplot(data = dt_plot, aes(
    x = as.factor(position_label), y = as.numeric(probability),
    fill = as.factor(position_label))) +
  geom_flat_violin(
    scale = "width", trim = FALSE, aes(color = NA),
    position = position_nudge(x = 0.25, y = 0), alpha = 0.4) +
  geom_point(
    stat = "summary", fun = "mean", color = "black", pch = 23, size = 3,
    position = position_nudge(x = 0.15, y = 0)) +
  geom_errorbar(
    stat = "summary", fun.data = "mean_se", width = 0.0,
    position = position_nudge(x = 0.15, y = 0), color = "black") +
  geom_boxplot(
    outlier.colour = "black", outlier.shape = NA, outlier.size = 50,
    notch = FALSE, outlier.alpha = 1, width = 0.05, color = "black",
    position = position_nudge(x = 0.35, y = 0), alpha = 0.5) +
  geom_point(
    position = position_jitter(width = .05, height = 0, seed = 4),
    pch = 21, alpha = 1) +
  facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
  xlab("Serial event") + ylab("Probability (%)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  #scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", ylim = c(0, 100), expand = TRUE) +
  theme(legend.position = "bottom", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
  theme(panel.background = element_blank()) +
  theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
  theme(axis.title.x = element_blank(), axis.text.x = element_blank(),
        axis.ticks.x = element_blank(), axis.line.x = element_blank()) +
  #theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  geom_line(data = dt_significance, aes(group = group, x = position_label, y = probability),
            linetype = "solid", position = position_nudge(x = 0.1, y = 0)) +
  geom_text(data = dt_significance %>% distinct(group, change, probability, .keep_all = TRUE),
            aes(x = group, y = probability + 1, label = pvalue), color = "black",
            position = position_nudge(x = 0.1, y = 4), parse = FALSE) +
  theme(axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_blank(),
        axis.ticks.x = element_line(colour = "white"))
fig_c
```

```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_average_probabilities.pdf",
       plot = fig_c, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 5, height = 3)
```

#### Figure S8

```{r}
# select specific trs:
trs = seq(2, 7)
dt_pred_rep_all_reps_sub = dt_pred_rep %>%
  # filter out the extremely long condition (with 16 items):
  filter(occurence != 15) %>% setDT(.) %>%
  # filter for specific trs:
  filter(seq_tr %in% trs) %>% setDT(.) %>%
  # transform the main variables of interest:
  transform(
    occurence = as.numeric(occurence),
    position_label = as.factor(position_label),
    probability = probability * 100
  ) %>% setDT(.) %>%
  # calculate the mean probability for each serial event item (first, second, other)
  # separately depending on the number of individual repetitions:
  .[, by = .(id, classification, occurence, position_label), .(
    probability = mean(probability)
  )] %>%
  setorder(., classification, id, occurence, position_label)
dt_pred_rep_all_reps_mean = dt_pred_rep_all_reps_sub %>%
  # average across participants and calculate the standard error of the mean:
  .[, by = .(classification, occurence, position_label), {
    list(
      mean_probability = mean(probability),
      num_subs = .N,
      sem_upper = mean(probability) + (sd(probability)/sqrt(.N)),
      sem_lower = mean(probability) - (sd(probability)/sqrt(.N))
    )}] %>%
  verify(all(num_subs == 36)) %>%
  setorder(., classification, occurence, position_label)
```

Are the short repetitions of event 1 and event 2 different from noise? LME model including all three event types:

```{r}
# model including all three event types:
lme_pred_rep_all_reps = lmer(probability ~ position_label * occurence + (1 + position_label + occurence | id),
                             data = subset(dt_pred_rep_all_reps_sub, classification == "ovr" & occurence != 15),
                             na.action = na.omit, control = lcctrl)
summary(lme_pred_rep_all_reps)
anova(lme_pred_rep_all_reps)
```

LME model excluding out-of-sequence events:

```{r}
lme_pred_rep_all_reps_reduced = lmer(probability ~ position_label * occurence + (1 + position_label + occurence | id),
                                     data = subset(dt_pred_rep_all_reps_sub, classification == "ovr" & occurence != 15 & position_label != "none"),
                                     na.action = na.omit, control = lcctrl)
summary(lme_pred_rep_all_reps_reduced)
anova(lme_pred_rep_all_reps_reduced)
```

```{r}
dt_pred_rep_all_reps_stats = dt_pred_rep_all_reps_sub %>%
  filter(occurence %in% c(8)) %>% setDT(.) %>%
  transform(position_label = paste0("pos_", position_label)) %>%
  spread(key = position_label, value = probability, drop = FALSE) %>%
  filter(classification == "ovr")
# report descriptive statistics (mean and SD) for each event type:
summary(dt_pred_rep_all_reps_stats$pos_2); round(sd(dt_pred_rep_all_reps_stats$pos_2), 2)
summary(dt_pred_rep_all_reps_stats$pos_1); round(sd(dt_pred_rep_all_reps_stats$pos_1), 2)
summary(dt_pred_rep_all_reps_stats$pos_none); round(sd(dt_pred_rep_all_reps_stats$pos_none), 2)
# perform separate paired t-tests:
test1 = t.test(dt_pred_rep_all_reps_stats$pos_2, dt_pred_rep_all_reps_stats$pos_1,
               paired = TRUE, alternative = "two.sided")
test2 = t.test(dt_pred_rep_all_reps_stats$pos_2, dt_pred_rep_all_reps_stats$pos_none,
               paired = TRUE, alternative = "two.sided")
test3 = t.test(dt_pred_rep_all_reps_stats$pos_1, dt_pred_rep_all_reps_stats$pos_none,
               paired = TRUE, alternative = "two.sided")
test1; test2; test3
# correct for multiple comparisons:
p.adjust(c(test1$p.value, test2$p.value, test3$p.value), method = "bonferroni", n = 6)
```

```{r}
fig_d = ggplot(data = subset(dt_pred_rep_all_reps_mean, classification == "ovr"), aes(
    x = as.factor(occurence), y = as.numeric(mean_probability),
    group = as.factor(position_label), color = as.factor(position_label))) +
  geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper,
                  fill = as.factor(position_label)), alpha = 0.5) +
  geom_line(size = 0.7, mapping = aes(color = as.factor(position_label))) +
  xlab("Number of item repetitions") +
  ylab("Probability (in %)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 40)) +
  theme(legend.position = "right", legend.direction = "vertical") +
  guides(color = guide_legend(ncol = 1)) +
  theme(legend.position = "right", legend.direction = "vertical",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = 0, l = -20)) +
  theme(panel.background = element_blank()) +
  theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_d
```

```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_duration_supplement.pdf",
       plot = fig_d, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 3.5, height = 2)
```

### Step-sizes

Are there more within-sequence items among the classifier predictions? To this end, we check whether serial events 1 and 2 (the only two events actually presented) are decoded more often than the other (out-of-sequence) serial events on repetition trials.

```{r}
# define the subset of selected TRs (used below):
select_trs = seq(2, 7)
dt_pred_rep_count = dt_pred_rep %>%
  # get the position with the highest probability:
  .[, by = .(classification, id, trial, change, seq_tr), ":=" (
    num_events = .N,
    position_max_prob = as.numeric(probability == max(probability))
  )] %>%
  # check that there are 5 classes per TR:
  verify(num_events == 5) %>%
  # check if there is only one event with the maximum probability:
  verify(.[, by = .(classification, id, trial, change, seq_tr), .(
    num_max_events = sum(position_max_prob)
  )]$num_max_events == 1) %>%
  # get the position with the highest probability:
  filter(position_max_prob == 1) %>%
  setDT(.) %>%
  # WARNING: HERE WE FILTER FOR THE SPECIFIC TRs SELECTED ABOVE:
  filter(seq_tr %in% select_trs) %>%
  setDT(.) %>%
  # calculate the absolute and relative occurrence of each maximum position:
  .[, by = .(classification, id, change, trial, position), .(
    count_num = .N / length(select_trs),
    freq = .N
  )] %>%
  # complete missing positions with zeros:
  complete(nesting(classification, id, change, trial),
           nesting(position), fill = list(count_num = 0, freq = 0)) %>%
  # create a new variable containing the type by copying the max position:
  mutate(type = position) %>%
  transform(type = ifelse(type %in% c(1, 2), type, "none")) %>% setDT(.) %>%
  # sort the data table:
  setorder(., classification, id, change, trial, position, type) %>%
  # check if the sum of frequencies matches the number of TRs:
  verify(.[, by = .(classification, id, change, trial), .(
    sum_freq = sum(freq)
  )]$sum_freq == length(select_trs)) %>%
  # average relative proportion of each position across trials per change:
  setDT(.) %>% .[, by = .(classification, id, change, type), .(
    mean_count = mean(count_num) * 100,
    sum_freq = sum(freq),
    num_trials = .N
  )] %>%
  # check the number of trials per change condition and type
  # (5, or 15 for the collapsed out-of-sequence positions):
  verify(num_trials %in% c(5, 15)) %>%
  # change variable types for mixed effects models:
  transform(change = as.factor(change)) %>%
  transform(type = as.factor(type)) %>%
  #mutate(mean_count_z = scale(mean_count, center = TRUE, scale = FALSE)) %>% setDT(.) %>%
  # create indicator variables for the first and second event:
  .[type == "1", ":=" (is_first = 1, is_second = 0)] %>%
  .[type == "2", ":=" (is_first = 0, is_second = 1)] %>%
  .[type == "none", ":=" (is_first = 0, is_second = 0)] %>%
  transform(is_first = as.numeric(is_first)) %>%
  transform(is_second = as.numeric(is_second))
```
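
The `complete()` call above fills in decoded positions that never won the probability comparison on a given trial. A minimal sketch of this zero-filling on toy data (illustration only; the values and column subset are hypothetical):

```{r, eval=FALSE}
library(tidyr)
# toy example: position 3 is never the decoded maximum on trial 1, so complete()
# adds the missing (trial, position) combinations and fills their counts with 0:
toy = data.frame(
  id = "sub-01",
  trial = c(1, 1, 2),
  position = c(1, 2, 3),
  freq = c(4, 2, 6)
)
complete(toy, nesting(id, trial), nesting(position), fill = list(freq = 0))
```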

```{r}
dt_pred_rep_count_mean = dt_pred_rep_count %>%
  # average the number of counts across participants:
  setDT(.) %>%
  .[, by = .(classification, change, type), .(
    mean_count = mean(mean_count),
    sd_count = sd(mean_count),
    num_subs = .N,
    sem_upper = mean(mean_count) + (sd(mean_count)/sqrt(.N)),
    sem_lower = mean(mean_count) - (sd(mean_count)/sqrt(.N))
  )] %>%
  # check if the number of data points (here, participants) is correct:
  verify(num_subs == 36) %>%
  #verify(.[, by = .(classification, change), .(
  #  mean_count = sum(mean_count)
  #)]$mean_count == 100) %>%
  # order the data table:
  setorder(., classification, change, type)
```

```{r}
lme_rep_count = lmer(mean_count ~ type * change + (1 + type + change | id),
                     data = subset(dt_pred_rep_count, classification == "ovr" & change %in% c(2, 9)),
                     na.action = na.omit, control = lcctrl)
summary(lme_rep_count)
anova(lme_rep_count)
emmeans_results = emmeans(lme_rep_count, list(pairwise ~ type | change))
emmeans_summary = summary(emmeans_results[[2]])
```

```{r, echo = FALSE}
dt_significance = data.table(
  change = rep(emmeans_summary$change, each = 2),
  contrast = rep(emmeans_summary$contrast, each = 2),
  pvalue = rep(round_pvalues(emmeans_summary$p.value), each = 2),
  group = rep(c(1.5, 2.0, 2.5), each = 2),
  type = c("1", "2", "1", "none", "2", "none"),
  probability = rep(c(75, 95, 85), each = 2)
)
```

```{r, echo = FALSE}
fig_e = ggplot(data = subset(dt_pred_rep_count, classification == "ovr" & change %in% c(2, 9)), aes(
    x = as.factor(type), y = as.numeric(mean_count),
    fill = as.factor(type))) +
  geom_flat_violin(
    scale = "width", trim = FALSE, aes(color = NA),
    position = position_nudge(x = 0.3, y = 0), alpha = 0.4) +
  geom_bar(
    stat = "summary", fun = "mean", color = "black", width = 0.05,
    position = position_nudge(x = 0.2, y = 0)) +
  geom_errorbar(
    stat = "summary", fun.data = "mean_se", width = 0.0,
    position = position_nudge(x = 0.2, y = 0), color = "black") +
  geom_boxplot(
    outlier.colour = "black", outlier.shape = NA, outlier.size = 50,
    notch = FALSE, outlier.alpha = 1, width = 0.05, color = "black",
    position = position_nudge(x = 0.4, y = 0), alpha = 0.5) +
  geom_point(
    position = position_jitter(width = .05, height = 0, seed = 4),
    pch = 21, alpha = 1, color = "black") +
  facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
  xlab("Serial event") + ylab("Proportion per trial (in %)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", ylim = c(0, 100), expand = TRUE) +
  theme(legend.position = "bottom", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
  theme(panel.background = element_blank()) +
  theme(axis.title.x = element_blank(), axis.text.x = element_blank(),
        axis.ticks.x = element_blank(), axis.line.x = element_blank()) +
  theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  geom_line(data = dt_significance, aes(group = group, x = type, y = probability),
            linetype = "solid", position = position_nudge(x = 0.2, y = 0)) +
  geom_text(data = dt_significance %>% distinct(group, change, probability, .keep_all = TRUE),
            aes(x = group, y = probability + 1, label = pvalue), color = "black",
            position = position_nudge(x = 0.2, y = 4), parse = FALSE) +
  theme(axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_blank(),
        axis.ticks.x = element_line(colour = "white"))
fig_e
```

```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_maxprob.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 5, height = 3)
```

**Test for significance**

**Test 1:** Using a paired t-test, we test how well we are able to decode a single, briefly presented item in a 32 ms sequence compared to items that were not presented, when the item (the first event) is followed by a statistical representation that could mask it (change = 2 condition).

**Test 2:** Using a paired t-test, we then test how well we are able to decode a single, briefly presented item in a 32 ms sequence compared to items that were not presented, when the item (the last event) is preceded by a statistical signal (change = 9 condition).

```{r, echo=FALSE, eval=FALSE}
# label all data points with position = 1 or 2 as part of the sequence:
dt_pred_rep_count$type_group[dt_pred_rep_count$type %in% c(1, 2)] = "in_seq"
# label all data points with positions > 2 as *not* part of the sequence:
dt_pred_rep_count$type_group[dt_pred_rep_count$type == "none"] = "out_seq"
# create a data table for the change = 2 condition that does not contain the 2nd stimulus:
df1 = subset(dt_pred_rep_count, change == 2 & type != 2)
# create a data table for the change = 9 condition that does not contain the 1st stimulus:
df2 = subset(dt_pred_rep_count, change == 9 & type != 1)
# combine both data frames:
df3 = setDT(rbind(df1, df2))
df3_test = df3 %>%
  # calculate means separately for within and out of sequence stimuli:
  .[, by = .(classification, id, change, type_group), .(
    mean_count = mean(mean_count)
  )] %>%
  # transform data table to wide format to calculate t-tests:
  spread(key = type_group, value = mean_count) %>% setDT(.) %>%
  # average across participants and calculate t-tests:
  .[, by = .(classification, change), {
    results = t.test(x = in_seq, y = out_seq, paired = TRUE);
    list(mean_out = mean(out_seq), sd_out = sd(out_seq),
         mean_in = mean(in_seq), sd_in = sd(in_seq),
         pvalue = results$p.value, tvalue = results$statistic, n = .N)
  }] %>%
  .[, by = .(classification), ":=" (
    num_comp = .N,
    pvalue_adjust = p.adjust(pvalue, method = "bonferroni", 2)
  )] %>% verify(all(num_comp == 2))
```

### Transitions

We analyze whether there are more within-sequence transitions than transitions to out-of-sequence elements:

```{r}
transition_type <- function(head, tail) {
  if (head == 1 & tail == 2) {
    transition_type = "forward"
  } else if (head == 2 & tail == 1) {
    transition_type = "backward"
  } else if (head == 1 & tail == 1) {
    transition_type = "repetition 1"
  } else if (head == 2 & tail == 2) {
    transition_type = "repetition 2"
  #} else if (head == tail) {
  #  transition_type = "repetition"
  } else if (head %in% c(1, 2) & tail > 2) {
    transition_type = "outwards"
  } else if (head > 2 & tail %in% c(1, 2)) {
    transition_type = "inwards"
  } else if (head > 2 & tail > 2 & head != tail) {
    transition_type = "outside"
  } else if (head > 2 & tail > 2 & head == tail) {
    transition_type = "repetition out"
  } else {
    transition_type = "ERROR!"
  }
  return(transition_type)
}
```
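
For illustration, a few example calls and the transition types they return (assuming the function above has been defined; not evaluated when knitting):

```{r, eval=FALSE}
transition_type(1, 2)  # "forward": from the first to the second in-sequence event
transition_type(2, 1)  # "backward"
transition_type(1, 1)  # "repetition 1": the first event is decoded twice in a row
transition_type(2, 4)  # "outwards": from an in-sequence to an out-of-sequence event
transition_type(4, 1)  # "inwards"
transition_type(3, 5)  # "outside": between two different out-of-sequence events
transition_type(3, 3)  # "repetition out"
```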

#### Figure 4e

```{r}
trs = seq(2, 7)
# define the number of transitions, which is the number of TRs - 1:
num_transitions = length(trs) - 1
# calculate the transitions between decoded repetition trial elements:
dt_pred_rep_step = dt_pred_rep %>%
  # filter for specific trs in a selected time window:
  filter(seq_tr %in% trs) %>% setDT(.) %>%
  # for each classification, id, trial, get position with highest probability:
  .[, by = .(classification, id, trial, change, seq_tr), .(
    num_trials = .N,
    position_max = position[which.max(probability)]
  )] %>%
  # check that there are 5 classes per TR:
  verify(num_trials == 5) %>%
  # get the head and tail of the maximum positions (sequence shifted by 1):
  # head = tr 1 to n-1 and tail = tr 2 to n:
  .[, by = .(classification, id, trial, change), .(
    n = .N,
    head = head(position_max, -1),
    tail = tail(position_max, -1)
  )] %>%
  # get the number of each unique combination of consecutive position indices:
  .[, .N, .(classification, id, trial, change, head, tail)] %>%
  # complete all missing pairings with 0s for each trial:
  complete(nesting(classification, id, trial, change), nesting(head, tail),
           fill = list(N = 0)) %>% setDT(.) %>%
  # check if the number of true transitions matches the expected number:
  verify(.[, by = .(classification, id, trial, change), .(
    sum_of_n = sum(N)
  )]$sum_of_n == num_transitions) %>%
  # set the order: sort by classification, participant, trial and positions:
  setorder(classification, id, trial, change, head, tail) %>%
  # define the transition type for every transition:
  mutate(step_type = mapply(transition_type, head, tail)) %>%
  # calculate the relative proportion per trial across TRs for each transition:
  mutate(proportion = N/num_transitions) %>% setDT(.) %>%
  # check if any step type is not properly assigned:
  verify(all(!is.na(step_type))) %>% setDT(.)
dt_pred_rep_step_version1 = dt_pred_rep_step %>%
  # average the proportion of transitions across trials:
  .[, by = .(classification, id, change, step_type), .(
    mean_proportion = mean(proportion) * 100
  )]
dt_pred_rep_step_version2 = dt_pred_rep_step %>%
  # sum up the number of transitions for each transition type:
  .[, by = .(classification, id, trial, change, step_type), .(
    num = sum(N)
  )] %>%
  # check if the number of true transitions matches the expected number per trial:
  #verify(.[, by = .(classification, id, trial, change), .(
  #  num_transitions = sum(num)
  #)]$num_transitions == num_transitions) %>%
  # calculate the proportion per trial across TRs for each transition:
  mutate(proportion = num/num_transitions) %>% setDT(.) %>%
  # average the proportion of transitions across trials:
  .[, by = .(classification, id, change, step_type), .(
    mean_num = mean(num),
    mean_proportion = mean(proportion) * 100
  )]
dt_pred_rep_step_mean = dt_pred_rep_step_version1 %>%
  # average across participants and compute the standard error:
  .[, by = .(classification, change, step_type), .(
    mean_proportion = mean(mean_proportion),
    num_subs = .N,
    sem_upper = mean(mean_proportion) + (sd(mean_proportion)/sqrt(.N)),
    sem_lower = mean(mean_proportion) - (sd(mean_proportion)/sqrt(.N))
  )] %>%
  verify(all(num_subs == 36)) %>%
  setorder(classification, change, step_type) %>%
  filter(classification == "ovr")
```
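
As a brief illustration of the head/tail pairing used above: `head(x, -1)` drops the last element and `tail(x, -1)` drops the first, so that each pair of entries corresponds to one transition between consecutively decoded events (toy vector, illustration only):

```{r, eval=FALSE}
position_max = c(1, 1, 2, 4, 2)  # decoded events across consecutive TRs (toy data)
data.frame(
  head = head(position_max, -1),  # decoded event at TR t
  tail = tail(position_max, -1)   # decoded event at TR t + 1
)
```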

```{r}
plot_rep_trans_mat = function(dt) {
  ggplot(data = dt, mapping = aes(
      x = as.factor(head), y = fct_rev(as_factor(tail)),
      size = mean_proportion, fill = step_type)) +
    geom_point(aes(size = mean_proportion, fill = step_type), pch = 21, color = "black") +
    coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
    xlab("Decoded event at t") +
    ylab("Decoded event at t + 1") +
    facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
    scale_size(range = c(1, 15), name = "Proportion per trial (in %)",
               guide = guide_legend(
                 title.position = "top", direction = "horizontal",
                 nrow = 1,
                 order = 2)) +
    scale_fill_manual(name = "Transition type",
                      values = c(colors, "white", "white", "white"),
                      guide = guide_legend(ncol = 2)) +
    theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
    scale_y_discrete(limits = rev(levels(~tail))) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()) +
    # remove background of legend keys:
    theme(legend.key = element_blank()) +
    theme(legend.background = element_blank())
    # add annotation
    #annotate("segment", x = 0, xend = 6, y = 6, yend = 0,
    #         colour = "white", size = 10, alpha = 0.8) +
    #annotate(geom = "text", x = 3, y = 3, angle = 360 - 45, hjust = 0.5,
    #         label = "Repetitions of the same decoded event",
    #         color = "gray")
}
```

```{r}
dt_pred_rep_step_trans = dt_pred_rep_step %>%
  .[, by = .(classification, change, head, tail, step_type), .(
    num_data = .N,
    mean_proportion = mean(proportion) * 100
  )] %>%
  # check that the number of averaged data points is the number of participants times the number of trials:
  verify(all(num_data == 36 * 5)) %>%
  # create a new variable containing the averaged number as a label:
  mutate(mean_num_label = as.character(sub('^(-)?0[.]', '\\1.', round(mean_proportion, 2)))) %>%
  # collapse all repetition transition types into a single "repetition" label:
  mutate(step_type = ifelse(stringr::str_detect(step_type, "repetition"), "repetition", step_type)) %>%
  setDT(.)
```

```{r}
fig_trans_mat = plot_rep_trans_mat(
  dt = subset(dt_pred_rep_step_trans, classification == "ovr" & change %in% c(2, 9)))
fig_trans_mat
```

```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_transition_matrix.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 8, height = 3)
```

#### Figure 4d

```{r, echo=TRUE}
plot_rep_trans <- function(dt) {
  ggplot(
    data = dt, aes(x = as.factor(step_type), y = as.numeric(mean_proportion),
                   fill = as.factor(step_type))) +
    geom_bar(stat = "summary", fun = "mean",
             aes(fill = as.factor(step_type)),
             color = "black",
             position = position_dodge()) +
    geom_point(position = position_jitter(width = .2, height = 0, seed = 4),
               pch = 21, alpha = 0.5, color = "black") +
    geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0,
                  position = position_dodge(.9), color = "black") +
    facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
    xlab("Serial event") +
    #xlab(expression(paste("Time-point of change to ", 2^nd, " serial event"))) +
    ylab("Proportion per trial (%)") +
    coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 10)) +
    scale_fill_manual(name = "Transition", values = c(colors, "lightblue", "lightcoral", "darkgray")) +
    theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
    #scale_x_discrete(labels = as_labeller(facet_labels_new)) +
    #theme(legend.position = "right", legend.direction = "vertical", legend.justification = "center") +
    theme(legend.position = "bottom", legend.direction = "horizontal",
          legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
          legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
    guides(fill = guide_legend(nrow = 2, title.position = "top", title.hjust = 0.5)) +
    theme(panel.background = element_blank()) +
    #theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
    theme(axis.line.y = element_line(colour = "black"),
          axis.line.x = element_line(colour = "white"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank(),
          axis.text.x = element_blank(),
          axis.title.x = element_blank(),
          axis.ticks.x = element_line(colour = "white"))
}
# & change %in% c(2,9)
fig_trans_prop = plot_rep_trans(dt = subset(
  dt_pred_rep_step_version1, classification == "ovr" & change %in% c(2, 9) &
    step_type != "repetition 1" & step_type != "repetition 2" & step_type != "repetition out"
))
fig_trans_prop
```

```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_transition_types.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 5, height = 3)
```

We compare the average proportion of forward transitions with the average proportion of outward steps in the change = 9 condition. We also compare the average proportion of forward transitions with the average proportion of transitions among out-of-sequence events:
  860. ```{r, results = "hold"}
  861. dt_pred_rep_trans_test = dt_pred_rep_step_version1 %>%
  862. filter(step_type %in% c("forward", "outwards", "outside") & change == 9) %>%
  863. spread(key = step_type, value = mean_proportion, drop = TRUE) %>% setDT(.) %>%
  864. .[, by = .(classification), {
  865. ttest_outwards = t.test(x = forward, y = outwards, paired = TRUE);
  866. ttest_outside = t.test(x = forward, y = outside, paired = TRUE);
  867. list(mean_forward = round(mean(forward), 2),
  868. mean_outwards = round(mean(outwards), 2),
  869. mean_outside = round(mean(outside), 2),
  870. df = ttest_outwards$parameter,
  871. ttest_outwards_pvalue = p.adjust(ttest_outwards$p.value,
  872. method = "bonferroni", n = 4),
  873. ttest_outwards_tvalue = round(ttest_outwards$statistic, 2),
  874. ttest_outside_pvalue = p.adjust(ttest_outside$p.value,
  875. method = "bonferroni", n = 4),
  876. ttest_outside_tvalue = round(ttest_outside$statistic, 2),
  877. num_subs = .N)
  878. }] %>%
  879. verify(all(num_subs == 36)) %>%
  880. filter(classification == "ovr")
  881. dt_pred_rep_trans_test
  882. ```

### Figure 4

```{r, fig.width = 10, fig.height = 10}
plot_grid(plot_grid(fig_a, fig_b, labels = c("a", "b")),
          #plot_grid(fig_c, fig_d, labels = c("c", "d")),
          plot_grid(fig_c, fig_trans_prop, labels = c("c", "d")),
          plot_grid(fig_trans_mat, labels = c("e")),
          ncol = 1, label_fontface = "bold")
```

```{r, echo=FALSE, include=FALSE, eval=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 10, height = 10)
ggsave(filename = "wittkuhn_schuck_figure_4.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 10, height = 10)
```