## Repetition trials

### Preparation

#### Initialization

We load the data and relevant functions:

```{r, warning=FALSE, message=FALSE, echo=TRUE}
# find the path to the root of this project:
if (!requireNamespace("here")) install.packages("here")
if (basename(here::here()) == "highspeed") {
  path_root = here::here("highspeed-analysis")
} else {
  path_root = here::here()
}
# source all relevant functions from the setup R script:
source(file.path(path_root, "code", "highspeed-analysis-setup.R"))
# create a list of participants to exclude based on behavioral performance:
sub_exclude <- c("sub-24", "sub-31", "sub-37", "sub-40")
```
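
The setup script sourced above is expected to attach all packages used in this section. As a convenience, the following chunk (not evaluated) lists the packages this section appears to rely on, in case individual chunks are run in isolation; the exact list is an assumption inferred from the functions called below, not a definitive dependency specification:

```{r, eval=FALSE}
# not run: packages this section appears to depend on (an assumption based on
# the functions used below; the sourced setup script is expected to load them):
library(data.table)               # setDT(), .[ ] syntax
library(dplyr)                    # %>%, filter(), mutate(), distinct()
library(tidyr)                    # spread(), complete(), nesting()
library(assertr)                  # verify()
library(gtools)                   # permute()
library(lme4); library(lmerTest)  # lmer() with p-values
library(emmeans)                  # emmeans()
library(ggplot2); library(lemon)  # plotting, coord_capped_cart()
library(cowplot)                  # plot_grid()
library(forcats); library(stringr)  # fct_rev(), as_factor(), str_detect()
```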

#### Prepare events and decoding data

We prepare the behavioral events data and the decoding data of repetition trials:

```{r}
# create a subset of the events data table only including repetition task events:
dt_events_rep = dt_events %>%
  filter(condition == "repetition" & trial_type == "stimulus") %>%
  setDT(.)
# create a data table containing all classifier predictions on repetition trials:
dt_pred_rep = dt_pred %>%
  # create a subset of the decoding data only including the repetition task data:
  filter(condition == "repetition" & class != "other" & mask == "cv" & stim != "cue") %>%
  setDT(.) %>%
  # filter excluded participants:
  filter(!(id %in% sub_exclude)) %>%
  setDT(.) %>%
  # add the serial position, change trial and target cue to the repetition data table:
  .[, c("position", "change", "trial_cue") := get_pos(.SD, dt_events_rep),
    by = .(id, trial, class), .SDcols = c("id", "trial", "class")] %>%
  # add a counter for the number of change trials in the repetition data:
  .[, by = .(id, classifier, class, change), ":=" (
    trial_change = rep(1:length(unique(trial)), each = max(seq_tr))
  )] %>%
  # add random position indices for out-of-class stimuli:
  .[is.na(position), by = .(id, classification, trial), ":=" (
    position = rep(permute(3:5), each = max(seq_tr))
  )] %>%
  # check if there are any NA values:
  verify(!is.na(position)) %>%
  # create a new variable containing the position label by copying the position:
  mutate(position_label = position) %>%
  # re-label all positions > 2 to "none":
  transform(position_label = ifelse(position_label > 2, "none", position_label)) %>%
  setDT(.) %>%
  # create a new variable counting the occurrence of each event position:
  .[position != 2, ":=" (occurence = change - 1)] %>%
  .[position == 2 & change <= 9, ":=" (occurence = max(change) + 1 - change)] %>%
  .[position == 2 & change == 16, ":=" (occurence = max(change) + 1 - change)]
```
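
As an optional sanity check, one could confirm that every combination of participant, classification, trial, and class contributes one prediction per TR. The chunk below is a minimal sketch and is not evaluated here; it assumes that every repetition trial spans the same number of TRs:

```{r, eval=FALSE}
# not run: optional sanity check (illustration only); it assumes that every
# repetition trial spans max(seq_tr) TRs and would stop with an error otherwise:
dt_pred_rep %>%
  .[, by = .(id, classification, trial, class), .(num_rows = .N)] %>%
  verify(all(num_rows == max(dt_pred_rep$seq_tr)))
```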

### Probability time-courses

First, we average across trials for each factor grouping and calculate the mean
classification probability of each serial event. Note that this approach averages
across the specific stimuli used in any given trial, as the specific stimulus
identities are not considered important here. We verify that the correct number
of trials was used for the calculations. Next, we average across participants:
we calculate the average probability of each serial event as well as the standard
error of the mean. Finally, we plot the data.

```{r}
dt_pred_rep_timecourses = dt_pred_rep %>%
  # calculate the mean probability for every repetition condition and TR
  .[, by = .(classification, id, seq_tr, change, position), .(
    num_events = .N,
    mean_probability = mean(probability * 100)
  )] %>% verify(num_events == 5) %>%
  # average mean probability across participants:
  .[, by = .(classification, seq_tr, change, position), .(
    num_sub = .N,
    mean_probability = mean(mean_probability),
    sem_upper = mean(mean_probability) + (sd(mean_probability)/sqrt(.N)),
    sem_lower = mean(mean_probability) - (sd(mean_probability)/sqrt(.N))
  )] %>% verify(all(num_sub == 36))
```
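
The ribbons in the figures below show the standard error of the mean across participants. As a minimal illustration (a hypothetical helper, not part of the pipeline), the quantity added to and subtracted from the group mean above is simply:

```{r}
# a hypothetical convenience function illustrating the standard error of the
# mean used for the ribbons above (sd of participant means / sqrt(n)):
sem <- function(x) sd(x) / sqrt(length(x))
# example usage on arbitrary numbers (not part of the analysis):
sem(c(48, 52, 50, 49, 51))
```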

```{r, echo=TRUE}
# define colors for plotting:
colors_inseq = colorRampPalette(c("dodgerblue", "red"), space = "Lab")(2)
colors_outseq = colorRampPalette(c("gray70", "gray90"), alpha = TRUE)(3)
colors = c(colors_inseq, colors_outseq)
# change the facet labels such that it shows the time-point of the change from
# the first to the second unique stimulus:
facet_labels_new = c(sort(unique(paste0(dt_pred_rep_timecourses$change[
  dt_pred_rep_timecourses$change != 16]-1, "/", 10-dt_pred_rep_timecourses$change[
  dt_pred_rep_timecourses$change != 16]))), unique(paste0(dt_pred_rep_timecourses$change[
  dt_pred_rep_timecourses$change == 16]-1, "/1")))
facet_labels_new[1] = paste("short", sprintf('\u2192'), "long")
facet_labels_new[1] = "Forward interference"
facet_labels_new[length(facet_labels_new)-1] = "Backward interference"
facet_labels_old = as.character(sort(unique(dt_pred_rep_timecourses$change)))
names(facet_labels_new) = facet_labels_old
```

```{r, echo=TRUE}
trs = c(2, 7)
plot_rep_probas <- function(data, xmin = 1, xmax = 13) {
  # reduced the data frame to plot rectangles (see below):
  data_factor_reduced = subset(data, position == 1 & seq_tr == 1)
  ggplot(data, aes(x = as.numeric(seq_tr), y = as.numeric(mean_probability),
                   color = as.factor(position), fill = as.factor(position))) +
    facet_wrap(~ as.factor(change), ncol = 3, labeller = as_labeller(facet_labels_new)) +
    geom_rect(data = data_factor_reduced, fill = "gray", alpha = 0.15, color = NA,
              aes(xmin = xmin, xmax = xmax, ymin = 0, ymax = 60)) +
    geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
    geom_line() +
    xlab("Time from sequence onset (TRs)") + ylab("Probability (%)") +
    annotate("text", x = 13, y = 0, label = "1 TR = 1.25 s",
             hjust = 1, size = rel(2)) +
    scale_colour_manual(name = "Serial event", values = colors) +
    scale_fill_manual(name = "Serial event", values = colors) +
    scale_x_continuous(labels = label_fill(seq(1,13,1), mod = 4), breaks = seq(1,13,1)) +
    coord_capped_cart(left = "both", bottom = "both", ylim = c(0,60)) +
    theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
    guides(color = guide_legend(nrow = 1)) +
    theme(legend.position = "bottom", legend.direction = "horizontal",
          legend.justification = "center", legend.margin = margin(0,0,0,0),
          legend.box.margin = margin(t = -5, r = 0, b = 10, l = 0)) +
    theme(panel.background = element_blank()) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank())
}
plot_data = dt_pred_rep_timecourses %>% filter(classification == "ovr" & change %in% c(2,9))
fig_a = plot_rep_probas(data = plot_data, xmin = trs[1] - 0.5, xmax = trs[2] + 0.5)
fig_s1_a = plot_rep_probas(data = subset(dt_pred_rep_timecourses, classification == "ovr"),
                           xmin = trs[1] - 0.5, xmax = trs[2] + 0.5)
fig_a; fig_s1_a;
```

```{r, echo=FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_timecourse_forward_backward.pdf",
       plot = fig_a, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 4.5, height = 2.5)
ggsave(filename = "highspeed_plot_decoding_repetition_timecourses_supplement.pdf",
       plot = fig_s1_a, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 6, height = 5)
```

### Mean probabilities

#### Figure 4b

```{r, results="hold"}
# define the subset of selected TRs:
trs = seq(2, 7)
# calculate the mean probability for the event types across conditions:
dt_pred_rep_prob = dt_pred_rep %>%
  # only consider the two extreme conditions (2 and 9):
  filter(change %in% c(2, 9)) %>% verify(length(unique(change)) == 2) %>% setDT(.) %>%
  # verify that the number of repetition trials per condition is correct:
  verify(all(.[, by = .(classification, id, change), .(
    num_trials = length(unique(trial))
  )]$num_trials == 5)) %>%
  # filter specific TRs:
  filter(seq_tr %in% trs) %>%
  transform(probability_z = scale(probability)) %>%
  filter(classification == "ovr") %>% setDT(.)
# calculate the mean probability for each position label:
dt_pred_rep_mean_prob = dt_pred_rep_prob %>%
  # calculate the mean probability for each position label on each trial:
  .[, by = .(id, classification, trial, position_label), .(
    num_events = .N,
    probability = mean(probability * 100)
  )] %>%
  # verify that the number of events for each position label is correct:
  verify(all(num_events %in% c(length(trs), length(trs) * 3))) %>%
  # set the position label as factor and z-score the probability values:
  mutate(position_label = as.factor(position_label)) %>%
  transform(probability_z = scale(probability)) %>%
  setorder(id, classification, trial, position_label)
# average the data for each position label across trials:
dt_pred_rep_mean_prob_plot = dt_pred_rep_mean_prob %>% setDT(.) %>%
  .[, by = .(id, classification, position_label), .(
    num_trials = .N,
    probability = mean(probability)
  )] %>% verify(all(num_trials == 10))
```
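
As an optional check (a minimal sketch that only inspects the table created above; not evaluated here), each participant should end up with exactly one averaged value per position label (1, 2, none):

```{r, eval=FALSE}
# not run: optional check (illustration only); one averaged probability per
# participant and position label is expected in the plotting data:
dt_pred_rep_mean_prob_plot %>%
  .[, by = .(classification, id), .(num_labels = .N)] %>%
  verify(all(num_labels == 3))
```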

```{r}
# define linear mixed effects model with by-participant random intercepts and slopes:
lme_rep_mean_prob = lmer(probability ~ position_label + (1 + position_label|id),
                         data = subset(dt_pred_rep_mean_prob, classification == "ovr"), control = lcctrl)
summary(lme_rep_mean_prob)
anova(lme_rep_mean_prob)
emmeans_results = emmeans(lme_rep_mean_prob, list(pairwise ~ position_label))
print(emmeans_results)
emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
```
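
If one wanted to inspect the same pairwise contrasts with an explicit multiplicity adjustment, `summary()` on the contrast part of the `emmeans` result accepts an `adjust` argument. This is only an illustrative alternative and does not feed into the objects used below:

```{r}
# optional illustration: re-summarize the pairwise contrasts of the model above
# with an explicit Bonferroni adjustment (does not alter any object used below):
summary(emmeans_results[[2]], adjust = "bonferroni")
```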

```{r}
# assemble the significance annotations: two rows per pairwise contrast,
# marking the start and end position of each comparison line:
dt_significance = data.table(
  contrast = rep(summary(emmeans_results[[2]])$contrast, each = 2),
  position_label = c("1", "2", "1", "none", "2", "none"),
  group = rep(c(1.5, 2.0, 2.5), each = 2),
  probability = rep(c(55, 75, 65), each = 2),
  pvalue = rep(emmeans_pvalues, each = 2)
)
```

```{r}
fig_b = ggplot(data = subset(dt_pred_rep_mean_prob_plot, classification == "ovr"), aes(
  x = as.factor(position_label), y = as.numeric(probability),
  fill = as.factor(position_label))) +
  geom_flat_violin(scale = "width", trim = FALSE, aes(color = NA),
                   position = position_nudge(x = 0.2, y = 0), alpha = 0.4) +
  geom_point(stat = "summary", fun = "mean", color = "black", pch = 23, size = 3,
             position = position_nudge(x = 0.125, y = 0)) +
  geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0.0,
                position = position_nudge(x = 0.125, y = 0), color = "black") +
  geom_boxplot(outlier.colour = "black", outlier.shape = NA, alpha = 0.5,
               notch = FALSE, outlier.alpha = 1, width = 0.05, color = "black",
               position = position_nudge(x = 0.25, y = 0)) +
  geom_point(position = position_jitter(width = .05, height = 0, seed = 4),
             pch = 21, alpha = 1, color = "black") +
  xlab("Serial event") + ylab("Probability (%)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  #scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", ylim = c(0, 100)) +
  theme(legend.position = "bottom", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0,0,0,0),
        legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
  theme(panel.background = element_blank()) +
  #theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  geom_line(data = dt_significance, aes(group = group),
            linetype = "solid", position = position_nudge(x = 0.1, y = 0)) +
  geom_text(data = dt_significance %>% distinct(group, probability, .keep_all = TRUE),
            aes(x = group, y = probability + 1.5, label = pvalue), color = "black",
            position = position_nudge(x = 0.125, y = 4), parse = FALSE) +
  theme(axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_blank(),
        axis.ticks.x = element_line(colour = "white"))
fig_b
```

```{r, echo = FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_probabilities_mean_all_trs.pdf",
       plot = fig_b, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 4, height = 3)
```

#### Figure 4c

```{r}
trs = seq(2, 7)
# calculate means for each participant:
dt_pred_rep_a2_sub = dt_pred_rep %>%
  # only consider the two extreme conditions (2 and 9):
  #filter(change %in% c(2,9)) %>%
  filter(seq_tr %in% trs) %>%
  # set the change variable to a factor:
  transform(change = as.factor(change)) %>%
  transform(position_label = as.factor(position_label)) %>% setDT(.) %>%
  # check if the number of change conditions really only equals 2:
  #verify(length(unique(change)) == 2) %>%
  .[, by = .(id, classification, change, position_label), .(
    probability = mean(probability * 100)
  )] %>%
  # standardize the probabilities:
  #mutate(probability_z = scale(probability)) %>% setDT(.) %>%
  setorder(id, classification, change, position_label) %>%
  filter(classification == "ovr") %>% setDT(.)
```

```{r}
lme_data = dt_pred_rep_a2_sub %>% filter(classification == "ovr" & change %in% c(2,9))
lme_rep_prob_rep = lmer(probability ~ position_label * change + (1 + position_label + change|id),
                        data = lme_data, na.action = na.omit, control = lcctrl)
summary(lme_rep_prob_rep)
anova(lme_rep_prob_rep)
emmeans_results = emmeans(lme_rep_prob_rep, list(pairwise ~ position_label | change))
print(emmeans_results)
emmeans_summary = summary(emmeans_results[[2]])
```

```{r}
dt_significance = data.table(
  change = rep(emmeans_summary$change, each = 2),
  contrast = rep(emmeans_summary$contrast, each = 2),
  pvalue = rep(round_pvalues(emmeans_summary$p.value), each = 2),
  group = rep(c(1.5, 2.0, 2.5), each = 2),
  position_label = c("1", "2", "1", "none", "2", "none"),
  probability = rep(c(60, 80, 70), each = 2)
)
```

```{r}
dt_plot = dt_pred_rep_a2_sub %>% filter(classification == "ovr" & change %in% c(2,9))
fig_c = ggplot(data = dt_plot, aes(
  x = as.factor(position_label), y = as.numeric(probability),
  fill = as.factor(position_label))) +
  geom_flat_violin(
    scale = "width", trim = FALSE, aes(color = NA),
    position = position_nudge(x = 0.25, y = 0), alpha = 0.4) +
  geom_point(
    stat = "summary", fun = "mean", color = "black", pch = 23, size = 3,
    position = position_nudge(x = 0.15, y = 0)) +
  geom_errorbar(
    stat = "summary", fun.data = "mean_se", width = 0.0,
    position = position_nudge(x = 0.15, y = 0), color = "black") +
  geom_boxplot(
    outlier.colour = "black", outlier.shape = NA, outlier.size = 50,
    notch = FALSE, outlier.alpha = 1, width = 0.05, color = "black",
    position = position_nudge(x = 0.35, y = 0), alpha = 0.5) +
  geom_point(
    position = position_jitter(width = .05, height = 0, seed = 4),
    pch = 21, alpha = 1) +
  facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
  xlab("Serial event") + ylab("Probability (%)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  #scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", ylim = c(0, 100), expand = TRUE) +
  theme(legend.position = "bottom", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0,0,0,0),
        legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
  theme(panel.background = element_blank()) +
  theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
  theme(axis.title.x = element_blank(), axis.text.x = element_blank(),
        axis.ticks.x = element_blank(), axis.line.x = element_blank()) +
  #theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  geom_line(data = dt_significance, aes(group = group, x = position_label, y = probability),
            linetype = "solid", position = position_nudge(x = 0.1, y = 0)) +
  geom_text(data = dt_significance %>% distinct(group, change, probability, .keep_all = TRUE),
            aes(x = group, y = probability + 1, label = pvalue), color = "black",
            position = position_nudge(x = 0.1, y = 4), parse = FALSE) +
  theme(axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_blank(),
        axis.ticks.x = element_line(colour = "white"))
fig_c
```

```{r}
ggsave(filename = "highspeed_plot_decoding_repetition_average_probabilities.pdf",
       plot = fig_c, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 5, height = 3)
```

#### Figure S8

```{r}
# select specific trs:
trs = seq(2, 7)
dt_pred_rep_all_reps_sub = dt_pred_rep %>%
  # filter out the extremely long condition (with 16 items):
  filter(occurence != 15) %>% setDT(.) %>%
  # filter for specific trs:
  filter(seq_tr %in% trs) %>% setDT(.) %>%
  # transform the main variables of interest:
  transform(
    occurence = as.numeric(occurence),
    position_label = as.factor(position_label),
    probability = probability * 100
  ) %>% setDT(.) %>%
  # calculate the mean probability for each serial event item (first, second, other)
  # separately depending on the number of individual repetitions:
  .[, by = .(id, classification, occurence, position_label), .(
    probability = mean(probability)
  )] %>%
  setorder(., classification, id, occurence, position_label)
dt_pred_rep_all_reps_mean = dt_pred_rep_all_reps_sub %>%
  # average across participants and calculate the standard error of the mean:
  .[, by = .(classification, occurence, position_label), {
    list(
      mean_probability = mean(probability),
      num_subs = .N,
      sem_upper = mean(probability) + (sd(probability)/sqrt(.N)),
      sem_lower = mean(probability) - (sd(probability)/sqrt(.N))
    )}] %>%
  verify(all(num_subs == 36)) %>%
  setorder(., classification, occurence, position_label)
```

Are the short repetitions of event 1 and event 2 different from noise?
LME model including all three event types:

```{r}
# model including all three event types:
lme_pred_rep_all_reps = lmer(probability ~ position_label * occurence + (1 + position_label + occurence|id),
                             data = subset(dt_pred_rep_all_reps_sub, classification == "ovr" & occurence != 15),
                             na.action = na.omit, control = lcctrl)
summary(lme_pred_rep_all_reps)
anova(lme_pred_rep_all_reps)
```

LME model excluding out-of-sequence events:

```{r}
lme_pred_rep_all_reps_reduced = lmer(probability ~ position_label * occurence + (1 + position_label + occurence|id),
                                     data = subset(dt_pred_rep_all_reps_sub, classification == "ovr" & occurence != 15 & position_label != "none"),
                                     na.action = na.omit, control = lcctrl)
summary(lme_pred_rep_all_reps_reduced)
anova(lme_pred_rep_all_reps_reduced)
```

```{r}
dt_pred_rep_all_reps_stats = dt_pred_rep_all_reps_sub %>%
  filter(occurence %in% c(8)) %>% setDT(.) %>%
  transform(position_label = paste0("pos_", position_label)) %>%
  spread(key = position_label, value = probability, drop = FALSE) %>%
  filter(classification == "ovr")
# some ugly code to perform three t-tests:
summary(dt_pred_rep_all_reps_stats$pos_2); sd(dt_pred_rep_all_reps_stats$pos_2)
summary(dt_pred_rep_all_reps_stats$pos_1); sd(dt_pred_rep_all_reps_stats$pos_1)
summary(dt_pred_rep_all_reps_stats$pos_none); sd(dt_pred_rep_all_reps_stats$pos_none)
# perform separate t-tests
test1 = t.test(dt_pred_rep_all_reps_stats$pos_2, dt_pred_rep_all_reps_stats$pos_1,
               paired = TRUE, alternative = "two.sided")
test2 = t.test(dt_pred_rep_all_reps_stats$pos_2, dt_pred_rep_all_reps_stats$pos_none,
               paired = TRUE, alternative = "two.sided")
test3 = t.test(dt_pred_rep_all_reps_stats$pos_1, dt_pred_rep_all_reps_stats$pos_none,
               paired = TRUE, alternative = "two.sided")
test1; test2; test3
# correct for multiple comparisons
p.adjust(c(test1$p.value, test2$p.value, test3$p.value), method = "bonferroni", n = 6)
```
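
A more compact sketch of the same three paired t-tests is shown below (illustration only; it assumes the `dt_pred_rep_all_reps_stats` columns created above and reproduces the Bonferroni correction with n = 6):

```{r}
# illustration only: the three paired comparisons from above written as a loop
# over column pairs, followed by the same Bonferroni correction (n = 6):
pairs <- list(c("pos_2", "pos_1"), c("pos_2", "pos_none"), c("pos_1", "pos_none"))
pvals <- sapply(pairs, function(p) t.test(
  dt_pred_rep_all_reps_stats[[p[1]]], dt_pred_rep_all_reps_stats[[p[2]]],
  paired = TRUE, alternative = "two.sided")$p.value)
p.adjust(pvals, method = "bonferroni", n = 6)
```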

```{r, echo = FALSE}
fig_d = ggplot(data = subset(dt_pred_rep_all_reps_mean, classification == "ovr"), aes(
  x = as.factor(occurence), y = as.numeric(mean_probability),
  group = as.factor(position_label), color = as.factor(position_label))) +
  geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper,
                  fill = as.factor(position_label)), alpha = 0.5) +
  geom_line(size = 0.7, mapping = aes(color = as.factor(position_label))) +
  xlab("Number of item repetitions") +
  ylab("Probability (in %)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0,40)) +
  theme(legend.position = "right", legend.direction = "vertical") +
  guides(color = guide_legend(ncol = 1)) +
  theme(legend.position = "right", legend.direction = "vertical",
        legend.justification = "center", legend.margin = margin(0,0,0,0),
        legend.box.margin = margin(t = 0, r = 0, b = 0, l = -20)) +
  theme(panel.background = element_blank()) +
  theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_d
```

```{r, echo = FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_duration_supplement.pdf",
       plot = fig_d, device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 3.5, height = 2)
```

### Step-sizes

Are within-sequence items more prevalent in the classifier predictions?
To this end, we check whether serial events 1 and 2 (the two in-sequence events)
are decoded more often than other (out-of-sequence) serial events on repetition trials.

#### Figure 4d

```{r}
# define the subset of selected TRs (used below):
select_trs = seq(2, 7)
dt_pred_rep_count = dt_pred_rep %>%
  # get the position with the highest probability:
  .[, by = .(classification, id, trial, change, seq_tr), ":=" (
    num_events = .N,
    position_max_prob = as.numeric(probability == max(probability))
  )] %>%
  # check if the data contains predictions for all 5 classes per TR:
  verify(num_events == 5) %>%
  # check if there is only one event with the maximum probability:
  verify(.[, by = .(classification, id, trial, change, seq_tr), .(
    num_max_events = sum(position_max_prob)
  )]$num_max_events == 1) %>%
  # get the position with the highest probability:
  filter(position_max_prob == 1) %>%
  setDT(.) %>%
  # WARNING: HERE WE FILTER FOR THE SPECIFIC TRS SELECTED ABOVE:
  filter(seq_tr %in% select_trs) %>%
  setDT(.) %>%
  # calculate the absolute and relative occurrence of each maximum position:
  .[, by = .(classification, id, change, trial, position), .(
    count_num = .N / length(select_trs),
    freq = .N
  )] %>%
  # complete missing positions with zeros:
  complete(nesting(classification, id, change, trial),
           nesting(position), fill = list(count_num = 0, freq = 0)) %>%
  # create a new variable containing the type by copying the max position:
  mutate(type = position) %>%
  transform(type = ifelse(type %in% c(1,2), type, "none")) %>% setDT(.) %>%
  # sort the data table:
  setorder(., classification, id, change, trial, position, type) %>%
  # check if the sum of frequencies matches the number of trs:
  verify(.[, by = .(classification, id, change, trial), .(
    sum_freq = sum(freq)
  )]$sum_freq == length(select_trs)) %>%
  # average relative proportion of each position across trials per change:
  setDT(.) %>% .[, by = .(classification, id, change, type), .(
    mean_count = mean(count_num) * 100,
    sum_freq = sum(freq),
    num_trials = .N
  )] %>%
  # check if counts are based on 5 trials (and 5 x 3 positions for out-of-sequence events):
  verify(num_trials %in% c(5, 15)) %>%
  # change variable types for mixed effects models:
  transform(change = as.factor(change)) %>%
  transform(type = as.factor(type)) %>%
  #mutate(mean_count_z = scale(mean_count, center = TRUE, scale = FALSE)) %>% setDT(.) %>%
  # create a new variable:
  .[type == "1", ":=" (is_first = 1, is_second = 0)] %>%
  .[type == "2", ":=" (is_first = 0, is_second = 1)] %>%
  .[type == "none", ":=" (is_first = 0, is_second = 0)] %>%
  transform(is_first = as.numeric(is_first)) %>%
  transform(is_second = as.numeric(is_second))
```

```{r}
dt_pred_rep_count_mean = dt_pred_rep_count %>%
  # average the number of counts across participants:
  setDT(.) %>%
  .[, by = .(classification, change, type), .(
    mean_count = mean(mean_count),
    sd_count = sd(mean_count),
    num_subs = .N,
    sem_upper = mean(mean_count) + (sd(mean_count)/sqrt(.N)),
    sem_lower = mean(mean_count) - (sd(mean_count)/sqrt(.N))
  )] %>%
  # check if the number of data points (here, participants) is correct:
  verify(num_subs == 36) %>%
  #verify(.[, by = .(classification, change), .(
  #  mean_count = sum(mean_count)
  #)]$mean_count == 100) %>%
  # order the data table:
  setorder(., classification, change, type)
```

```{r}
lme_rep_count = lmer(mean_count ~ type * change + (1 + type + change|id),
                     data = subset(dt_pred_rep_count, classification == "ovr" & change %in% c(2,9)),
                     na.action = na.omit, control = lcctrl)
summary(lme_rep_count)
anova(lme_rep_count)
emmeans_results = emmeans(lme_rep_count, list(pairwise ~ type | change))
emmeans_summary = summary(emmeans_results[[2]])
```

```{r, echo = FALSE}
dt_significance = data.table(
  change = rep(emmeans_summary$change, each = 2),
  contrast = rep(emmeans_summary$contrast, each = 2),
  pvalue = rep(round_pvalues(emmeans_summary$p.value), each = 2),
  group = rep(c(1.5, 2.0, 2.5), each = 2),
  type = c("1", "2", "1", "none", "2", "none"),
  probability = rep(c(75, 95, 85), each = 2)
)
```

```{r, echo = FALSE}
fig_e = ggplot(data = subset(dt_pred_rep_count, classification == "ovr" & change %in% c(2,9)), aes(
  x = as.factor(type), y = as.numeric(mean_count),
  fill = as.factor(type))) +
  geom_flat_violin(
    scale = "width", trim = FALSE, aes(color = NA),
    position = position_nudge(x = 0.3, y = 0), alpha = 0.4) +
  geom_bar(
    stat = "summary", fun = "mean", color = "black", width = 0.05,
    position = position_nudge(x = 0.2, y = 0)) +
  geom_errorbar(
    stat = "summary", fun.data = "mean_se", width = 0.0,
    position = position_nudge(x = 0.2, y = 0), color = "black") +
  geom_boxplot(
    outlier.colour = "black", outlier.shape = NA, outlier.size = 50,
    notch = FALSE, outlier.alpha = 1, width = 0.05, color = "black",
    position = position_nudge(x = 0.4, y = 0), alpha = 0.5) +
  geom_point(
    position = position_jitter(width = .05, height = 0, seed = 4),
    pch = 21, alpha = 1, color = "black") +
  facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
  xlab("Serial event") + ylab("Proportion per trial (in %)") +
  scale_fill_manual(name = "Serial event", values = colors) +
  scale_color_manual(name = "Serial event", values = colors) +
  scale_y_continuous(labels = label_fill(seq(0, 100, 10), mod = 2), breaks = seq(0, 100, 10)) +
  coord_capped_cart(left = "both", bottom = "both", ylim = c(0, 100), expand = TRUE) +
  theme(legend.position = "bottom", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0,0,0,0),
        legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
  theme(panel.background = element_blank()) +
  theme(axis.title.x = element_blank(), axis.text.x = element_blank(),
        axis.ticks.x = element_blank(), axis.line.x = element_blank()) +
  theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
  geom_line(data = dt_significance, aes(group = group, x = type, y = probability),
            linetype = "solid", position = position_nudge(x = 0.2, y = 0)) +
  geom_text(data = dt_significance %>% distinct(group, change, probability, .keep_all = TRUE),
            aes(x = group, y = probability + 1, label = pvalue), color = "black",
            position = position_nudge(x = 0.2, y = 4), parse = FALSE) +
  theme(axis.line.y = element_line(colour = "black"),
        axis.line.x = element_line(colour = "white"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x = element_blank(),
        axis.title.x = element_blank(),
        axis.ticks.x = element_line(colour = "white"))
fig_e
```

```{r}
ggsave(filename = "highspeed_plot_decoding_repetition_maxprob.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 5, height = 3)
```

**Test for significance**

**Test 1:** Using a paired t-test, we test how well we can decode a single,
briefly presented item in a 32 ms sequence compared to items that were not
presented, when that item (the first serial event) is followed by a statistical
representation that could mask it (change = 2 condition).

**Test 2:** Using a second paired t-test, we test how well we can decode a single,
briefly presented item in a 32 ms sequence compared to items that were not
presented, when that item (the last serial event) is preceded by a statistical
signal (change = 9 condition).

```{r}
# label all data points with position 1 or 2 as part of the sequence:
dt_pred_rep_count$type_group[dt_pred_rep_count$type %in% c(1,2)] = "in_seq"
# label all data points with positions > 2 as *not* part of the sequence:
dt_pred_rep_count$type_group[dt_pred_rep_count$type == "none"] = "out_seq"
# create a data table for the change = 2 condition that does not contain the 2nd stimulus:
df1 = subset(dt_pred_rep_count, change == 2 & type != 2)
# create a data table for the change = 9 condition that does not contain the 1st stimulus:
df2 = subset(dt_pred_rep_count, change == 9 & type != 1)
# combine both data frames:
df3 = setDT(rbind(df1, df2))
df3_test = df3 %>%
  # calculate means separately for within and out of sequence stimuli:
  .[, by = .(classification, id, change, type_group), .(
    mean_count = mean(mean_count)
  )] %>%
  # transform data table to wide format to calculate t-tests:
  spread(key = type_group, value = mean_count) %>%
  # run paired t-tests across participants, separately for each change condition:
  .[, by = .(classification, change), {
    results = t.test(x = in_seq, y = out_seq, paired = TRUE);
    list(mean_out = mean(out_seq), sd_out = sd(out_seq),
         mean_in = mean(in_seq), sd_in = sd(in_seq),
         pvalue = results$p.value, tvalue = results$statistic, n = .N)
  }] %>%
  .[, by = .(classification), ":=" (
    num_comp = .N,
    pvalue_adjust = p.adjust(pvalue, method = "bonferroni", 2)
  )] %>% verify(all(num_comp == 2))
```
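
The resulting test table is stored in `df3_test`; to display the test statistics and Bonferroni-adjusted p-values directly (purely for inspection, nothing is recomputed):

```{r}
# print the paired t-test results assembled above (inspection only):
df3_test
```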

### Transitions

We analyze whether there are more within-sequence transitions than transitions
to out-of-sequence elements:

```{r}
transition_type <- function(head, tail){
  if (head == 1 & tail == 2){
    transition_type = "forward"
  } else if(head == 2 & tail == 1) {
    transition_type = "backward"
  } else if(head == 1 & tail == 1) {
    transition_type = "repetition 1"
  } else if(head == 2 & tail == 2) {
    transition_type = "repetition 2"
  #} else if(head == tail) {
  #  transition_type = "repetition"
  } else if(head %in% c(1,2) & tail > 2) {
    transition_type = "outwards"
  } else if(head > 2 & tail %in% c(1,2)) {
    transition_type = "inwards"
  } else if(head > 2 & tail > 2 & head != tail) {
    transition_type = "outside"
  } else if(head > 2 & tail > 2 & head == tail) {
    transition_type = "repetition out"
  } else {
    transition_type = "ERROR!"
  }
  return(transition_type)
}
```
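
For illustration, a few example calls show which label each pair of consecutively decoded positions receives (the expected values follow directly from the definition above):

```{r}
# illustration of the transition labels for a few example position pairs:
transition_type(1, 2)  # "forward"
transition_type(2, 1)  # "backward"
transition_type(1, 4)  # "outwards"
transition_type(3, 2)  # "inwards"
transition_type(4, 4)  # "repetition out"
```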

```{r}
trs = seq(2, 7)
# define the number of transitions, which is the number of TRs - 1:
num_transitions = length(trs) - 1
# calculate the transitions between decoded repetition trial elements:
dt_pred_rep_step = dt_pred_rep %>%
  # filter for specific trs in a selected time window:
  filter(seq_tr %in% trs) %>% setDT(.) %>%
  # for each classification, id, trial, get position with highest probability:
  .[, by = .(classification, id, trial, change, seq_tr), .(
    num_trials = .N,
    position_max = position[which.max(probability)]
  )] %>%
  # check if the data contains predictions for all 5 classes per TR:
  verify(num_trials == 5) %>%
  # get the head and tail of the maximum positions (sequence shifted by 1):
  # head = tr 1 to n-1 and tail = tr 2 to n:
  .[, by = .(classification, id, trial, change), .(
    n = .N,
    head = head(position_max, -1),
    tail = tail(position_max, -1)
  )] %>%
  # get the number of each unique combination of consecutive positions indices:
  .[, .N, .(classification, id, trial, change, head, tail)] %>%
  # complete all missing pairings with 0s for each trial:
  complete(nesting(classification, id, trial, change), nesting(head, tail),
           fill = list(N = 0)) %>% setDT(.) %>%
  # check if the number of true transitions matches the expected number:
  verify(.[, by = .(classification, id, trial, change), .(
    sum_of_n = sum(N)
  )]$sum_of_n == num_transitions) %>%
  # set the order: sort by classification, participant, trial and positions:
  setorder(classification, id, trial, change, head, tail) %>%
  # define the transition type for every transition:
  mutate(step_type = mapply(transition_type, head, tail)) %>%
  # calculate the relative proportion per trial across TRs for each transition:
  mutate(proportion = N/num_transitions) %>% setDT(.) %>%
  # check if any step type is not properly assigned:
  verify(all(!is.na(step_type))) %>% setDT(.)
dt_pred_rep_step_version1 = dt_pred_rep_step %>%
  # average the proportion of transitions across trials:
  .[, by = .(classification, id, change, step_type), .(
    mean_proportion = mean(proportion) * 100
  )]
dt_pred_rep_step_version2 = dt_pred_rep_step %>%
  # sum up the number of transitions for each transition type:
  .[, by = .(classification, id, trial, change, step_type), .(
    num = sum(N)
  )] %>%
  # check if the number of true transitions matches the expected number per trial:
  #verify(.[, by = .(classification, id, trial, change), .(
  #  num_transitions = sum(num)
  #)]$num_transitions == num_transitions) %>%
  # calculate the proportion per trial across TRs for each transition:
  mutate(proportion = num/num_transitions) %>% setDT(.) %>%
  # average the proportion of transitions across trials:
  .[, by = .(classification, id, change, step_type), .(
    mean_num = mean(num),
    mean_proportion = mean(proportion) * 100
  )]
dt_pred_rep_step_mean = dt_pred_rep_step_version1 %>%
  # average across participants and compute the standard error:
  .[, by = .(classification, change, step_type), .(
    mean_proportion = mean(mean_proportion),
    num_subs = .N,
    sem_upper = mean(mean_proportion) + (sd(mean_proportion)/sqrt(.N)),
    sem_lower = mean(mean_proportion) - (sd(mean_proportion)/sqrt(.N))
  )] %>%
  verify(all(num_subs == 36)) %>%
  setorder(classification, change, step_type) %>%
  filter(classification == "ovr")
```

```{r, dev = "cairo_pdf"}
plot_rep_trans_mat = function(dt){
  ggplot(data = dt, mapping = aes(
    x = as.factor(head), y = fct_rev(as_factor(tail)),
    size = mean_proportion, fill = step_type)) +
    geom_point(aes(size = mean_proportion, fill = step_type), pch = 21, color = "black") +
    coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
    xlab("Decoded event at t") +
    ylab("Decoded event at t + 1") +
    facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
    scale_size(range = c(1,15), name = "Proportion per trial (in %)",
               guide = guide_legend(
                 title.position = "top", direction = "horizontal",
                 nrow = 1,
                 order = 2)) +
    scale_fill_manual(name = "Transition type",
                      values = c(colors, "white", "white", "white"),
                      guide = guide_legend(ncol = 2)) +
    theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
    scale_y_discrete(limits = rev(levels(~tail))) +
    theme(axis.line = element_line(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()) +
    # remove background of legend keys:
    theme(legend.key = element_blank()) +
    theme(legend.background = element_blank())
  # add annotation
  #annotate("segment", x = 0, xend = 6, y = 6, yend = 0,
  #         colour = "white", size = 10, alpha = 0.8) +
  #annotate(geom = "text", x = 3, y = 3, angle = 360 - 45, hjust = 0.5,
  #         label = "Repetitions of the same decoded event",
  #         color = "gray")
}
```

```{r}
dt_pred_rep_step_trans = dt_pred_rep_step %>%
  .[, by = .(classification, change, head, tail, step_type), .(
    num_data = .N,
    mean_proportion = mean(proportion) * 100
  )] %>%
  # check that each average is based on the number of participants times the number of trials:
  verify(all(num_data == 36 * 5)) %>%
  # create a new variable containing the averaged proportion as a label:
  mutate(mean_num_label = as.character(sub('^(-)?0[.]', '\\1.', round(mean_proportion, 2)))) %>%
  # collapse all repetition transitions into a single "repetition" label:
  mutate(step_type = ifelse(stringr::str_detect(step_type, "repetition"), "repetition", step_type)) %>%
  setDT(.)
```

```{r}
fig_trans_mat = plot_rep_trans_mat(
  dt = subset(dt_pred_rep_step_trans, classification == "ovr" & change %in% c(2,9)))
fig_trans_mat
```

```{r, echo = FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_transition_matrix.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 8, height = 3)
```

```{r, echo = FALSE, dev = "cairo_pdf"}
plot_rep_trans <- function(dt){
  ggplot(
    data = dt, aes(x = as.factor(step_type), y = as.numeric(mean_proportion),
                   fill = as.factor(step_type))) +
    geom_bar(stat = "summary", fun = "mean",
             aes(fill = as.factor(step_type)),
             color = "black",
             position = position_dodge()) +
    geom_point(position = position_jitter(width = .2, height = 0, seed = 4),
               pch = 21, alpha = 0.5, color = "black") +
    geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0,
                  position = position_dodge(.9), color = "black") +
    facet_wrap(~ as.factor(change), labeller = as_labeller(facet_labels_new)) +
    xlab("Serial event") +
    #xlab(expression(paste("Time-point of change to ", 2^nd, " serial event"))) +
    ylab("Proportion per trial (%)") +
    coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0,10)) +
    scale_fill_manual(name = "Transition", values = c(colors, "lightblue", "lightcoral", "darkgray")) +
    theme(strip.text.x = element_text(margin = margin(b = 3, t = 2))) +
    #scale_x_discrete(labels = as_labeller(facet_labels_new)) +
    #theme(legend.position = "right", legend.direction = "vertical", legend.justification = "center") +
    theme(legend.position = "bottom", legend.direction = "horizontal",
          legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
          legend.box.margin = margin(t = -10, r = 0, b = 10, l = 0)) +
    guides(fill = guide_legend(nrow = 2, title.position = "top", title.hjust = 0.5)) +
    theme(panel.background = element_blank()) +
    #theme(plot.margin = unit(c(t = 1, r = 1, b = 1, l = 1), "pt")) +
    theme(axis.line.y = element_line(colour = "black"),
          axis.line.x = element_line(colour = "white"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank(),
          axis.text.x = element_blank(),
          axis.title.x = element_blank(),
          axis.ticks.x = element_line(colour = "white"))
}
# & change %in% c(2,9)
fig_trans_prop = plot_rep_trans(dt = subset(
  dt_pred_rep_step_version1, classification == "ovr" & change %in% c(2,9) &
    step_type != "repetition 1" & step_type != "repetition 2" & step_type != "repetition out"
))
fig_trans_prop
```

```{r, echo = FALSE}
ggsave(filename = "highspeed_plot_decoding_repetition_transition_types.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 5, height = 3)
```

We compare the average proportion of forward transitions with the average proportion
of outwards transitions, and also with the average proportion of transitions among
out-of-sequence elements, in the change = 9 condition (the condition selected in the filter below):

```{r, results = "hold"}
dt_pred_rep_trans_test = dt_pred_rep_step_version1 %>%
  filter(step_type %in% c("forward", "outwards", "outside") & change == 9) %>%
  spread(key = step_type, value = mean_proportion, drop = TRUE) %>% setDT(.) %>%
  .[, by = .(classification), {
    ttest_outwards = t.test(x = forward, y = outwards, paired = TRUE);
    ttest_outside = t.test(x = forward, y = outside, paired = TRUE);
    list(mean_forward = round(mean(forward), 2),
         mean_outwards = round(mean(outwards), 2),
         mean_outside = round(mean(outside), 2),
         df = ttest_outwards$parameter,
         ttest_outwards_pvalue = p.adjust(ttest_outwards$p.value,
                                          method = "bonferroni", n = 4),
         ttest_outwards_tvalue = round(ttest_outwards$statistic, 2),
         ttest_outside_pvalue = p.adjust(ttest_outside$p.value,
                                         method = "bonferroni", n = 4),
         ttest_outside_tvalue = round(ttest_outside$statistic, 2),
         num_subs = .N)
  }] %>%
  verify(all(num_subs == 36)) %>%
  filter(classification == "ovr")
dt_pred_rep_trans_test
```

```{r, fig.width = 10, fig.height = 10, dev = "cairo_pdf"}
plot_grid(plot_grid(fig_a, fig_b, labels = c("a", "b")),
          #plot_grid(fig_c, fig_d, labels = c("c", "d")),
          plot_grid(fig_c, fig_trans_prop, labels = c("c", "d")),
          plot_grid(fig_trans_mat, labels = c("e")),
          ncol = 1, label_fontface = "bold")
```

```{r}
ggsave(filename = "highspeed_plot_decoding_repetition.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 10, height = 10)
ggsave(filename = "wittkuhn_schuck_figure_4.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 10, height = 10)
```