## Decoding: Sequence trials
### Initialization
#### Load data and files
We load the data and relevant functions:
```{r, warning=FALSE, message=FALSE, echo=TRUE}
# find the path to the root of this project:
if (!requireNamespace("here")) install.packages("here")
if (basename(here::here()) == "highspeed") {
path_root = here::here("highspeed-analysis")
} else {
path_root = here::here()
}
# load pre-computed data tables from the temporary data directory:
load(file.path(path_root, "data", "tmp", "dt_periods.Rdata"))
load(file.path(path_root, "data", "tmp", "dt_odd_seq_sim_diff.Rdata"))
load(file.path(path_root, "data", "tmp", "dt_odd_seq_sim.Rdata"))
sub_exclude <- c("sub-24", "sub-31", "sub-37", "sub-40")
```
The next step is only evaluated during manual execution:
```{r, eval=FALSE}
# source all relevant functions from the setup R script:
source(file.path(path_root, "code", "highspeed-analysis-setup.R"))
```
#### Data preparation
We create a function to determine early and late zones of forward and backward periods:
```{r}
get_zones = function(trs_in){
if (length(trs_in) == 3) {
early = trs_in[c(2)]
late = trs_in[c(3)]
} else if (length(trs_in) == 4) {
early = trs_in[c(2)]
late = trs_in[c(4)]
} else if (length(trs_in) == 5) {
early = trs_in[c(2)]
late = trs_in[c(4)]
} else if (length(trs_in) == 6) {
early = trs_in[c(2, 3)]
late = trs_in[c(5, 6)]
} else if (length(trs_in) == 7) {
early = trs_in[c(2, 3)]
late = trs_in[c(5, 6)]
}
return(list(early = early, late = late))
}
```
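As an illustration of how `get_zones()` splits a period, consider a hypothetical forward period spanning seven TRs (the TR values below are made up for this example and are not taken from `dt_periods`):
```{r, eval=FALSE}
# illustrative only: a made-up forward period spanning seven TRs
example_trs = c(3, 4, 5, 6, 7, 8, 9)
# with seven TRs, the 2nd and 3rd TRs form the early zone (here, TRs 4 and 5)
# and the 5th and 6th TRs form the late zone (here, TRs 7 and 8):
get_zones(example_trs)
```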
We prepare event and decoding data of sequence trials:
```{r}
# create a subset of the events data table only including sequence task events:
dt_events_seq = dt_events %>%
filter(condition == "sequence" & trial_type == "stimulus")
# create a subset of the decoding data only including the sequence task data:
dt_pred_seq = dt_pred %>%
filter(condition == "sequence" & class != "other" & mask == "cv" & stim != "cue") %>%
setDT(.) %>%
# add serial position, change trial and target cue to the sequence data table:
.[, c("position", "change", "trial_cue", "accuracy", "trial_cue_position") := get_pos(
.SD, dt_events_seq), by = .(id, trial, class), .SDcols = c("id", "trial", "class")] %>%
# add variable to later pool trial_cue_position 1 to 3:
mutate(cue_pos_label = ifelse(trial_cue_position <= 3, "1-3", trial_cue_position)) %>%
setDT(.)
```
We define the forward and backward periods depending on the response functions:
```{r}
# define forward and backward period depending on response functions:
for (cspeed in unique(dt_pred_seq$tITI)) {
for (period in c("forward", "backward")) {
# get trs in the relevant forward or backward period based on response functions:
trs_period = dt_periods[[which(dt_periods$speed == cspeed), period]]
# set the period variable in the sequence data table accordingly:
dt_pred_seq$period[
dt_pred_seq$tITI %in% cspeed & dt_pred_seq$seq_tr %in% trs_period] = period
for (zone in c("early", "late")) {
trs_zone = get_zones(trs_period)[[zone]]
dt_pred_seq$zone[
dt_pred_seq$tITI %in% cspeed & dt_pred_seq$seq_tr %in% trs_zone] = zone
}
}
}
# assign the excluded label to all trs that are not in the forward or backward period:
dt_pred_seq$period[is.na(dt_pred_seq$period)] = "excluded"
```
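The loop above assumes that `dt_periods` contains one row per speed condition, with list columns holding the TRs of the forward and backward periods. A minimal sketch of such a structure (the speeds and TR values are placeholders, not the actual periods):
```{r, eval=FALSE}
# minimal sketch of the assumed structure of dt_periods (placeholder values only):
dt_periods_example = data.table::data.table(
speed = c(0.032, 2.048),
forward = list(c(2, 3, 4), c(2, 3, 4, 5, 6, 7)),
backward = list(c(8, 9, 10), c(9, 10, 11, 12, 13))
)
# extract the forward period of the 32 ms speed condition, as in the loop above:
dt_periods_example[[which(dt_periods_example$speed == 0.032), "forward"]]
```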
We exclude all participants with below-chance performance from the analyses:
```{r}
# exclude participants with below-chance performance:
dt_pred_seq = dt_pred_seq %>%
filter(!(id %in% sub_exclude)) %>%
verify(length(unique(id)) == 36) %>%
setDT(.)
```
We calculate the number of correct and incorrect sequence trials:
```{r}
dt_num_correct <- dt_pred_seq %>%
# calculate the number of trials for each accuracy level for each participant:
.[, by = .(classification, id, accuracy), .(
num_trials = length(unique(trial))
)] %>%
# select only the one-versus-rest decoding approach:
filter(classification == "ovr") %>%
setDT(.) %>%
# verify that the sum of all trials equals 75 for all participants:
verify(.[, by = .(classification, id), .(
sum_trials = sum(num_trials))]$sum_trials == 75) %>%
# complete missing values for number of trials for each accuracy level:
complete(classification, id, accuracy, fill = list(num_trials = 0)) %>%
setDT(.) %>%
# verify that there are two accuracy levels per participant:
verify(.[, by = .(classification, id), .(
num_acc_levels = .N)]$num_acc_levels == 2) %>%
# calculate the mean number of (in)accurate trials per participant:
.[, by = .(classification, accuracy), .(
num_subs = .N,
mean_num_trials = mean(num_trials),
percent_trials = mean(num_trials)/75
)]
# print formatted table:
rmarkdown::paged_table(dt_num_correct)
```
### Probability time courses
We calculate the decoding probability time-courses:
```{r}
# select the variable of interest:
variable = "probability_norm"
dt_pred_seq_prob = dt_pred_seq %>%
# average across trials separately for each position, TR, and participant
.[, by = .(id, classification, tITI, period, seq_tr, position), .(
num_trials = .N,
mean_prob = mean(get(variable)) * 100
)] %>%
# check if the averaged data consists of 15 sequence trials per participant:
verify(all(num_trials == 15)) %>%
# average across participants and calculate standard error of the mean:
.[, by = .(classification, tITI, period, seq_tr, position), .(
num_subs = .N,
mean_prob = mean(mean_prob),
sem_upper = mean(mean_prob) + (sd(mean_prob)/sqrt(.N)),
sem_lower = mean(mean_prob) - (sd(mean_prob)/sqrt(.N))
)] %>%
# check if averaged data is consistent with expected number of participants:
verify(all(num_subs == 36)) %>%
# create a new variable that expresses TRs as time from stimulus onset:
mutate(time = (seq_tr - 1) * 1.25) %>%
setDT(.)
```
#### Figure 3a
We plot the decoding probability time-courses:
```{r}
plot_raw_probas = function(dt1){
dt_reduced = dt1 %>%
setDT(.) %>%
.[, by = .(classification, tITI, period), .(
xmin = min(seq_tr) - 0.5,
xmax = max(seq_tr) + 0.5
)] %>%
filter(period != "excluded") %>%
mutate(fill = ifelse(period == "forward", "dodgerblue", "red"))
plot = ggplot(data = dt1, mapping = aes(
x = as.factor(seq_tr), y = as.numeric(mean_prob),
group = as.factor(position)), environment = environment()) +
geom_rect(data = dt_reduced, aes(
xmin = xmin, xmax = xmax, ymin = 0, ymax = 40),
alpha = 0.05, inherit.aes = FALSE, show.legend = FALSE, fill = dt_reduced$fill) +
facet_wrap(facets = ~ as.factor(tITI), labeller = get_labeller(array = dt1$tITI), nrow = 1) +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper,
fill = as.factor(position)), alpha = 0.5) +
geom_line(mapping = aes(color = as.factor(position))) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
xlab("Time from sequence onset (TRs; 1 TR = 1.25 s)") +
ylab("Probability (%)") +
scale_color_manual(values = color_events, name = "Serial event") +
scale_fill_manual(values = color_events, name = "Serial event") +
scale_x_discrete(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 25)) +
theme(panel.border = element_blank(), axis.line = element_line()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
theme(strip.text.x = element_text(margin = margin(b = 2, t = 2))) +
guides(color = guide_legend(nrow = 1))
return(plot)
}
fig_seq_probas = plot_raw_probas(dt1 = subset(dt_pred_seq_prob, classification == "ovr"))
fig_seq_probas
```
#### Source Data File Fig. 3a
```{r, echo=TRUE}
subset(dt_pred_seq_prob, classification == "ovr") %>%
select(-classification, -num_subs) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3a.csv"),
row.names = FALSE)
```
We plot the decoding probabilities as a heat-map:
```{r, echo=FALSE}
plot_data = subset(dt_pred_seq_prob, classification == "ovr" & !(tITI %in% c(2.048)))
ggplot(plot_data, aes(x = as.factor(position), y = as.factor(seq_tr), fill = as.numeric(mean_prob))) +
facet_wrap(facets = ~ as.factor(tITI),
labeller = get_labeller(array = plot_data$tITI), nrow = 1) +
geom_tile() +
xlab("Serial event position") + ylab("Time from sequence onset (TRs)") +
scale_fill_viridis(option = "inferno", name = "Probability (%)") +
scale_y_discrete(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
lemon::coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
theme(panel.border = element_blank(), axis.line = element_line()) +
theme(strip.text.x = element_text(margin = margin(b = 2, t = 2))) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
guides(color = guide_legend(nrow = 1)) +
theme(panel.border = element_blank(), axis.line = element_line())
```
### Influence of target cue position
We analyze the probabilities by target cue position:
```{r}
# select the variable of interest:
variable = "probability_norm"
dt_pred_seq_prob_cue = dt_pred_seq %>%
# average across trials separately for each position, TR, and participant
.[, by = .(id, classification, tITI, period, seq_tr, position, cue_pos_label), .(
num_trials = .N,
mean_prob = mean(get(variable)) * 100
)] %>%
# average across participants and calculate standard error of the mean:
.[, by = .(classification, tITI, period, seq_tr, position, cue_pos_label), .(
num_subs = .N,
mean_num_trials = mean(num_trials),
sd_num_trials = sd(num_trials),
mean_prob = mean(mean_prob),
sem_upper = mean(mean_prob) + (sd(mean_prob)/sqrt(.N)),
sem_lower = mean(mean_prob) - (sd(mean_prob)/sqrt(.N))
)] %>%
# check if averaged data is consistent with expected number of participants:
verify(all(num_subs == 36)) %>%
# check that the SD of the number of trials per cue position is 0
# which means that each participant has the same number of trials per cue position:
verify(all(sd_num_trials == 0)) %>%
verify(all(mean_num_trials == 5)) %>%
# create a new variable that expresses TRs as time from stimulus onset:
mutate(time = (seq_tr - 1) * 1.25) %>%
transform(tITI = paste0(as.numeric(tITI) * 1000, " ms")) %>%
transform(tITI = factor(tITI, levels = c(
"32 ms", "64 ms", "128 ms", "512 ms", "2048 ms"))) %>%
setDT(.)
```
We plot the probabilities by target cue position:
```{r}
plot_raw_probas_cue = function(dt1){
plot = ggplot(data = dt1, mapping = aes(
x = as.factor(seq_tr), y = as.numeric(mean_prob),
group = as.factor(position))) +
facet_grid(rows = vars(cue_pos_label), cols = vars(tITI)) +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper,
fill = as.factor(position)), alpha = 0.5) +
geom_line(mapping = aes(color = as.factor(position))) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
xlab("Time from sequence onset (TRs)") + ylab("Probability (%)") +
scale_color_manual(values = color_events, name = "Serial event") +
scale_fill_manual(values = color_events, name = "Serial event") +
scale_x_discrete(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 25)) +
theme(panel.border = element_blank(), axis.line = element_line()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
theme(strip.text.x = element_text(margin = margin(b = 2, t = 2))) +
guides(color = guide_legend(nrow = 1))
return(plot)
}
fig_seq_probas_cue = plot_raw_probas_cue(dt1 = subset(dt_pred_seq_prob_cue, classification == "ovr"))
fig_seq_probas_cue
```
### Regression slope timecourses
```{r, echo=FALSE, eval=FALSE}
# select positions within every TR that should be selected:
pos_sel = seq(1, 5)
set.seed(4)
probability = runif(5)
# earlier events have higher probability:
probability = c(0.6, 0.9, 0.1, 0.5, 0.2)
position = seq(1, 5, 1)
position = c(1, 3, 2, 4, 5)
ordered_positions = probability[order(probability, decreasing = TRUE)]
diff(ordered_positions)
# order the probabilities in decreasing order (first = highest):
prob_order_idx = order(probability, decreasing = TRUE)
# order the positions by probability:
pos_order = position[prob_order_idx]
# order the probabilities:
prob_order = probability[prob_order_idx]
# select positions
pos_order_sel = pos_order[pos_sel]
prob_order_sel = prob_order[pos_sel]
```
We compare the mean indices of association (regression slope, correlation,
mean serial position) for every TR:
```{r}
# select positions within every TR that should be selected:
pos_sel = seq(1, 5)
# define relevant variables:
variable = "probability_norm"
cor_method = "kendall"
# calculate indices of association at every TR:
dt_pred_seq_cor = dt_pred_seq %>%
# here, we can filter for specific sequence events:
filter(position %in% seq(1, 5, by = 1)) %>%
setDT(.) %>%
# order positions by decreasing probability and calculate step size
# calculate correlation and slope between position and probability
# verify that there are five probabilities (one for each class) per volume
# verify that all correlations range between -1 and 1
.[, by = .(id, classification, tITI, period, trial_tITI, seq_tr), {
# order the probabilities in decreasing order (first = highest):
prob_order_idx = order(get(variable), decreasing = TRUE)
# order the positions by probability:
pos_order = position[prob_order_idx]
# order the probabilities:
prob_order = get(variable)[prob_order_idx]
# select positions
pos_order_sel = pos_order[pos_sel]
prob_order_sel = prob_order[pos_sel]
list(
# calculate the number of events:
num_events = length(pos_order_sel[!is.na(pos_order_sel)]),
# calculate the mean step size between probability-ordered events:
mean_step = mean(diff(pos_order_sel)),
# calculate the mean correlation between positions and their probabilities:
cor = cor.test(pos_order_sel, prob_order_sel, method = cor_method)$estimate,
# calculate the slope of a linear regression between position and probabilities:
slope = coef(lm(prob_order_sel ~ pos_order_sel))[2]
# verify that the number of events matches selection and correlations -1 < r < 1
)}] %>% verify(all(num_events == length(pos_sel))) %>% #verify(between(cor, -1, 1)) %>%
# average across trials for each participant (flip values by multiplying with -1):
# verify that the number of trials per participant is correct:
.[, by = .(id, classification, tITI, period, seq_tr), .(
num_trials = .N,
mean_cor = mean(cor) * (-1),
mean_step = mean(mean_step),
mean_slope = mean(slope) * (-1)
)] %>%
verify(all(num_trials == 15)) %>%
# shorten the period name:
mutate(period_short = ifelse(period == "forward", "fwd", period)) %>%
transform(period_short = ifelse(period == "backward", "bwd", period_short)) %>%
mutate(color = ifelse(period_short == "fwd", "dodgerblue", "red")) %>% setDT(.)
```
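Note the sign convention used above: because probabilities of earlier events are higher when the sequence replays in forward order, the regression slope of probability on (probability-ordered) position is negative for forward order, which is why the trial averages are multiplied by -1 so that positive values indicate forward sequentiality. A toy example with made-up probabilities (not actual data) illustrates this:
```{r, eval=FALSE}
# toy example with made-up values: earlier events have higher probability,
# i.e., a perfectly forward-ordered pattern:
probability = c(0.9, 0.7, 0.5, 0.3, 0.1)
position = 1:5
# order positions by decreasing probability (here, the order stays 1 to 5):
pos_order = position[order(probability, decreasing = TRUE)]
prob_order = sort(probability, decreasing = TRUE)
# the regression slope of probability on ordered position is negative (-0.2)...
coef(lm(prob_order ~ pos_order))[2]
# ...so multiplying by -1 yields positive values for forward-ordered sequences:
coef(lm(prob_order ~ pos_order))[2] * (-1)
```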
```{r, echo=FALSE, eval=FALSE, include=FALSE}
cfg = list(variable = "mean_cor", threshold = 2.021, baseline = 0,
grouping = c("classification", "tITI"), n_perms = 10000, n_trs = 13)
dt_pred_seq_cor_cluster = cluster_permutation(dt_pred_seq_cor, cfg)
```
We compare the mean indices of association (regression slope, correlation,
mean serial position) against zero (the expectation of no association)
for every TR:
```{r}
seq_test_time <- function(data, variable){
data_out = data %>%
# average across participants for every speed at every TR:
# check if the number of participants matches:
.[, by = .(classification, tITI, period, seq_tr), {
# perform a two-sided one-sample t-test against zero (baseline):
ttest_results = t.test(get(variable), alternative = "two.sided", mu = 0);
list(
num_subs = .N,
mean_variable = mean(get(variable)),
pvalue = ttest_results$p.value,
tvalue = ttest_results$statistic,
df = ttest_results$parameter,
cohens_d = round(abs(mean(mean(get(variable)) - 0)/sd(get(variable))), 2),
sem_upper = mean(get(variable)) + (sd(get(variable))/sqrt(.N)),
sem_lower = mean(get(variable)) - (sd(get(variable))/sqrt(.N))
)}] %>% verify(all(num_subs == 36)) %>% verify(all((num_subs - df) == 1)) %>%
# adjust p-values for multiple comparisons (filter for forward and backward period):
# check if the number of comparisons matches expectations:
.[period %in% c("forward", "backward"), by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>% verify(all(num_comp == 39, na.rm = TRUE)) %>%
# round the original p-values according to the apa standard:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the adjusted p-value:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# sort data table:
setorder(., classification, period, tITI, seq_tr) %>%
# create new variable indicating significance below 0.05
mutate(significance = ifelse(pvalue_adjust < 0.05, "*", ""))
return(data_out)
}
```
```{r}
# filter for significant p-values to make reporting easier:
rmarkdown::paged_table(seq_test_time(data = dt_pred_seq_cor, variable = "mean_cor") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
rmarkdown::paged_table(seq_test_time(data = dt_pred_seq_cor, variable = "mean_step") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
rmarkdown::paged_table(seq_test_time(data = dt_pred_seq_cor, variable = "mean_slope") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
```
```{r}
plot_seq_cor_time = function(dt, variable){
# select the variable of interest, determine y-axis label and adjust axis:
if (variable == "mean_slope") {
ylabel = "Regression slope"
adjust_axis = 0.1
} else if (variable == "mean_cor") {
ylabel = expression("Correlation ("*tau*")")
adjust_axis = 1
} else if (variable == "mean_step") {
ylabel = "Mean step size"
adjust_axis = 1
}
plot = ggplot(data = dt, mapping = aes(
x = seq_tr, y = mean_variable, group = as.factor(as.numeric(tITI) * 1000),
fill = as.factor(as.numeric(tITI) * 1000))) +
geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
geom_line(mapping = aes(color = as.factor(as.numeric(tITI) * 1000))) +
xlab("Time from sequence onset (TRs)") + ylab(ylabel) +
scale_colour_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
scale_fill_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
scale_x_continuous(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
guides(color = guide_legend(nrow = 1)) +
annotate("text", x = 1, y = -0.4 * adjust_axis, label = "1 TR = 1.25 s",
hjust = 0, size = rel(2)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
ylim = c(-0.4 * adjust_axis, 0.4 * adjust_axis)) +
theme(panel.border = element_blank(), axis.line = element_line()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
geom_segment(aes(x = 0.05, xend = 0.05, y = 0.01 * adjust_axis, yend = 0.4 * adjust_axis),
arrow = arrow(length = unit(5, "pt")), color = "darkgray") +
geom_segment(aes(x = 0.05, xend = 0.05, y = -0.01 * adjust_axis, yend = -0.4 * adjust_axis),
arrow = arrow(length = unit(5, "pt")), color = "darkgray") +
annotate(geom = "text", x = 0.4, y = 0.2 * adjust_axis, label = "Forward order",
color = "darkgray", angle = 90, size = 3) +
annotate(geom = "text", x = 0.4, y = -0.2 * adjust_axis, label = "Backward order",
color = "darkgray", angle = 90, size = 3)
return(plot)
}
```
#### Figure 3b
```{r}
fig_seq_cor_time = plot_seq_cor_time(dt = subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_cor"),
classification == "ovr"), variable = "mean_cor")
fig_seq_slope_time = plot_seq_cor_time(dt = subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_slope"),
classification == "ovr"), variable = "mean_slope")
fig_seq_step_time = plot_seq_cor_time(dt = subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_step"),
classification == "ovr"), variable = "mean_step")
fig_seq_cor_time; fig_seq_slope_time; fig_seq_step_time
```
```{r, eval=FALSE, echo=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_timecourses_slopes.pdf",
plot = fig_seq_slope_time, device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 5.5, height = 3)
```
#### Source Data File Fig. 3b
```{r, echo=TRUE}
subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_slope"),
classification == "ovr") %>%
select(-classification, -num_subs, -num_comp, -pvalue_adjust,
-pvalue_round, -pvalue_adjust_round, -pvalue, -df, -cohens_d,
-tvalue, -significance) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3b.csv"),
row.names = FALSE)
```
#### Source Data File Fig. S5a
```{r, echo=TRUE}
subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_cor"),
classification == "ovr") %>%
select(-classification, -num_subs, -num_comp, -pvalue_adjust,
-pvalue_round, -pvalue_adjust_round, -pvalue, -df, -cohens_d,
-tvalue, -significance) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s5a.csv"),
row.names = FALSE)
```
#### Source Data File Fig. S5c
```{r, echo=TRUE}
subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_step"),
classification == "ovr") %>%
select(-classification, -num_subs, -num_comp, -pvalue_adjust,
-pvalue_round, -pvalue_adjust_round, -pvalue, -df, -cohens_d,
-tvalue, -significance) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s5c.csv"),
row.names = FALSE)
```
We repeat these tests separately depending on the target cue position:
```{r}
# select positions within every TR that should be selected:
pos_sel = seq(1, 5)
# define relevant variables:
variable = "probability_norm"
cor_method = "kendall"
# calculate indices of association at every TR:
dt_pred_seq_cor_cue = dt_pred_seq %>%
# here, we can filter for specific sequence events:
filter(position %in% seq(1, 5, by = 1)) %>% setDT(.) %>%
# order positions by decreasing probability and calculate step size
# calculate correlation and slope between position and probability
# verify that there are five probabilities (one for each class) per volume
# verify that all correlations range between -1 and 1
.[, by = .(id, classification, tITI, period, trial_tITI, seq_tr, cue_pos_label), {
# order the probabilities in decreasing order (first = highest):
prob_order_idx = order(get(variable), decreasing = TRUE)
# order the positions by probability:
pos_order = position[prob_order_idx]
# order the probabilities:
prob_order = get(variable)[prob_order_idx]
# select positions
pos_order_sel = pos_order[pos_sel]
prob_order_sel = prob_order[pos_sel]
list(
# calculate the number of events:
num_events = length(pos_order_sel[!is.na(pos_order_sel)]),
# calculate the mean step size between probability-ordered events:
mean_step = mean(diff(pos_order_sel)),
# calculate the mean correlation between positions and their probabilities:
cor = cor.test(pos_order_sel, prob_order_sel, method = cor_method)$estimate,
# calculate the slope of a linear regression between position and probabilities:
slope = coef(lm(prob_order_sel ~ pos_order_sel))[2]
# verify that the number of events matches selection and correlations -1 < r < 1
)}] %>% verify(all(num_events == length(pos_sel))) %>% #verify(between(cor, -1, 1)) %>%
# average across trials for each participant (flip values by multiplying with -1):
# verify that the number of trials per participant is correct:
.[, by = .(id, classification, tITI, period, seq_tr, cue_pos_label), .(
num_trials = .N,
mean_cor = mean(cor) * (-1),
mean_step = mean(mean_step),
mean_slope = mean(slope) * (-1)
)] %>%
verify(all(num_trials == 5)) %>%
# shorten the period name:
mutate(period_short = ifelse(period == "forward", "fwd", period)) %>%
transform(period_short = ifelse(period == "backward", "bwd", period_short)) %>%
mutate(color = ifelse(period_short == "fwd", "dodgerblue", "red")) %>% setDT(.)
```
```{r}
seq_test_time_cue <- function(data, variable){
data_out = data %>%
# average across participants for every speed at every TR:
# check if the number of participants matches:
.[, by = .(classification, tITI, period, seq_tr, cue_pos_label), {
# perform a two-sided one-sample t-test against zero (baseline):
ttest_results = t.test(get(variable), alternative = "two.sided", mu = 0);
list(
num_subs = .N,
mean_variable = mean(get(variable)),
pvalue = ttest_results$p.value,
tvalue = ttest_results$statistic,
df = ttest_results$parameter,
cohens_d = round(abs(mean(mean(get(variable)) - 0)/sd(get(variable))), 2),
sem_upper = mean(get(variable)) + (sd(get(variable))/sqrt(.N)),
sem_lower = mean(get(variable)) - (sd(get(variable))/sqrt(.N))
)}] %>% verify(all(num_subs == 36)) %>% verify(all((num_subs - df) == 1)) %>%
# adjust p-values for multiple comparisons (filter for forward and backward period):
# check if the number of comparisons matches expectations:
.[period %in% c("forward", "backward"), by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>% #verify(all(num_comp == 39, na.rm = TRUE)) %>%
# round the original p-values according to the apa standard:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the adjusted p-value:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# sort data table:
setorder(., classification, period, tITI, seq_tr) %>%
# create new variable indicating significance below 0.05
mutate(significance = ifelse(pvalue_adjust < 0.05, "*", ""))
return(data_out)
}
```
```{r, echo=FALSE}
plot_seq_cor_time_cue = function(dt, variable){
# select the variable of interest, determine y-axis label and adjust axis:
if (variable == "mean_slope") {
ylabel = "Regression slope"
adjust_axis = 0.1
} else if (variable == "mean_cor") {
ylabel = expression("Correlation ("*tau*")")
adjust_axis = 1
} else if (variable == "mean_step") {
ylabel = "Mean step size"
adjust_axis = 1
}
plot = ggplot(data = dt, mapping = aes(
x = seq_tr, y = mean_variable, group = as.factor(as.numeric(tITI) * 1000),
fill = as.factor(as.numeric(tITI) * 1000))) +
geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
geom_line(mapping = aes(color = as.factor(as.numeric(tITI) * 1000))) +
facet_wrap(~ cue_pos_label) +
xlab("Time from sequence onset (TRs)") + ylab(ylabel) +
scale_colour_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
scale_fill_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
scale_x_continuous(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
guides(color = guide_legend(nrow = 1)) +
annotate("text", x = 1, y = -0.4 * adjust_axis, label = "1 TR = 1.25 s",
hjust = 0, size = rel(2)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
ylim = c(-0.4 * adjust_axis, 0.4 * adjust_axis)) +
theme(panel.border = element_blank(), axis.line = element_line()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
geom_segment(aes(x = 0.05, xend = 0.05, y = 0.01 * adjust_axis, yend = 0.4 * adjust_axis),
arrow = arrow(length = unit(5, "pt")), color = "darkgray") +
geom_segment(aes(x = 0.05, xend = 0.05, y = -0.01 * adjust_axis, yend = -0.4 * adjust_axis),
arrow = arrow(length = unit(5, "pt")), color = "darkgray") +
annotate(geom = "text", x = 0.6, y = 0.2 * adjust_axis, label = "Forward",
color = "darkgray", angle = 90, size = 2) +
annotate(geom = "text", x = 0.6, y = -0.2 * adjust_axis, label = "Backward",
color = "darkgray", angle = 90, size = 2)
return(plot)
}
```
```{r}
fig_seq_slope_time_cue = plot_seq_cor_time_cue(dt = subset(
seq_test_time_cue(data = dt_pred_seq_cor_cue, variable = "mean_slope"),
classification == "ovr"), variable = "mean_slope")
fig_seq_slope_time_cue
```
### Correlation of regression time courses
#### Correlation between participants
We calculate the correlations between the predicted and the observed time courses *between* participants for each of the five speed conditions (inter-stimulus intervals):
```{r, echo=TRUE}
# observed time courses:
dt_data_between = seq_test_time(
data = dt_pred_seq_cor, variable = "mean_slope") %>%
filter(classification == "ovr") %>%
transform(tITI = as.factor(as.numeric(tITI) * 1000)) %>%
setorder(classification, tITI, seq_tr)
# predicted time courses:
dt_model_between = dt_odd_seq_sim_diff %>%
transform(time = time + 1) %>%
filter(time %in% seq(1, 13, 1)) %>%
setorder(classification, speed, time)
# combine in one data table:
dt_between = data.table(
speed = dt_data_between$tITI,
tr = dt_data_between$seq_tr,
empirical = dt_data_between$mean_variable,
prediction = dt_model_between$mean_difference)
# calculate the correlation between the empirical and predicted time courses:
dt_between_results = dt_between %>%
.[, by = .(speed), {
cor = cor.test(empirical, prediction, method = "pearson")
list(
num_trs = .N,
pvalue = cor$p.value,
pvalue_round = round_pvalues(cor$p.value),
correlation = round(cor$estimate, 2)
)
}] %>%
verify(num_trs == 13) %>%
select(-num_trs)
# show the table with the correlations:
rmarkdown::paged_table(dt_between_results)
```
#### Figure 3d
We plot the correlations between the regression slope time courses predicted by the model vs. the observed data *between* participants:
```{r, echo=TRUE, warning=FALSE, message=FALSE}
fig_seq_cor_between = ggplot(
data = dt_between,
mapping = aes(
x = prediction, y = empirical, color = speed, fill = speed)) +
geom_point(alpha = 1) +
geom_smooth(method = lm, se = FALSE, alpha = 0.5, fullrange = TRUE) +
scale_colour_viridis(
name = "Speed (ms)", discrete = TRUE,
option = "cividis", guide = FALSE) +
scale_fill_viridis(
name = "Speed (ms)", discrete = TRUE,
option = "cividis", guide = FALSE) +
xlab("Predicted slope") +
ylab("Observed slope") +
# guides(color = guide_legend(nrow = 1)) +
coord_capped_cart(
left = "both", bottom = "both", expand = TRUE,
xlim = c(-0.4, 0.4), ylim = c(-0.05, 0.05)) +
theme(legend.position = "top",
legend.direction = "horizontal",
legend.justification = "center",
legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
# show the plot:
fig_seq_cor_between
```
#### Source Data File Fig. 3d
```{r, echo=TRUE}
dt_between %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3d.csv"),
row.names = FALSE)
```
#### Correlation within participants
We calculate the correlations between the predicted and the observed time courses *within* participants for each of the five speed conditions (inter-stimulus intervals):
```{r}
# observed regression slope time courses
dt_data_within = dt_pred_seq_cor %>%
filter(classification == "ovr") %>%
transform(tITI = as.factor(as.numeric(tITI) * 1000)) %>%
setorder(classification, id, tITI, seq_tr)
# predicted regression slope time courses
dt_model_within = dt_odd_seq_sim %>%
transform(time = time + 1) %>%
filter(time %in% seq(1, 13, 1)) %>%
setorder(classification, id, speed, time)
# combine in one data table:
dt_within = data.table(
id = dt_data_within$id,
speed = dt_data_within$tITI,
time = dt_data_within$seq_tr,
empirical = dt_data_within$mean_slope,
prediction = dt_model_within$probability)
# run correlations:
dt_within_cor = dt_within %>%
.[, by = .(id, speed), {
cor = cor.test(empirical, prediction, method = "pearson")
list(
num_trs = .N,
pvalue = cor$p.value,
estimate = as.numeric(cor$estimate)
)}] %>%
verify(num_trs == 13)
# run t-tests over correlation coefficients for each speed level:
dt_within_cor_results = setDT(dt_within_cor) %>%
.[, by = .(speed), {
ttest_results = t.test(
estimate, mu = 0, alternative = "two.sided", paired = FALSE)
list(
num_subs = .N,
mean_estimate = round(mean(estimate), 2),
pvalue = ttest_results$p.value,
tvalue = round(ttest_results$statistic, 2),
df = ttest_results$parameter,
cohens_d = round((mean(estimate) - 0)/sd(estimate), 2)
)}] %>%
verify(num_subs == 36) %>%
verify((num_subs - df) == 1) %>%
# adjust p-values for multiple comparisons:
# check if the number of comparisons matches expectations:
.[, ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>%
# round the original p-values according to the apa standard:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the adjusted p-value:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# create new variable indicating significance below 0.05
mutate(significance = ifelse(pvalue_adjust < 0.05, "*", ""))
# show the table with the t-test results:
rmarkdown::paged_table(dt_within_cor_results)
```
#### Figure 3e
We plot the correlations between the predicted and the observed time courses *within* participants for each of the five speed conditions (inter-stimulus intervals):
```{r, echo=TRUE}
fig_seq_cor_within = ggplot(
data = dt_within_cor,
mapping = aes(
x = speed, y = estimate, color = speed, fill = speed, group = speed)) +
stat_summary(geom = "bar", fun = "mean") +
geom_dotplot(binaxis = "y", stackdir = "center", stackratio = 0.5,
color = "white", alpha = 0.5,
inherit.aes = TRUE, binwidth = 0.05) +
stat_summary(geom = "errorbar", fun.data = "mean_se", width = 0, color = "black") +
scale_colour_viridis(
name = "Speed (ms)", discrete = TRUE, option = "cividis", guide = FALSE) +
scale_fill_viridis(
name = "Speed (ms)", discrete = TRUE, option = "cividis", guide = FALSE) +
xlab("Speed (in ms)") +
ylab("Correlation (r)") +
#guides(color = guide_legend(nrow = 1)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
theme(axis.ticks.x = element_line(colour = "white"),
axis.line.x = element_line(colour = "white")) +
theme(axis.title.x = element_blank(), axis.text.x = element_blank()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
# show figure:
fig_seq_cor_within
```
#### Source Data File Fig. 3e
```{r, echo=TRUE}
dt_within_cor %>%
select(-num_trs, -pvalue) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3e.csv"),
row.names = FALSE)
```
### Regression slope means
We calculate the average correlation or average regression slope
for each time period (forward versus backward) across all five speed conditions:
```{r, echo=TRUE}
seq_test_period <- function(data, variable){
data_out = data %>%
# filter out the excluded time period (select only forward and backward period):
filter(period != "excluded") %>%
setDT(.) %>%
# average for each time period and speed condition for every participant:
.[, by = .(classification, id, tITI, period), .(
mean_variable = mean(get(variable)))] %>%
# average across participants for every speed at every TR:
# check if the number of participants matches:
.[, by = .(classification, tITI, period), {
# perform a two-sided one-sample t-test against zero (baseline):
ttest_results = t.test(mean_variable, alternative = "two.sided", mu = 0);
list(
num_subs = .N,
mean_variable = mean(mean_variable),
pvalue = ttest_results$p.value,
tvalue = round(abs(ttest_results$statistic), 2),
df = ttest_results$parameter,
cohens_d = abs(round((mean(mean_variable) - 0) / sd(mean_variable), 2)),
sem_upper = mean(mean_variable) + (sd(mean_variable)/sqrt(.N)),
sem_lower = mean(mean_variable) - (sd(mean_variable)/sqrt(.N))
)
}] %>%
verify(all(num_subs == 36)) %>%
verify(all((num_subs - df) == 1)) %>%
# adjust p-values for multiple comparisons:
# check if the number of comparisons matches expectations:
.[period %in% c("forward", "backward"), by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>%
verify(num_comp == 10) %>%
# add variable that indicates significance with significance stars:
mutate(significance = ifelse(pvalue < 0.05, "*", "")) %>%
# round the original p-values according to APA manual:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the adjusted p-value:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# sort data table:
setorder(classification, period, tITI) %>%
# shorten the period name:
mutate(period_short = ifelse(period == "forward", "fwd", period)) %>%
transform(period_short = ifelse(period == "backward", "bwd", period_short)) %>%
mutate(color = ifelse(period_short == "fwd", "dodgerblue", "red")) %>% setDT(.)
return(data_out)
}
```
```{r}
rmarkdown::paged_table(
seq_test_period(data = dt_pred_seq_cor, variable = "mean_cor") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
rmarkdown::paged_table(
seq_test_period(data = dt_pred_seq_cor, variable = "mean_step") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
rmarkdown::paged_table(
seq_test_period(data = dt_pred_seq_cor, variable = "mean_slope") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
```
#### Figure 3c
```{r, echo=TRUE}
plot_seq_cor_period = function(data, variable){
# select the variable of interest, determine y-axis label and adjust axis:
if (variable == "mean_slope") {
ylabel = "Regression slope"
adjust_axis = 0.1
} else if (variable == "mean_cor") {
ylabel = expression("Correlation ("*tau*")")
adjust_axis = 1
} else if (variable == "mean_step") {
ylabel = "Mean step size"
adjust_axis = 1
}
dt_forward = data.table(xmin = 0, xmax = 5.5, ymin = 0, ymax = 0.4 * adjust_axis)
dt_backward = data.table(xmin = 0, xmax = 5.5, ymin = 0, ymax = -0.4 * adjust_axis)
# average across participants for every speed at every TR:
plot_data = data %>% setDT(.) %>%
.[, by = .(classification, id, tITI, period_short), .(
mean_variable = mean(get(variable))
)] %>% filter(classification == "ovr" & period_short != "excluded")
plot_stat = seq_test_period(data = data, variable = variable)
# plot average correlation or betas for each speed condition and time period:
plot = ggplot(data = plot_data, aes(
x = fct_rev(as.factor(period_short)), y = as.numeric(mean_variable),
fill = as.factor(as.numeric(tITI) * 1000))) +
geom_bar(stat = "summary", fun = "mean", width = 0.9, show.legend = TRUE) +
geom_dotplot(binaxis = "y", stackdir = "center", stackratio = 0.5, alpha = 0.2,
binwidth = 0.01 * adjust_axis, show.legend = FALSE) +
#geom_point(position = position_jitterdodge(jitter.height = 0, seed = 4, jitter.width = 0.2),
# pch = 21, alpha = 0.2, show.legend = FALSE) +
geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0.0, color = "black") +
geom_text(data = subset(plot_stat, classification == "ovr"), aes(
x = fct_rev(as.factor(period_short)), y = round_updown(as.numeric(mean_variable), 0.6 * adjust_axis),
label = paste0("d=", sprintf("%.2f", cohens_d), significance)), size = 3.3, show.legend = FALSE,
color = subset(plot_stat, classification == "ovr")$color) +
facet_wrap(~ as.factor(as.numeric(tITI) * 1000), strip.position = "bottom", nrow = 1) +
xlab("Period") + ylab(ylabel) +
scale_fill_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-0.6, 0.6) * adjust_axis) +
theme(panel.border = element_blank(), axis.line = element_line()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
theme(axis.ticks.x = element_line(color = "white"),
axis.line.x = element_line(color = "white")) +
theme(legend.position = "top", legend.direction = "horizontal", legend.box = "vertical",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
theme(panel.spacing = unit(0, "lines"), strip.background = element_blank(),
strip.placement = "outside", strip.text = element_blank())
return(plot)
}
fig_seq_cor_period = plot_seq_cor_period(data = dt_pred_seq_cor, variable = "mean_cor")
fig_seq_slope_period = plot_seq_cor_period(data = dt_pred_seq_cor, variable = "mean_slope")
fig_seq_step_period = plot_seq_cor_period(data = dt_pred_seq_cor, variable = "mean_step")
fig_seq_cor_period; fig_seq_step_period; fig_seq_slope_period;
```
#### Source Data File Fig. 3c / S5b / S5d
```{r}
dt_pred_seq_cor %>%
select(-classification, -num_trials, -color) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3c.csv"),
row.names = FALSE)
dt_pred_seq_cor %>%
select(-classification, -num_trials, -color) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s5b.csv"),
row.names = FALSE)
dt_pred_seq_cor %>%
select(-classification, -num_trials, -color) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s5d.csv"),
row.names = FALSE)
```
#### Source Data File Fig. S5b
```{r}
dt_pred_seq_cor %>%
select(-classification, -num_trials) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3c.csv"),
row.names = FALSE)
```
#### Source Data File Fig. S5c
```{r}
dt_pred_seq_cor %>%
select(-classification, -num_trials) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3c.csv"),
row.names = FALSE)
```
```{r, echo = FALSE}
plot_seq_cor_facet = function(dt, variable){
# create separate datatable to plot rectangles indicating forward / backward period:
dt_reduced = dt %>% setDT(.) %>%
.[, by = .(classification, tITI, period), .(
xmin = min(seq_tr) - 0.5,
xmax = max(seq_tr) + 0.5
)] %>%
filter(period != "excluded") %>%
mutate(fill = ifelse(period == "forward", "dodgerblue", "red"))
# select the variable of interest, determine y-axis label and adjust axis:
if (variable == "mean_slope") {
ylabel = "Regression slope"
adjust_axis = 0.1
} else if (variable == "mean_cor") {
ylabel = expression("Correlation ("*tau*")")
adjust_axis = 1
} else if (variable == "mean_step") {
ylabel = "Mean step size"
adjust_axis = 1
}
plot = ggplot(data = dt, mapping = aes(
x = as.factor(seq_tr), y = as.numeric(mean_variable),
group = as.factor(tITI), fill = as.factor(tITI), color = as.factor(tITI))) +
# add background rectangles to indicate the forward and backward period:
geom_rect(data = dt_reduced, aes(
xmin = xmin, xmax = xmax, ymin = -0.4 * adjust_axis, ymax = 0.4 * adjust_axis),
alpha = 0.05, inherit.aes = FALSE, show.legend = FALSE, fill = dt_reduced$fill) +
geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
facet_wrap(facets = ~ as.factor(tITI), labeller = get_labeller(dt$tITI), nrow = 1) +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
geom_line() +
geom_point(data = subset(dt, pvalue_adjust < 0.05), pch = 21, fill = "red",
color = "black", show.legend = FALSE) +
xlab("Time from sequence onset (TRs)") + ylab(ylabel) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
scale_colour_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis", guide = FALSE) +
scale_fill_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis", guide = FALSE) +
scale_x_discrete(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
theme(strip.text.x = element_text(margin = margin(b = 2, t = 2))) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-0.4, 0.4) * adjust_axis) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
return(plot)
}
```
We repeat the same calculations, this time splitting the data by the serial position of the cued image:
```{r}
seq_test_period_cue <- function(data, variable){
data_out = data %>%
# filter out the excluded time period (select only forward and backward period):
filter(period != "excluded") %>% setDT(.) %>%
# average for each time period and speed condition for every participant:
.[, by = .(classification, id, tITI, period, cue_pos_label), .(
mean_variable = mean(get(variable)))] %>%
# run t-tests across participants for each speed condition, period, and cue position:
# check if the number of participants matches:
.[, by = .(classification, tITI, period, cue_pos_label), {
# perform a two-sided one-sample t-test against zero (baseline):
ttest_results = t.test(mean_variable, alternative = "two.sided", mu = 0);
list(
num_subs = .N,
mean_variable = mean(mean_variable),
pvalue = ttest_results$p.value,
tvalue = round(abs(ttest_results$statistic), 2),
df = ttest_results$parameter,
cohens_d = abs(round((mean(mean_variable) - 0) / sd(mean_variable), 2)),
sem_upper = mean(mean_variable) + (sd(mean_variable)/sqrt(.N)),
sem_lower = mean(mean_variable) - (sd(mean_variable)/sqrt(.N))
)
}] %>% verify(all(num_subs == 36)) %>% verify(all((num_subs - df) == 1)) %>%
# adjust p-values for multiple comparisons:
# check if the number of comparisons matches expectations:
.[period %in% c("forward", "backward"), by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>% #verify(num_comp == 10) %>%
# add a variable that indicates significance with significance stars:
mutate(significance = ifelse(pvalue < 0.05, "*", "")) %>%
# round the original p-values according to APA manual:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the adjusted p-value:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# sort data table:
setorder(classification, period, tITI) %>%
# shorten the period name:
mutate(period_short = ifelse(period == "forward", "fwd", period)) %>%
transform(period_short = ifelse(period == "backward", "bwd", period_short)) %>%
mutate(color = ifelse(period_short == "fwd", "dodgerblue", "red")) %>% setDT(.)
return(data_out)
}
```
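The multiple-comparison correction above relies on `p.adjust` with the Benjamini-Hochberg false discovery rate procedure. As a minimal, self-contained sketch of that call (the p-values below are made up for illustration and are not results from this analysis):
```{r, eval=FALSE}
# hypothetical p-values from several tests:
pvals = c(0.001, 0.012, 0.030, 0.049, 0.200)
# FDR (Benjamini-Hochberg) adjustment as used throughout this document:
p.adjust(pvals, method = "fdr", n = length(pvals))
```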
```{r}
rmarkdown::paged_table(
seq_test_period_cue(data = dt_pred_seq_cor_cue, variable = "mean_cor") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
rmarkdown::paged_table(
seq_test_period_cue(data = dt_pred_seq_cor_cue, variable = "mean_step") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
rmarkdown::paged_table(
seq_test_period_cue(data = dt_pred_seq_cor_cue, variable = "mean_slope") %>%
filter(pvalue_adjust < 0.05, classification == "ovr"))
```
We plot the same data, again split by the serial position of the cued image:
```{r}
plot_seq_cor_period_cue = function(data, variable){
# select the variable of interest, determine y-axis label and adjust axis:
if (variable == "mean_slope") {
ylabel = "Regression slope"
adjust_axis = 0.1
} else if (variable == "mean_cor") {
ylabel = expression("Correlation ("*tau*")")
adjust_axis = 1
} else if (variable == "mean_step") {
ylabel = "Mean step size"
adjust_axis = 1
}
dt_forward = data.table(xmin = 0, xmax = 5.5, ymin = 0, ymax = 0.4 * adjust_axis)
dt_backward = data.table(xmin = 0, xmax = 5.5, ymin = 0, ymax = -0.4 * adjust_axis)
# average across trials for each participant, speed condition, period, and cue position:
plot_data = data %>% setDT(.) %>%
.[, by = .(classification, id, tITI, period_short, cue_pos_label), .(
mean_variable = mean(get(variable))
)] %>% filter(classification == "ovr" & period_short != "excluded")
plot_stat = seq_test_period_cue(data = data, variable = variable)
# plot average correlation or betas for each speed condition and time period:
plot = ggplot(data = plot_data, aes(
x = fct_rev(as.factor(period_short)), y = as.numeric(mean_variable),
fill = as.factor(as.numeric(tITI) * 1000))) +
geom_bar(stat = "summary", fun = "mean", width = 0.9, show.legend = TRUE) +
geom_dotplot(binaxis = "y", stackdir = "center", stackratio = 0.5, alpha = 0.2,
binwidth = 0.01 * adjust_axis, show.legend = FALSE) +
geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0.0, color = "black") +
geom_text(data = subset(plot_stat, classification == "ovr"), aes(
x = fct_rev(as.factor(period_short)), y = round_updown(as.numeric(mean_variable), 0.5 * adjust_axis),
label = paste0("d=", cohens_d, significance)), size = 3.0, show.legend = FALSE,
color = subset(plot_stat, classification == "ovr")$color) +
facet_grid(rows = vars(cue_pos_label),
cols = vars(as.factor(as.numeric(tITI) * 1000))) +
xlab("Period") + ylab(ylabel) +
scale_fill_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-0.6, 0.6) * adjust_axis) +
theme(panel.border = element_blank(), axis.line = element_line()) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank()) +
#theme(axis.ticks.x = element_blank(), axis.line.x = element_blank()) +
theme(legend.position = "top", legend.direction = "horizontal", legend.box = "vertical",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0))
#theme(panel.spacing = unit(0, "lines"), strip.background = element_blank(),
# strip.placement = "outside", strip.text = element_blank())
return(plot)
}
fig_seq_cor_period_cue = plot_seq_cor_period_cue(data = dt_pred_seq_cor_cue, variable = "mean_cor")
fig_seq_slope_period_cue = plot_seq_cor_period_cue(data = dt_pred_seq_cor_cue, variable = "mean_slope")
fig_seq_step_period_cue = plot_seq_cor_period_cue(data = dt_pred_seq_cor_cue, variable = "mean_step")
fig_seq_cor_period_cue; fig_seq_step_period_cue; fig_seq_slope_period_cue;
```
Combine plots for the cue period:
```{r, echo=FALSE}
plot_grid(fig_seq_probas_cue, fig_seq_slope_time_cue, fig_seq_slope_period_cue,
labels = c("a", "b", "c"), nrow = 3, rel_heights = c(5, 4, 6))
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_cue_effects.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 6, height = 10)
```
```{r}
# create a data frame with the relevant data to run the LME:
lme_seq_cor_data = dt_pred_seq_cor %>%
filter(classification == "ovr" & period != "excluded") %>%
transform(tITI = as.factor(tITI))
# define linear mixed effects model with by-participant random intercepts:
lme_seq_cor = lmer(mean_slope ~ tITI * period + (1|id),
data = lme_seq_cor_data, na.action = na.omit, control = lcctrl)
summary(lme_seq_cor)
anova(lme_seq_cor)
emmeans_results = emmeans(lme_seq_cor, list(pairwise ~ period | tITI))
emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
```
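Roughly, the model formula `mean_slope ~ tITI * period + (1|id)` above corresponds to the following model for participant $i$ in speed condition $j$ and period $k$, with a by-participant random intercept $u_i$:

$$
y_{ijk} = \beta_0 + \beta^{\text{tITI}}_{j} + \beta^{\text{period}}_{k} + \beta^{\text{tITI} \times \text{period}}_{jk} + u_i + \varepsilon_{ijk},
\qquad u_i \sim \mathcal{N}(0, \sigma_u^2), \quad \varepsilon_{ijk} \sim \mathcal{N}(0, \sigma^2)
$$

The `emmeans` call then contrasts the forward and backward periods within each speed condition.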
### Serial target position
We calculate the average serial position at each TR:
```{r}
dt_pred_seq_pos = dt_pred_seq %>%
# get the position with the highest probability at every TR:
.[, by = .(classification, id, period, tITI, trial_tITI, seq_tr), .(
num_positions = .N,
max_position = position[which.max(probability_norm)]
)] %>%
# verify that the number of positions per TR matches:
verify(all(num_positions == 5)) %>%
# average the maximum position across trials for each speed condition:
.[, by = .(classification, id, period, tITI, seq_tr), .(
num_trials = .N,
mean_position = mean(max_position)
)] %>%
# verify that the number of trials per participant is correct:
verify(all(num_trials == 15)) %>%
# calculate the difference of the mean position from baseline (which is 3):
mutate(position_diff = mean_position - 3) %>%
setDT(.) %>%
# set the speed condition and period variables to factors:
transform(tITI = as.factor(tITI)) %>%
transform(period = as.factor(period))
```
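The baseline of 3 used here is the expected decoded serial position if each of the five positions were equally likely to win at a given TR, i.e., the mean of positions 1 to 5 (a quick sanity check, assuming five sequence items):
```{r, eval=FALSE}
# expected mean serial position under uniform decoding across five items:
mean(1:5)  # = 3
```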
We calculate whether the average serial position is significantly different from baseline, separately for every speed condition and period (forward vs. backward):
```{r}
dt_pred_seq_pos_period = dt_pred_seq_pos %>%
# focus on the forward and backward period only:
filter(period != "excluded") %>% setDT(.) %>%
# average the mean position across TRs for each period and speed condition:
.[, by = .(classification, id, period, tITI), .(
position_diff = mean(position_diff)
)] %>%
# run t-tests across participants for each period and speed condition:
.[, by = .(classification, period, tITI), {
ttest_results = t.test(position_diff, alternative = "two.sided", mu = 0)
list(
num_subs = .N,
tvalue = round(ttest_results$statistic, 2),
pvalue = ttest_results$p.value,
df = ttest_results$parameter,
cohens_d = abs(round((mean(position_diff) - 0) / sd(position_diff), 2)),
position_diff = mean(position_diff),
conf_lb = round(ttest_results$conf.int[1], 2),
conf_ub = round(ttest_results$conf.int[2], 2),
sd_position = sd(position_diff),
sem_upper = mean(position_diff) + (sd(position_diff)/sqrt(.N)),
sem_lower = mean(position_diff) - (sd(position_diff)/sqrt(.N))
)
}] %>% verify(all(num_subs == 36)) %>%
# adjust p-values for multiple comparisons:
.[, by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>%
verify(all(num_comp == 10)) %>%
# add a variable that indicates significance with significance stars:
mutate(significance = ifelse(pvalue_adjust < 0.05, "*", "")) %>%
# round the original p-values:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the p-values adjusted for multiple comparisons:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# sort the data table by classification, period, and speed:
setorder(., classification, period, tITI) %>%
# shorten the period name:
mutate(period_short = ifelse(period == "forward", "fwd", period)) %>%
transform(period_short = ifelse(period == "backward", "bwd", period_short)) %>%
mutate(color = ifelse(period_short == "fwd", "dodgerblue", "red")) %>% setDT(.)
dt_pred_seq_pos_period %>%
filter(classification == "ovr", pvalue_adjust < 0.05) %>%
rmarkdown::paged_table(.)
```
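For reference, the effect size computed in these chunks is (the absolute value of) Cohen's d for a one-sample comparison against a baseline $\mu_0$ (here $\mu_0 = 0$, i.e., no difference from baseline):

$$
d = \frac{\bar{x} - \mu_0}{s_x}
$$

where $\bar{x}$ and $s_x$ are the mean and standard deviation of the participant-level values entering the t-test.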
We calculate the mean serial position at every TR and compare it against baseline:
```{r}
dt_pred_seq_pos_tr = dt_pred_seq_pos %>%
# test against baseline across participants for each speed condition and volume:
.[, by = .(classification, period, tITI, seq_tr), {
ttest_results = t.test(mean_position, alternative = "two.sided", mu = 3)
list(
num_subs = .N,
tvalue = ttest_results$statistic,
pvalue = ttest_results$p.value,
df = ttest_results$parameter,
mean_position = mean(mean_position),
sd_position = sd(mean_position),
sem_upper = mean(mean_position) + (sd(mean_position)/sqrt(.N)),
sem_lower = mean(mean_position) - (sd(mean_position)/sqrt(.N))
)
}] %>% verify(all(num_subs == 36)) %>%
# adjust p-values for multiple comparisons:
.[period %in% c("forward", "backward"), by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>% verify(all(num_comp == 39, na.rm = TRUE)) %>%
# round the original p-values:
mutate(pvalue_rounded = round_pvalues(pvalue)) %>%
# round the p-values adjusted for multiple comparisons:
mutate(pvalue_adjust_rounded = round_pvalues(pvalue_adjust)) %>%
# add a variable that indicates significance with significance stars:
mutate(significance = ifelse(pvalue_adjust < 0.05, "***", "")) %>%
# sort the data table for each speed and TR:
setorder(., classification, period, tITI, seq_tr)
dt_pred_seq_pos_tr %>%
filter(classification == "ovr", pvalue_adjust < 0.05) %>%
rmarkdown::paged_table(.)
```
```{r, eval = FALSE, echo=TRUE}
cfg = list(variable = "mean_position", threshold = 2.021, baseline = 3,
grouping = c("classification", "tITI"), n_perms = 10000, n_trs = 13)
dt_pred_seq_pos_cluster = cluster_permutation(dt_pred_seq_pos_sub, cfg)
```
```{r}
# define linear mixed effects model with by-participant random intercepts and slopes:
lme_seq_pos = lmer(position_diff ~ tITI * period + (1 + tITI + period |id),
data = subset(dt_pred_seq_pos, classification == "ovr" & period != "excluded"),
na.action = na.omit, control = lcctrl)
summary(lme_seq_pos)
anova(lme_seq_pos)
emmeans_results = emmeans(lme_seq_pos, list(pairwise ~ period | tITI))
emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
```
#### Figure 3g
```{r, echo=TRUE}
variable = "position_diff"
plot_data = dt_pred_seq_pos %>%
# average across TRs for each participant, speed condition, and period:
.[, by = .(classification, id, tITI, period), .(
mean_variable = mean(get(variable))
)] %>%
filter(classification == "ovr" & period != "excluded") %>%
# shorten the period name:
mutate(period_short = ifelse(period == "forward", "fwd", period)) %>%
transform(period_short = ifelse(period == "backward", "bwd", period_short)) %>%
mutate(color = ifelse(period_short == "fwd", "dodgerblue", "red")) %>%
setDT(.)
# plot the average event position difference for each speed condition and time period:
fig_seq_pos_period = ggplot(data = plot_data, aes(
x = fct_rev(as.factor(period_short)), y = as.numeric(mean_variable),
fill = as.factor(as.numeric(tITI) * 1000))) +
geom_bar(stat = "summary", fun = "mean", width = 0.9, show.legend = TRUE) +
geom_dotplot(binaxis = "y", stackdir = "center", stackratio = 0.5, alpha = 0.2,
binwidth = 0.05, show.legend = FALSE) +
geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0.0, color = "black") +
geom_text(data = subset(dt_pred_seq_pos_period, classification == "ovr"), aes(
x = fct_rev(as.factor(period_short)), y = round_updown(as.numeric(get(variable)), 1.2),
label = paste0("d=", sprintf("%.2f", cohens_d), significance)), show.legend = FALSE, size = 3.2,
color = subset(dt_pred_seq_pos_period, classification == "ovr")$color) +
facet_wrap(~ as.factor(as.numeric(tITI) * 1000), strip.position = "bottom", nrow = 1) +
xlab("Period") + ylab("Event position\ncompared to baseline") +
scale_colour_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
scale_fill_viridis(name = "Speed (ms)", discrete = TRUE, option = "cividis") +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-1.5, 1.5)) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
theme(panel.spacing = unit(0, "lines"), strip.background = element_blank(),
strip.placement = "outside", strip.text = element_blank()) +
theme(axis.ticks.x = element_line(colour = "white"),
axis.line.x = element_line(colour = "white")) +
theme(axis.line.y = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
fig_seq_pos_period
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_position_period.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 5, height = 3)
```
#### Source Data File Fig. 3g
```{r, echo=TRUE}
subset(plot_data, classification == "ovr") %>%
select(-classification, -color) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3g.csv"),
row.names = FALSE)
```
#### Figure 3f
```{r, echo = FALSE}
plot_seq_pos <- function(dt){
# create separate datatable to plot rectangles indicating forward / backward period:
dt_reduced = dt %>% setDT(.) %>%
.[, by = .(classification, tITI, period), .(
xmin = min(seq_tr) - 0.5,
xmax = max(seq_tr) + 0.5
)] %>%
filter(period != "excluded") %>%
mutate(fill = ifelse(period == "forward", "dodgerblue", "red"))
ggplot(data = dt, mapping = aes(
x = as.factor(seq_tr), y = as.numeric(mean_position),
group = as.factor(as.numeric(tITI)*1000), fill = as.factor(as.numeric(tITI)*1000))) +
#geom_rect(data = dt_reduced, aes(xmin = xmin, xmax = xmax, ymin = 2, ymax = 4),
# alpha = 0.05, inherit.aes = FALSE, show.legend = FALSE, fill = dt_reduced$fill) +
geom_hline(aes(yintercept = 3), linetype = "solid", color = "gray") +
#facet_wrap(facets = ~ as.factor(tITI), labeller = get_labeller(dt$tITI), nrow = 1) +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
geom_line(mapping = aes(color = as.factor(as.numeric(tITI)*1000))) +
xlab("Time from sequence onset (TRs)") +
ylab("Event position") +
scale_colour_viridis(discrete = TRUE, option = "cividis", name = "Speed (ms)", guide = FALSE) +
scale_fill_viridis(discrete = TRUE, option = "cividis", name = "Speed (ms)", guide = FALSE) +
scale_x_discrete(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(2,4)) +
theme(strip.text.x = element_text(margin = margin(b = 2, t = 2))) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
geom_segment(aes(x = 0.9, xend = 0.9, y = 3.01, yend = 4),
arrow = arrow(length = unit(5, "pt")), color = "darkgray") +
geom_segment(aes(x = 0.9, xend = 0.9, y = 3.01, yend = 2),
arrow = arrow(length = unit(5, "pt")), color = "darkgray") +
annotate(geom = "text", x = 1.4, y = 3.5, label = "Later",
color = "darkgray", angle = 90, size = 3) +
annotate(geom = "text", x = 1.4, y = 2.5, label = "Earlier",
color = "darkgray", angle = 90, size = 3) +
annotate("text", x = 13, y = 2, label = "1 TR = 1.25 s",
hjust = 1, size = rel(2)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
}
fig_seq_pos_time = plot_seq_pos(dt = subset(dt_pred_seq_pos_tr, classification == "ovr"))
fig_seq_pos_time
```
#### Source Data File Fig. 3f
```{r, echo=TRUE}
subset(dt_pred_seq_pos_tr, classification == "ovr") %>%
select(-classification, -num_subs, -num_comp, -pvalue_adjust,
-pvalue_rounded, -pvalue_adjust_rounded, -pvalue, -df,
-tvalue, -significance, -sd_position) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3f.csv"),
row.names = FALSE)
```
```{r, echo = FALSE}
plot_seq_pos_facet <- function(dt){
# create separate datatable to plot rectangles indicating forward / backward period:
dt_reduced = dt %>% setDT(.) %>%
.[, by = .(classification, tITI, period), .(
xmin = min(seq_tr) - 0.5,
xmax = max(seq_tr) + 0.5
)] %>%
filter(period != "excluded") %>%
mutate(fill = ifelse(period == "forward", "dodgerblue", "red"))
ggplot(data = dt, mapping = aes(
x = as.factor(seq_tr), y = as.numeric(mean_position),
group = as.factor(as.numeric(tITI)*1000), fill = as.factor(as.numeric(tITI)*1000))) +
geom_rect(data = dt_reduced, aes(xmin = xmin, xmax = xmax, ymin = 2, ymax = 4),
alpha = 0.05, inherit.aes = FALSE, show.legend = FALSE, fill = dt_reduced$fill) +
geom_hline(aes(yintercept = 3), linetype = "solid", color = "gray") +
facet_wrap(facets = ~ as.factor(tITI), labeller = get_labeller(dt$tITI), nrow = 1) +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
geom_line(mapping = aes(color = as.factor(as.numeric(tITI)*1000))) +
geom_point(data = subset(dt, pvalue_adjust < 0.05), pch = 21, fill = "red",
color = "black", show.legend = FALSE) +
xlab("Time from sequence onset (TRs)") +
ylab("Event position") +
scale_colour_viridis(discrete = TRUE, option = "cividis", name = "Speed (ms)", guide = FALSE) +
scale_fill_viridis(discrete = TRUE, option = "cividis", name = "Speed (ms)", guide = FALSE) +
scale_x_discrete(labels = label_fill(seq(1, 13, 1), mod = 4), breaks = seq(1, 13, 1)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(2,4)) +
theme(strip.text.x = element_text(margin = margin(b = 2, t = 2))) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
}
fig_seq_pos_time_facet = plot_seq_pos_facet(dt = subset(dt_pred_seq_pos_tr, classification == "ovr"))
fig_seq_pos_time_facet
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_timecourse_position.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 6, height = 3)
```
### Transitions
We calculate the step size between consecutively decoded (highest probability) events:
```{r}
dt_pred_seq_step = dt_pred_seq %>%
# get the position with the highest probability for every TR:
.[, by = .(classification, id, tITI, trial_tITI, seq_tr), ":=" (
num_classes = .N,
rank_order = rank(-probability),
max_prob = as.numeric(probability == max(probability))
)] %>%
# verify that there are five classes per TR:
verify(all(num_classes == 5)) %>%
# sort the data table:
setorder(., classification, id, tITI, trial_tITI, seq_tr) %>%
# select only classes with the highest probability for every TR:
filter(max_prob == 1) %>%
setDT(.) %>%
# check that the rank order of the event with the highest probability matches:
verify(all(rank_order == max_prob)) %>%
# group by classification, id, speed and trial and calculate step sizes:
.[, by = .(classification, id, tITI, trial_tITI),
step := position - shift(position)]
```
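As a minimal illustration of the step-size logic (using a made-up sequence of decoded positions, not data from this study), `position - shift(position)` yields the signed jump between consecutive TRs within a trial:
```{r, eval=FALSE}
library(data.table)
# hypothetical decoded positions across six TRs of one trial:
toy = data.table(seq_tr = 1:6, position = c(1, 2, 3, 5, 4, 4))
# step size = current position minus the position decoded one TR earlier:
toy[, step := position - shift(position)]
toy
# expected steps: NA, 1, 1, 2, -1, 0
```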
We calculate the mean step size for the early and late periods in the forward and backward phases:
```{r}
dt_pred_seq_step_mean = dt_pred_seq_step %>%
filter(period != "excluded") %>%
filter(!(is.na(zone))) %>% setDT(.) %>%
# shorten the period name:
mutate(period_short = ifelse(period == "forward", "fwd", period)) %>%
transform(period_short = ifelse(period == "backward", "bwd", period_short)) %>%
setDT(.) %>%
.[, by = .(classification, id, tITI, period_short, zone), .(
mean_step = mean(step, na.rm = TRUE))]
```
We compare the forward and the backward periods using paired t-tests:
```{r}
dt_pred_seq_step_stat = dt_pred_seq_step_mean %>%
spread(key = period_short, value = mean_step, drop = TRUE) %>%
mutate(difference = fwd - bwd) %>% setDT(.) %>%
# run paired t-tests across participants for each speed condition and zone:
.[, by = .(classification, tITI, zone), {
ttest_results = t.test(fwd, bwd, alternative = "two.sided", paired = TRUE)
list(
num_subs = .N,
tvalue = round(ttest_results$statistic, 2),
pvalue = ttest_results$p.value,
df = ttest_results$parameter,
cohens_d = abs(round((mean(fwd) - mean(bwd)) / sd(fwd - bwd), 2)),
mean_step = mean(difference),
sd_step = sd(difference),
sem_upper = mean(difference) + (sd(difference)/sqrt(.N)),
sem_lower = mean(difference) - (sd(difference)/sqrt(.N))
)
}] %>% verify(all(num_subs == 36)) %>%
# adjust p-values for multiple comparisons:
.[, by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>%
verify(all(num_comp == 10)) %>%
# add a variable that indicates significance with significance stars:
mutate(significance = ifelse(pvalue < 0.05, "*", "")) %>%
# round the original p-values:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the p-values adjusted for multiple comparisons:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# sort the data table by classification, zone, and speed:
setorder(., classification, zone, tITI)
dt_pred_seq_step_stat %>%
filter(classification == "ovr") %>%
rmarkdown::paged_table(.)
```
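Note that the paired t-test above is equivalent to a one-sample t-test on the within-participant differences, and the reported effect size is Cohen's d of those difference scores. A minimal sketch with made-up values (not data from this study):
```{r, eval=FALSE}
set.seed(1)
fwd = rnorm(36, mean = 0.3, sd = 0.5)   # hypothetical forward-period step sizes
bwd = rnorm(36, mean = -0.2, sd = 0.5)  # hypothetical backward-period step sizes
# the paired comparison and its one-sample equivalent give identical t-values:
t.test(fwd, bwd, paired = TRUE)$statistic
t.test(fwd - bwd, mu = 0)$statistic
# paired effect size as computed above (Cohen's d of the difference scores):
abs(mean(fwd - bwd) / sd(fwd - bwd))
```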
We compare each period to the baseline:
```{r}
dt_pred_seq_step_stat_baseline = dt_pred_seq_step_mean %>%
# run one-sample t-tests across participants for each speed condition, period, and zone:
.[, by = .(classification, tITI, period_short, zone), {
ttest_results = t.test(mean_step, mu = 0, alternative = "two.sided")
list(
num_subs = .N,
tvalue = round(ttest_results$statistic, 2),
pvalue = ttest_results$p.value,
df = ttest_results$parameter,
cohens_d = abs(round((mean(mean_step) - 0) / sd(mean_step), 2)),
mean_step = mean(mean_step),
sd_step = sd(mean_step),
sem_upper = mean(mean_step) + (sd(mean_step)/sqrt(.N)),
sem_lower = mean(mean_step) - (sd(mean_step)/sqrt(.N))
)
}] %>% verify(all(num_subs == 36)) %>%
# adjust p-values for multiple comparisons:
.[, by = .(classification), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>%
# verify that the number of comparisons matches expectations:
verify(all(num_comp == 20)) %>%
# add a variable that indicates significance with significance stars:
mutate(significance = ifelse(pvalue_adjust < 0.05, "*", "")) %>%
# round the original p-values:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the p-values adjusted for multiple comparisons:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# sort the data table by classification, period, zone, and speed:
setorder(., classification, period_short, zone, tITI)
dt_pred_seq_step_stat_baseline %>%
filter(classification == "ovr", pvalue < 0.05) %>%
rmarkdown::paged_table(.)
```
#### Figure 3h
```{r}
# plot the average step size for each speed condition and time period:
fig_seq_step = ggplot(data = subset(dt_pred_seq_step_mean, classification == "ovr"), aes(
x = fct_rev(as.factor(period_short)), y = as.numeric(mean_step),
fill = as.factor(as.numeric(tITI) * 1000)), color = as.factor(as.numeric(tITI) * 1000)) +
facet_grid(vars(as.factor(zone)), vars(as.factor(as.numeric(tITI) * 1000)), switch = "x") +
geom_bar(stat = "summary", fun = "mean", width = 0.9) +
geom_point(position = position_jitterdodge(jitter.height = 0, seed = 4, jitter.width = 0.2),
pch = 21, alpha = 0.05, color = "black", show.legend = FALSE) +
geom_errorbar(stat = "summary", fun.data = "mean_se", width = 0.0, color = "black") +
geom_text(data = subset(dt_pred_seq_step_stat, classification == "ovr"), aes(
y = 2, label = paste0("d=", sprintf("%.2f", cohens_d), significance), x = 1.5),
inherit.aes = FALSE, color = "black", size = 3.3) +
xlab("Period") + ylab("Step size") +
scale_colour_viridis(discrete = TRUE, option = "cividis", name = "Speed (ms)") +
scale_fill_viridis(discrete = TRUE, option = "cividis", name = "Speed (ms)") +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-2, 2)) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = -5, l = 0),
legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
theme(panel.spacing.x = unit(0, "lines"), strip.background.x = element_blank(),
strip.placement.x = "outside", strip.text.x = element_blank()) +
theme(axis.ticks.x = element_line(colour = "white"),
axis.line.x = element_line(colour = "white")) +
#theme(axis.title.x = element_blank()) +
theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
theme(axis.line.y = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
fig_seq_step
```
#### Source Data File Fig. 3h
```{r, echo=TRUE}
subset(dt_pred_seq_step_mean, classification == "ovr") %>%
select(-classification) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_3h.csv"),
row.names = FALSE)
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_step_size.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 5, height = 3)
```
```{r, fig.width = 10, fig.height = 4}
plot_grid(fig_seq_pos_period, fig_seq_step, labels = "auto",
ncol = 2, label_fontface = "bold", rel_widths = c(5, 6))
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_between_tr.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 10, height = 4)
```
```{r}
plot_grid(fig_seq_cor_time, fig_seq_cor_period, fig_seq_step_time, fig_seq_step_period,
labels = "auto", ncol = 2, nrow = 2, label_fontface = "bold")
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_correlation_step.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 10, height = 7)
ggsave(filename = "wittkuhn_schuck_figure_s5.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 10, height = 7)
```
We show the time-course statistics (regression slope, correlation, and step size) that remain significant after FDR correction:
```{r}
seq_test_time(data = dt_pred_seq_cor, variable = "mean_slope") %>%
filter(pvalue_adjust < 0.05 & classification == "ovr") %>%
rmarkdown::paged_table(.)
seq_test_time(data = dt_pred_seq_cor, variable = "mean_cor") %>%
filter(pvalue_adjust < 0.05 & classification == "ovr") %>%
rmarkdown::paged_table(.)
seq_test_time(data = dt_pred_seq_cor, variable = "mean_step") %>%
filter(pvalue_adjust < 0.05 & classification == "ovr") %>%
rmarkdown::paged_table(.)
```
#### Figure S6
```{r}
fig_seq_slope_time_facet = plot_seq_cor_facet(dt = subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_slope"),
classification == "ovr"), variable = "mean_slope")
fig_seq_cor_time_facet = plot_seq_cor_facet(dt = subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_cor"),
classification == "ovr"), variable = "mean_cor")
fig_seq_step_time_facet = plot_seq_cor_facet(dt = subset(
seq_test_time(data = dt_pred_seq_cor, variable = "mean_step"),
classification == "ovr"), variable = "mean_step")
remove_xaxis = theme(axis.title.x = element_blank())
remove_facets = theme(strip.background = element_blank(), strip.text.x = element_blank())
plot_grid(fig_seq_slope_time_facet + remove_xaxis,
fig_seq_pos_time_facet + remove_xaxis + theme(legend.position = "none") + remove_facets,
fig_seq_cor_time_facet + remove_xaxis + theme(legend.position = "none") + remove_facets,
fig_seq_step_time_facet + theme(legend.position = "none") + remove_facets,
labels = "auto", ncol = 1, label_fontface = "bold")
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_timecourse_slope_correlation_step.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 7, height = 9)
```
```{r}
ggsave(filename = "wittkuhn_schuck_figure_s6.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 7, height = 9)
```
#### Source Data File Fig. S6a
```{r}
subset(seq_test_time(data = dt_pred_seq_cor, variable = "mean_slope"),
classification == "ovr") %>%
select(-classification, -num_subs, -num_comp) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s6a.csv"),
row.names = FALSE)
```
#### Source Data File Fig. S6b
```{r}
subset(dt_pred_seq_pos_tr, classification == "ovr") %>%
select(-classification, -num_subs, -num_comp) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s6b.csv"),
row.names = FALSE)
```
#### Source Data File Fig. S6c
```{r}
subset(seq_test_time(data = dt_pred_seq_cor, variable = "mean_cor"),
classification == "ovr") %>%
select(-classification, -num_subs, -num_comp) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s6c.csv"),
row.names = FALSE)
```
#### Source Data File Fig. S6d
```{r}
subset(seq_test_time(data = dt_pred_seq_cor, variable = "mean_step"),
classification == "ovr") %>%
select(-classification, -num_subs, -num_comp) %>%
write.csv(., file = file.path(path_sourcedata, "source_data_figure_s6d.csv"),
row.names = FALSE)
```
### Figure 3
Plot Figure 3 in the main text:
```{r}
plot_grid(
plot_grid(fig_seq_probas, labels = c("a"), nrow = 1),
plot_grid(fig_seq_slope_time, fig_seq_slope_period, labels = c("b", "c"),
ncol = 2, nrow = 1, label_fontface = "bold", rel_widths = c(4.9, 5)),
plot_grid(fig_seq_cor_between, fig_seq_cor_within, fig_seq_pos_time,
labels = c("d", "e", "f"), ncol = 3, rel_widths = c(0.325, 0.325, 0.35)),
plot_grid(fig_seq_pos_period, fig_seq_step, labels = c("g", "h"),
ncol = 2, label_fontface = "bold", nrow = 1),
nrow = 4, label_fontface = "bold", rel_heights = c(2, 3)
)
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_data.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 10, height = 12)
```
```{r}
ggsave(filename = "wittkuhn_schuck_figure_3.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 10, height = 12)
```
#### Figure S7
```{r, include=FALSE, echo=TRUE, eval=FALSE, warning=FALSE, message=FALSE}
title_nomax = ggdraw() + draw_label("Sequence item with highest probability removed", fontface = "plain")
title_nofirst = ggdraw() + draw_label("First sequence item removed", fontface = "plain")
title_nolast = ggdraw() + draw_label("Last sequence item removed", fontface = "plain")
# create a common legend used for the entire figure panel:
common_legend <- get_legend(fig_seq_slope_time + theme(legend.position = "top"))
# create the plot of sequence data with the maximum probability removed:
plot_nomax = plot_grid(fig_seq_slope_time + theme(legend.position = "none"),
fig_seq_slope_period + theme(legend.position = "none"),
rel_widths = c(4, 5), labels = "auto", ncol = 2, nrow = 1,
label_fontface = "bold")
plot_nofirst = plot_grid(fig_seq_slope_time + theme(legend.position = "none"),
fig_seq_slope_period + theme(legend.position = "none"),
rel_widths = c(4, 5), labels = c("c", "d"), ncol = 2, nrow = 1,
label_fontface = "bold")
plot_nolast = plot_grid(fig_seq_slope_time + theme(legend.position = "none"),
fig_seq_slope_period + theme(legend.position = "none"),
rel_widths = c(4, 5), labels = c("e", "f"), ncol = 2, nrow = 1,
label_fontface = "bold")
plot_all = plot_grid(
common_legend, title_nomax, plot_nomax,
title_nofirst, plot_nofirst,
title_nolast, plot_nolast,
ncol = 1, rel_heights = c(0.1, 0.1, 1, 0.1, 1, 0.1, 1))
plot_all
```
```{r, echo=FALSE, eval=FALSE, include=FALSE}
ggsave(filename = "highspeed_plot_decoding_sequence_slope_remove_items.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 10, height = 10)
ggsave(filename = "wittkuhn_schuck_figure_s7.pdf",
plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
dpi = "retina", width = 10, height = 10)
```