highspeed-analysis-resting.Rmd
## Decoding: Resting-state

### Preparation

#### Initialization

We load the data and relevant functions:
  5. ```{r, warning=FALSE, message=FALSE, echo=TRUE}
  6. # find the path to the root of this project:
  7. if (!requireNamespace("here")) install.packages("here")
  8. if ( basename(here::here()) == "highspeed" ) {
  9. path_root = here::here("highspeed-analysis")
  10. } else {
  11. path_root = here::here()
  12. }
  13. # list of participants with chance performance on sequence and repetition trials:
  14. chance_performer = c("sub-24", "sub-31", "sub-37", "sub-40")
  15. ```
The next step is only evaluated during manual execution:
  17. ```{r, eval=FALSE}
  18. # source all relevant functions from the setup R script:
  19. source(file.path(path_root, "code", "highspeed-analysis-setup.R"))
  20. ```
#### Prepare sequence events data

We select all events files for sequence trials during which the stimuli were presented:
  23. ```{r, echo=TRUE}
  24. # create a subset of the events data table only including sequence task events:
  25. dt_events_seq = subset(dt_events, condition == "sequence" & trial_type == "stimulus")
  26. rmarkdown::paged_table(dt_events_seq)
  27. ```
#### Prepare resting decoding data

We prepare the resting-state data for analysis:
  30. ```{r, results="hold"}
  31. # create a subset that only includes the resting state data:
  32. dt_pred_rest = dt_pred %>%
  33. # filter for rest only data that was created by the union mask:
  34. filter(condition == "rest" & class != "other" & mask == "union") %>%
  35. # sort the data frame by classification, session, etc.:
  36. setorder(id, classification, session, run_study, seq_tr, class) %>%
  37. # add random positions for the events detected during resting state:
  38. # group by classification and participants to generate class specific indices:
  39. group_by(classification, id) %>%
  40. # create class specific position indices randomly for every participant:
  41. group_modify( ~ { .x %>% mutate(position = mapvalues(
  42. .x$class, from = unique(.x$class), to = sample(length(unique(.x$class)))))}) %>%
  43. ungroup %>%
  44. transform(position = as.numeric(position)) %>%
  45. setDT(.) %>%
  46. # there should be a unique position for every class in every participant:
  47. verify(all(.[, by = .(classification, id, class), .(
  48. unique_positions = length(unique(position))
  49. )]$unique_positions == 1)) %>%
  50. # verify that every class is represented by all indices across participants:
  51. verify(all(.[, by = .(classification, class), .(
  52. unique_positions = length(unique(position))
  53. )]$unique_positions == 5)) %>%
  54. # check that the number of TRs for each resting state run is correct
  55. verify(all(.[, by = .(classification, id, run_study, session, class), .(
  56. num_trs = .N)]$num_trs == 233))
  57. ```
Pre-task resting-state data from Session 1 was not acquired for some participants.
We identify these participants below:
  60. ```{r, results="hold"}
  61. # create an array with all participant ids who do not have all four resting state runs:
  62. ids_no_pre_s1 = unique(
  63. (dt_pred_rest %>% setDT(.) %>%
  64. .[, by = .(id, session), .(num_runs = length(unique(run_study)))] %>%
  65. filter(!(num_runs == 2)))$id)
  66. # print the ids of the four participants without pre-task session 1 rest:
  67. print(ids_no_pre_s1)
  68. ```
We select data from all remaining participants with pre-task Session 1 rest:
  70. ```{r, results = "hold"}
  71. dt_pred_rest_s1 = dt_pred_rest %>%
  72. # exclude participants without session 1 pre-task rest:
  73. filter(!(id %in% c(ids_no_pre_s1))) %>%
  74. # exclude participants who performed below chance in seq and rep trials:
  75. filter(!(id %in% c(chance_performer))) %>%
  76. # filter for pre-task resting state data in session 1:
  77. #filter(run_study == "run-pre" & session == "ses-01") %>%
  78. # filter for the one-versus-rest classification approach:
  79. filter(classification == "ovr") %>%
  80. # filter resting state data to the same length as concatenated sequence data:
  81. #filter(seq_tr %in% seq(1, 180)) %>%
  82. # check if the number of participants is correct:
  83. verify(length(unique(id)) == 32) %>%
  84. setDT(.) %>%
  85. # create unique run indices through the combination of run and session labels:
  86. .[grepl("run-pre", run_study, fixed = TRUE) & session == "ses-01",
  87. run_study := "run-pre_ses-01"] %>%
  88. .[grepl("run-pre", run_study, fixed = TRUE) & session == "ses-02",
  89. run_study := "run-pre_ses-02"] %>%
  90. .[grepl("run-post", run_study, fixed = TRUE) & session == "ses-01",
  91. run_study := "run-post_ses-01"] %>%
  92. .[grepl("run-post", run_study, fixed = TRUE) & session == "ses-02",
  93. run_study := "run-post_ses-02"] %>%
  94. setDT(.)
  95. ```
#### Prepare sequence decoding data

We prepare the sequence trial data that will be combined with the resting-state data:
  98. ```{r}
  99. # create a list of all participants that are excluded from the resting state analysis:
  100. sub_exclude_rest = unique(dt_events_seq$subject[
  101. !(dt_events_seq$subject %in% dt_pred_rest_s1$id)])
  102. # create a subset of the decoding data only including the sequence task data:
  103. dt_pred_seq = dt_pred %>%
  104. # create a subset of the decoding data only including the sequence task data:
  105. filter(condition == "sequence" & class != "other" & mask == "cv" & stim != "cue") %>%
  106. setDT(.) %>%
  107. # add the serial position, change trial and target cue to the sequence data table:
  108. .[, c("position", "change", "trial_cue", "trial_cue_position") := get_pos(
  109. .SD, dt_events_seq), by = .(id, trial, class),
  110. .SDcols = c("id", "trial", "class")] %>%
  111. # remove columns not needed to align data table with resting state data:
  112. mutate(change = NULL, trial_cue = NULL, trial_cue_position = NULL) %>%
  113. # rename the speed condition to include the "ms" label and express in milliseconds:
  114. transform(tITI = paste(as.character(as.numeric(tITI) * 1000), "ms")) %>%
  115. # remove participants that should be excluded from resting state analysis:
  116. filter(!(id %in% sub_exclude_rest)) %>%
  117. setDT(.) %>%
  118. # only select TRs of the forward and backward period
  119. filter(seq_tr %in% seq(2, 13)) %>%
  120. setDT(.) %>%
  121. # check if the number of TRs for each participant is correct:
  122. verify(.[, by = .(classification, id, tITI, trial_tITI, class), .(
  123. num_trs = .N)]$num_trs == 12)
  124. ```
#### Combine sequence and resting data

We combine the sequence and resting-state decoding data into one data table:
  127. ```{r, echo=TRUE}
  128. #c("run-pre_ses-01", "run-post_ses-01", "run-pre_ses-02", "run-post_ses-02")
  129. deselect_rest_run <- c("run-pre_ses-02", "run-post_ses-02")
  130. dt_pred_all = rbind(dt_pred_seq, dt_pred_rest_s1) %>%
  131. # filter for pre-task resting state of session 1 only:
  132. filter(!(run_study %in% deselect_rest_run)) %>%
  133. # reorder the factor levels of the combined data table to sort in ascending order:
  134. transform(tITI = factor(tITI, levels(as.factor(tITI))[c(3,5,1,4,2,6)])) %>%
  135. # concatenate all session 1 pre-task resting state scans and add counter:
  136. filter(tITI %in% c("32 ms", "2048 ms", "rest")) %>%
  137. setDT(.) %>%
  138. # adjust resting state grouping variable to accommodate multiple runs:
  139. .[tITI == "rest", tITI := paste0(tITI, "_", run_study)] %>%
  140. # filter out one-versus-rest classification trials only:
  141. filter(classification == "ovr") %>%
  142. setDT(.) %>%
  143. # sort the data table:
  144. setorder(id, condition, tITI, trial_tITI, seq_tr, position) %>%
  145. setDT(.)
  146. ```
We use a short function to run some checks on the combined data table:
  148. ```{r, echo=TRUE}
  149. checkdata = function(dt){
  150. library(data.table); library(tidyverse);
  151. dt %>%
  152. # check the number of trs in all resting state data:
  153. verify(.[stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, class), .(
  154. num_trs = .N
  155. )]$num_trs == 233) %>%
  156. # check the number of trs in all sequence trial data:
  157. verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, trial_tITI, class), .(
  158. num_trs = .N
  159. )]$num_trs == 12) %>%
  160. # check the number of unique trials for each sequence speed condition:
  161. verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI), .(
  162. num_trials = length(unique(trial_tITI))
  163. )]$num_trials == 15) %>%
  164. # check the number of speed conditions in sequence trials
  165. verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id), .(
  166. num_speeds = length(unique(tITI))
  167. )]$num_speeds == 2) %>%
  168. # check the number of speed conditions in resting state data:
  169. verify(.[stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI), .(
  170. num_speeds = length(unique(tITI))
  171. )]$num_speeds == 1) %>%
  172. # check the mask name in resting state data:
  173. verify(all(.[stringr::str_detect(tITI, "rest"), .(name_mask = unique(mask))]$name_mask == "union")) %>%
  174. # check the mask name in sequence trial data:
  175. verify(all(.[!stringr::str_detect(tITI, "rest"), .(name_mask = unique(mask))]$name_mask == "cv")) %>%
  176. # check if the number of items per run study is as expected:
  177. verify(all(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, trial_tITI, class), .(
  178. num_trs = .N
  179. )]$num_trs == 12))
  180. }
  181. # check the current data table:
  182. rmarkdown::paged_table(checkdata(dt_pred_all))
  183. ```
### Decoding probabilities

We draw a random example participant used for plotting:
  186. ```{r}
  187. set.seed(3)
  188. example_id = sample(unique(dt_pred_all$id), 1)
  189. ```
We concatenate all fast and slow sequence trials and resting-state data:
  191. ```{r}
  192. dt_pred_conc = dt_pred_all %>%
  193. # we add a consecutive counter that will be used to concatenate data:
  194. .[, by = .(classification, id, tITI, class), ":=" (t = seq(1, .N))] %>%
  195. # omit the last TRs in the resting state data:
  196. filter(!(stringr::str_detect(tITI, "rest") & seq_tr > 180)) %>%
  197. # we will focus all further analyses on the one-versus-rest classifier:
  198. filter(classification == "ovr") %>%
  199. setDT(.) %>%
  200. # verify that both resting and sequence trial data are restricted to 180 TRs:
  201. verify(.[, by = .(classification, id, tITI, class, tITI), .(
  202. num_trs = .N)]$num_trs == 180) %>%
  203. setDT(.)
  204. ```
We plot the probabilities of all concatenated sequence trials and the Session 1
resting-state data (separately for each speed condition):
  207. ```{r, echo=FALSE}
  208. fig_prob_time_seq = ggplot(data = subset(dt_pred_conc, id == example_id), mapping = aes(
  209. x = as.numeric(t) * 1.25, y = as.numeric(probability * 100), group = as.factor(position))) +
  210. geom_line(mapping = aes(color = as.factor(position))) +
  211. facet_wrap(~ as.factor(tITI)) +
  212. theme(legend.position = "top", legend.direction = "horizontal",
  213. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  214. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  215. xlab("Time (TRs)") + ylab("Probability (%)") +
  216. coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
  217. ylim = c(0, 100), xlim = c(0, 200)) +
  218. theme(panel.border = element_blank(), axis.line = element_line()) +
  219. theme(axis.line = element_line(colour = "black"),
  220. panel.grid.major = element_blank(),
  221. panel.grid.minor = element_blank(),
  222. panel.border = element_blank(),
  223. panel.background = element_blank()) +
  224. scale_colour_manual(name = "Serial event", values = color_events, guide = FALSE) +
  225. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  226. guides(color = guide_legend(nrow = 1))
  227. fig_prob_time_seq
  228. ```
### Calculating sequentiality

We create a function to determine whether a given sequential combination of the task stimuli was presented to the participants on sequence trials:
  231. ```{r}
  232. did_you_see_that <- function(seq, dt_events_seq) {
  233. dt_events_seq_sub <- dt_events_seq %>%
  234. #filter(subject == sub) %>%
  235. setDT(.) %>%
  236. # calculate if the input sequence was seen for every trial:
  237. .[, by = .(subject, interval_time, trial), .(
  238. num_events = .N,
  239. seen = as.numeric(all(stim_label[serial_position] == seq))
  240. )] %>%
  241. # check if the number of events per sequence is 5:
  242. verify(num_events == 5) %>%
  243. # the number a seen sequences should be either 0 (not seen) or 5 (seen five)
  244. # time for the five different speed conditions:
  245. verify(.[, by = .(subject), .(sum_seen = sum(seen))]$sum_seen %in% c(0, 5)) %>%
  246. # for each speed condition, the number should be 1 (seen) or 0 (not seen):
  247. verify(.[, by = .(subject, interval_time), .(sum_seen = sum(seen))]$sum_seen %in% c(0, 1))
  248. # return if sequence was seen (1) or not (0):
  249. seen <- as.numeric(sum(dt_events_seq_sub$seen) == 5)
  250. return(seen)
  251. }
  252. ```
We verify that the function works properly:
  254. ```{r, message=FALSE}
  255. sequences = permn(1:5, sort = TRUE)
  256. # list of all possible stimuli:
  257. seq <- c("cat", "chair", "face", "house", "shoe")
  258. # select one example participant:
  259. sub = "sub-14"
  260. # determine if participant saw 15 combinations of the five stimuli:
  261. sum_seq = sum(unlist(lapply(X = sequences, function(x)
  262. did_you_see_that(seq = seq[x], subset(dt_events_seq, subject == sub)))))
  263. verify(data.table(sum_seq), sum_seq == 15)
  264. ```
We calculate the regression slopes for every TR and every assumed sequence in the resting-state data:
  266. ```{r, echo=TRUE, eval=FALSE}
  267. # create an array with all the stimulus labels:
  268. stimuli <- c("cat", "chair", "face", "house", "shoe")
  269. # register the computation start time:
  270. start_time <- Sys.time()
  271. # calculate sd probability, slope for every permutation of a sequence:
  272. dt_pred_conc_slope_rest = dt_pred_conc %>%
  273. # filter for resting state analyses only:
  274. filter(stringr::str_detect(tITI, "rest")) %>%
  275. setDT(.) %>%
  276. # calculate sd probability, slope and if sequence was seen or not:
  277. .[, by = .(id, classification, tITI, trial_tITI, seq_tr), .(
  278. # calculate the number of events per TR and the mean position:
  279. num_events = .N,
  280. mean_position = mean(position),
  281. # calculate the standard deviation of the probabilities:
  282. sd_prob = sd(probability),
  283. # calculate the slope for each permuted sequence:
  284. slope = unlist(lapply(sequences, function(x) coef(lm(probability ~ x))[2] * (-1))),
  285. # determine if the permuted sequence was actually seen by the subject:
  286. seen = unlist(lapply(sequences, function(x)
  287. did_you_see_that(seq = stimuli[x], subset(dt_events_seq, subject == unique(id)))))
  288. )] %>%
  289. # add a counter for all sequential combinations:
  290. .[, by = .(id, classification, tITI, trial_tITI, seq_tr), "sequence" := seq(1,.N)]
  291. # stop the counter and print the calculation time:
  292. end_time = Sys.time()
  293. print(end_time - start_time)
  294. ```
We calculate the regression slopes for the sequence data:
  296. ```{r, echo=TRUE, eval=FALSE}
  297. dt_pred_conc_slope_seq = dt_pred_conc %>%
  298. # filter for sequence data only:
  299. filter(!stringr::str_detect(tITI, "rest")) %>%
  300. base::droplevels() %>%
  301. setDT(.) %>%
  302. # calculate the slope for every participant, speed and TR:
  303. .[, by = .(id, classification, tITI, trial_tITI, seq_tr), .(
  304. # calculate the number of events per TR and the mean position:
  305. num_events = .N,
  306. mean_position = mean(position),
  307. # calculate the standard deviation of the probabilities:
  308. sd_prob = sd(probability),
  309. # calculate the slope for each permuted sequence:
  310. slope = coef(lm(probability ~ position))[2] * (-1)
  311. )] %>%
  312. # add variables if sequence was seen or not and sequence counter:
  313. .[, "seen" := 1] %>%
  314. .[, "sequence" := 1]
  315. ```
We combine the resting-state and sequence-trial slope data:
  317. ```{r, echo=TRUE, eval=FALSE}
  318. dt_pred_conc_slope = rbind(dt_pred_conc_slope_seq, dt_pred_conc_slope_rest) %>%
  319. # verify that there are 5 events per TR and their mean position is 5:
  320. verify(all(num_events == 5)) %>%
  321. verify(all(mean_position == 3)) %>%
  322. # sort the columns by classification approach, id, speed, trial and TR:
  323. setorder(classification, id, tITI, trial_tITI, seq_tr) %>%
  324. # add trial counter for every speed condition (and sequence):
  325. .[stringr::str_detect(tITI, "rest"),
  326. by = .(classification, id, tITI, sequence), ":=" (t = seq(1, .N))] %>%
  327. .[!stringr::str_detect(tITI, "rest"),
  328. by = .(classification, id, tITI), ":=" (t = seq(1, .N))] %>%
  329. # group by classification, id, speed and trial and calculate step sizes:
  330. .[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI),
  331. trial_start := trial_tITI - shift(trial_tITI)] %>%
  332. # create a variable that signifies the trial starts for sequence trials:
  333. transform(trial_start = ifelse(
  334. is.na(trial_start) & !stringr::str_detect(tITI, "rest"), 1, trial_start)) %>%
  335. # create new variable names for resting state data:
  336. .[tITI == "rest_run-pre_ses-01", tITI := "Rest Pre S1"] %>%
  337. .[tITI == "rest_run-post_ses-01", tITI := "Rest Post S1"] %>%
  338. .[tITI == "rest_run-pre_ses-02", tITI := "Rest Pre S2"] %>%
  339. .[tITI == "rest_run-post_ses-02", tITI := "Rest Post S2"] %>%
  340. # sort the factor levels of the speed conditions:
  341. transform(tITI = factor(tITI, levels = c(
  342. "32 ms", "2048 ms", "Rest Pre S1", "Rest Post S1",
  343. "Rest Pre S2", "Rest Post S2"))) %>%
  344. # rename the factor for seen (1) and unseen (0) sequences:
  345. .[, seen := ifelse(seen == 1, "More frequent", "Less frequent")] %>%
  346. setDT(.)
  347. ```
We save the regression slope data during manual execution, store it in the repository, and reload it for all following analyses:
  349. ```{r, echo=TRUE, eval=FALSE}
  350. save(dt_pred_conc_slope, file = file.path(path_root, "data", "tmp", "dt_pred_conc_slope.Rdata"))
  351. ```
  352. ```{r, echo=TRUE}
  353. load(file.path(path_root, "data", "tmp", "dt_pred_conc_slope.Rdata"))
  354. ```
### Mean slope coefficients

#### Fixed event order

We calculate the mean slope coefficients assuming a fixed ordering of sequence events on sequence trials:
  358. ```{r}
  359. # calculate the mean slope across participants for each time point:
  360. dt_pred_conc_slope_mean <- dt_pred_conc_slope %>%
  361. filter(sequence == 1) %>%
  362. setDT(.) %>%
  363. .[, by = .(classification, tITI, t, trial_start), .(
  364. num_subs = .N,
  365. mean_slope = mean(slope),
  366. sem_upper = mean(slope) + (sd(slope)/sqrt(.N)),
  367. sem_lower = mean(slope) - (sd(slope)/sqrt(.N))
  368. )] %>%
  369. verify(all(num_subs == 32)) %>%
  370. filter(classification == "ovr") %>%
  371. # sort the factor levels of the speed conditions:
  372. transform(tITI = factor(tITI, levels = c(
  373. "2048 ms", "32 ms", "Rest Pre S1", "Rest Post S1",
  374. "Rest Pre S2", "Rest Post S2"))) %>%
  375. setDT(.)
  376. ```
#### Figure 5c

We plot the mean regression coefficient separately for
all sequence speed conditions and the pre-task resting-state data.
Trial boundaries (for sequence trials) are shown as gray vertical lines.
  381. ```{r, echo=FALSE}
  382. grays = colorRampPalette(c("lightgray", "black"))
  383. #colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
  384. colors = c(hcl.colors(5, "Cividis")[c(5,1)], grays(4))
  385. plot_trial_starts = dt_pred_conc_slope_mean %>%
  386. filter(trial_start == 1, !stringr::str_detect(tITI, "rest"))
  387. fig_prob_slope = ggplot(
  388. data = dt_pred_conc_slope_mean %>% filter(tITI != "Rest Post S1"),
  389. mapping = aes(
  390. x = as.numeric(t), y = as.numeric(mean_slope), color = as.factor(tITI),
  391. fill = as.factor(tITI))) +
  392. facet_wrap(~ as.factor(tITI), ncol = 1) +
  393. geom_vline(data = plot_trial_starts, aes(xintercept = t), linetype = "dashed", alpha = 0.1) +
  394. geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
  395. geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
  396. geom_line() +
  397. xlab("Time (TRs)") + ylab("Regression slope") +
  398. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(1, 200)) +
  399. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  400. scale_color_manual(values = colors, guide = FALSE) +
  401. scale_fill_manual(values = colors, guide = FALSE) +
  402. scale_y_continuous(labels = label_fill(seq(-0.1, 0.1, 0.05), mod = 2),
  403. breaks = seq(-0.1, 0.1, 0.05)) +
  404. theme(panel.border = element_blank(), axis.line = element_line()) +
  405. theme(axis.line = element_line(colour = "black"),
  406. panel.grid.major = element_blank(),
  407. panel.grid.minor = element_blank(),
  408. panel.border = element_blank(),
  409. panel.background = element_blank())
  410. fig_prob_slope
  411. ```
#### Source Data File Fig. 5c
  413. ```{r}
  414. dt_pred_conc_slope_mean %>% filter(tITI != "Rest Post S1") %>%
  415. select(-num_subs, -classification) %>%
  416. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5c.csv"),
  417. row.names = FALSE)
  418. ```
We calculate the mean regression slopes and compare sequence trials and rest:
  420. ```{r}
  421. # calculate mean regression slopes for fast and slow sequence trials and rest:
  422. dt_pred_conc_slope_stat = dt_pred_conc_slope %>%
  423. filter(sequence == 1) %>%
  424. setDT(.) %>%
  425. .[, by = .(classification, id, tITI), .(
  426. num_trs = .N,
  427. mean_abs_slope = mean(abs(slope)),
  428. abs_mean_slope = abs(mean(slope)),
  429. mean_sd_prob = mean(sd_prob)
  430. )] %>% verify(all(num_trs == 180)) %>%
  431. filter(classification == "ovr") %>%
  432. transform(tITI = factor(tITI, levels = c(
  433. "32 ms", "2048 ms", "Rest Pre S1", "Rest Post S1",
  434. "Rest Pre S2", "Rest Post S2"))) %>%
  435. setDT(.)
  436. ```
  437. ```{r, results="hold"}
  438. # compare mean probabilities between resting state and fast sequence data:
  439. mean_sd_rest_pre = subset(dt_pred_conc_slope_stat, tITI == "Rest Pre S1")$mean_sd_prob
  440. mean_sd_rest_post = subset(dt_pred_conc_slope_stat, tITI == "Rest Post S1")$mean_sd_prob
  441. mean_sd_fast = subset(dt_pred_conc_slope_stat, tITI == "32 ms")$mean_sd_prob
  442. mean_sd_slow = subset(dt_pred_conc_slope_stat, tITI == "2048 ms")$mean_sd_prob
  443. fast_vs_rest_pre = t.test(x = mean_sd_rest_pre, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
  444. fast_vs_rest_post = t.test(x = mean_sd_rest_post, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
  445. fast_vs_slow = t.test(x = mean_sd_slow, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
  446. slow_vs_rest_pre = t.test(x = mean_sd_rest_pre, y = mean_sd_slow, alternative = "two.sided", paired = TRUE)
  447. rest_pre_vs_rest_post = t.test(x = mean_sd_rest_pre, y = mean_sd_rest_post, alternative = "two.sided", paired = TRUE)
  448. print(round(c(mean(mean_sd_rest_pre), mean(mean_sd_fast), mean(mean_sd_slow)), 2))
  449. round_pvalues(p.adjust(
  450. c(fast_vs_rest_pre$p.value, fast_vs_slow$p.value, slow_vs_rest_pre$p.value,
  451. rest_pre_vs_rest_post$p.value, fast_vs_rest_post$p.value), method = "fdr"))
  452. cohensd_rest = round((mean(mean_sd_rest_pre) - mean(mean_sd_fast)) / sd(mean_sd_rest_pre - mean_sd_fast), 2)
  453. cohensd_slow = round((mean(mean_sd_slow) - mean(mean_sd_fast)) / sd(mean_sd_slow - mean_sd_fast), 2)
  454. print(cohensd_rest); print(cohensd_slow)
  455. fast_vs_rest_pre; fast_vs_slow
  456. ```
#### Figure 5a / 5b

We plot the mean absolute regression slope for all speed conditions
and the pre-task resting-state data:
  460. ```{r}
  461. plot_means = function(data, variable, ylabel, ylim){
  462. #colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
  463. colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(4))
  464. fig_mean_beta = ggplot(
  465. data = data %>% filter(tITI != "Rest Post S1"),
  466. aes(
  467. y = get(variable), x = tITI, fill = tITI)) +
  468. geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9)) +
  469. geom_point(position = position_jitter(width = 0.1, height = 0, seed = 2),
  470. alpha = 1, inherit.aes = TRUE, pch = 21,
  471. color = "white") +
  472. stat_summary(fun.data = mean_se, geom = "errorbar",
  473. position = position_dodge(0.9), width = 0, color = "black") +
  474. xlab("Data") + ylab(ylabel) +
  475. coord_capped_cart(left = "both", expand = TRUE, ylim = ylim) +
  476. scale_fill_manual(values = colors, guide = FALSE) +
  477. theme(axis.ticks.x = element_blank(), axis.line.x = element_blank(),
  478. axis.title.x = element_blank()) +
  479. theme(axis.text.x = element_text(angle = 45, hjust = 0.9)) +
  480. theme(panel.border = element_blank(), axis.line.y = element_line()) +
  481. theme(axis.line.y = element_line(colour = "black"),
  482. panel.grid.major = element_blank(),
  483. panel.grid.minor = element_blank(),
  484. panel.background = element_blank())
  485. }
  486. fig_mean_beta = plot_means(data = dt_pred_conc_slope_stat, variable = "abs_mean_slope",
  487. ylabel = "Absolute mean regression slope", ylim = c(0, 0.02))
  488. fig_mean_beta_abs = plot_means(data = dt_pred_conc_slope_stat, variable = "mean_abs_slope",
  489. ylabel = "Regression slope (abs)", ylim = c(0, 0.1))
  490. fig_sd_prob = plot_means(data = dt_pred_conc_slope_stat, variable = "mean_sd_prob",
  491. ylabel = "SD of probability", ylim = c(0, 0.4))
  492. fig_mean_beta; fig_mean_beta_abs; fig_sd_prob;
  493. ```
  494. #### Source Data File Fig. 5a / 5b
  495. ```{r}
  496. dt_pred_conc_slope_stat %>%
  497. select(-classification, -num_trs) %>%
  498. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5a.csv"),
  499. row.names = FALSE)
  500. dt_pred_conc_slope_stat %>%
  501. select(-classification, -num_trs) %>%
  502. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5b.csv"),
  503. row.names = FALSE)
  504. ```
  505. #### All possible event orders
We calculate the mean slope for every time point (for every TR) depending on
whether a sequence was actually seen or not (relevant for resting-state data).
Note that, by this definition, all sequences on sequence trials were "seen".
```{r}
# Average the regression slopes across participants for each time point (TR),
# separately for sequences that were actually seen during the task vs. not.
# calculate the mean slope across participants for each time point:
dt_pred_conc_slope_mean_seen <- dt_pred_conc_slope %>%
  # average slopes for each participant and each time point:
  .[, by = .(id, classification, tITI, t, trial_start, seen), .(
    num_seq = .N,
    mean_slope_sub = mean(slope)
  )] %>%
  # number of sequences will be 1 (sequence conditions), 15 (seen permuted
  # sequence condition), or 105 (not seen permuted sequence conditions):
  verify(num_seq %in% c(1, 15, 105)) %>%
  # calculate the mean slope across participants (including SEM):
  .[, by = .(classification, tITI, t, trial_start, seen), .(
    num_subs = .N,
    mean_slope = mean(mean_slope_sub),
    sem_upper = mean(mean_slope_sub) + (sd(mean_slope_sub)/sqrt(.N)),
    sem_lower = mean(mean_slope_sub) - (sd(mean_slope_sub)/sqrt(.N))
  )] %>%
  # verify that data was averaged across the correct number of participants:
  verify(all(num_subs == 32)) %>%
  # keep only the "ovr" classifier results (presumably one-vs-rest -- confirm):
  filter(classification == "ovr") %>%
  # sort factor levels of the speed condition:
  transform(tITI = factor(tITI, levels = c(
    "2048 ms", "32 ms", "Rest Pre S1", "Rest Post S1",
    "Rest Pre S2", "Rest Post S2"))) %>%
  # filter()/transform() return a data.frame; convert back to a data.table:
  setDT(.)
```
  536. ```{r, echo = FALSE}
  537. grays = colorRampPalette(c("lightgray", "black"))
  538. #colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
  539. colors = c(hcl.colors(5, "Cividis")[c(5,1)], grays(4))
  540. plot_trial_starts = dt_pred_conc_slope_mean_seen %>%
  541. filter(trial_start == 1, !stringr::str_detect(tITI, "rest"))
  542. fig_prob_slope_seen = ggplot(data = dt_pred_conc_slope_mean, mapping = aes(
  543. x = as.numeric(t), y = as.numeric(mean_slope), color = as.factor(tITI),
  544. fill = as.factor(tITI))) +
  545. facet_grid(vars(as.factor(tITI)), vars(as.factor(seen))) +
  546. geom_vline(data = plot_trial_starts, aes(xintercept = t), linetype = "dashed", alpha = 0.1) +
  547. geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
  548. geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
  549. geom_line() +
  550. xlab("Time (TRs)") + ylab("Regression slope") +
  551. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(1, 200)) +
  552. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  553. scale_color_manual(values = colors, guide = FALSE) +
  554. scale_fill_manual(values = colors, guide = FALSE) +
  555. scale_y_continuous(labels = label_fill(seq(-0.1, 0.1, 0.05), mod = 2),
  556. breaks = seq(-0.1, 0.1, 0.05)) +
  557. theme(panel.border = element_blank(), axis.line = element_line()) +
  558. theme(axis.line = element_line(colour = "black"),
  559. panel.grid.major = element_blank(),
  560. panel.grid.minor = element_blank(),
  561. panel.border = element_blank(),
  562. panel.background = element_blank())
  563. fig_prob_slope_seen
  564. ```
  565. We calculate the mean absolute and non-absolute slopes for sequence and rest trials
  566. depending on whether the permuted sequence was actually seen or not:
```{r}
# Participant-level summary statistics of the regression slopes, split by
# speed condition and by whether the (permuted) sequence was seen in the task.
# calculate mean regression slopes for fast and slow sequence trials and rest:
dt_pred_conc_slope_stat_seen = dt_pred_conc_slope %>%
  .[, by = .(classification, id, tITI, seen), .(
    num_trs = .N,
    mean_slope = mean(slope),
    mean_abs_slope = mean(abs(slope)),
    abs_mean_slope = abs(mean(slope)),
    # NOTE(review): if a group contains no positive (or no negative) slopes,
    # mean() over the empty subset yields NaN -- confirm this cannot occur:
    abs_pos_slope = mean(abs(slope[slope > 0])),
    abs_neg_slope = mean(abs(slope[slope < 0])),
    mean_sd_prob = mean(sd_prob)
  )] %>%
  # check if the average slope was calculated across the correct number of TRs
  # which should be 180 for sequence trials and 180 times number of sequences
  # not seen (105) and 180 times sequences seen (15) in permuted rest data:
  verify(all(num_trs %in% c(180, 180 * (120 - 15), 180 * 15))) %>%
  # keep only the "ovr" classifier results:
  filter(classification == "ovr") %>%
  # filter() returns a data.frame; convert back to a data.table:
  setDT(.)
```
  586. ```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
  587. lme_sd_prob <- lmerTest::lmer(
  588. sd_prob ~ tITI + (tITI | id),
  589. data = dt_pred_conc_slope, control = lcctrl
  590. )
  591. rmarkdown::paged_table(anova(lme_sd_prob))
  592. emmeans_results = emmeans(lme_sd_prob, list(pairwise ~ tITI))
  593. emmeans_results
  594. ```
  595. ```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
  596. lme_rest_sd = lmerTest::lmer(
  597. mean_sd_prob ~ tITI + (1 | id),
  598. data = dt_pred_conc_slope_stat, control = lcctrl)
  599. rmarkdown::paged_table(anova(lme_rest_sd))
  600. emmeans_results = emmeans(lme_rest_sd, list(pairwise ~ tITI))
  601. emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
  602. emmeans_results
  603. ```
  604. ```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
  605. lme_rest_slope = lmerTest::lmer(
  606. mean_abs_slope ~ tITI + (1 | id),
  607. data = dt_pred_conc_slope_stat, control = lcctrl)
  608. rmarkdown::paged_table(anova(lme_rest_slope))
  609. emmeans_results = emmeans(lme_rest_slope, list(pairwise ~ tITI))
  610. emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
  611. emmeans_results
  612. ```
  613. ```{r}
  614. plot_means_seen = function(data, variable, ylabel, ylim){
  615. #colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
  616. grays = colorRampPalette(c("lightgray", "black"))
  617. colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(4))
  618. fig_mean_beta = ggplot(data = data, aes(
  619. y = get(variable), x = tITI, fill = tITI)) +
  620. facet_wrap(~ seen) +
  621. geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9)) +
  622. geom_point(position = position_jitter(width = 0.1, height = 0, seed = 2),
  623. alpha = 1, inherit.aes = TRUE, pch = 21,
  624. color = "white") +
  625. stat_summary(fun.data = mean_se, geom = "errorbar",
  626. position = position_dodge(0.9), width = 0, color = "black") +
  627. xlab("Data") + ylab(ylabel) +
  628. coord_capped_cart(left = "both", expand = TRUE, ylim = ylim) +
  629. scale_fill_manual(values = colors, guide = FALSE) +
  630. theme(axis.ticks.x = element_blank(), axis.line.x = element_blank(),
  631. axis.title.x = element_blank()) +
  632. theme(axis.text.x = element_text(angle = 45, hjust = 0.9)) +
  633. theme(panel.border = element_blank(), axis.line.y = element_line()) +
  634. theme(axis.line.y = element_line(colour = "black"),
  635. panel.grid.major = element_blank(),
  636. panel.grid.minor = element_blank(),
  637. panel.background = element_blank())
  638. }
  639. fig_mean_beta_seen = plot_means_seen(
  640. data = dt_pred_conc_slope_stat_seen, variable = "abs_mean_slope",
  641. ylabel = "Absolute mean regression slope", ylim = c(-0.01, 0.02))
  642. fig_mean_beta_abs_seen = plot_means_seen(
  643. data = dt_pred_conc_slope_stat_seen, variable = "mean_abs_slope",
  644. ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
  645. fig_sd_prob_seen = plot_means_seen(
  646. data = dt_pred_conc_slope_stat_seen, variable = "mean_sd_prob",
  647. ylabel = "SD of probability", ylim = c(0, 0.4))
  648. fig_mean_pos_abs_seen = plot_means_seen(
  649. data = dt_pred_conc_slope_stat_seen, variable = "abs_pos_slope",
  650. ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
  651. fig_mean_neg_abs_seen = plot_means_seen(
  652. data = dt_pred_conc_slope_stat_seen, variable = "abs_neg_slope",
  653. ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
  654. fig_mean_slope_seen = plot_means_seen(
  655. data = dt_pred_conc_slope_stat_seen, variable = "mean_slope",
  656. ylabel = "Absolute mean regression slope", ylim = c(-0.01, 0.02))
  657. fig_mean_beta_seen; fig_mean_beta_abs_seen; fig_sd_prob_seen;
  658. fig_mean_pos_abs_seen; fig_mean_neg_abs_seen; fig_mean_slope_seen
  659. ```
  660. ### Frequency spectrum analyses
  661. #### Fixed event order
  662. We create a data table with frequency expectations:
  663. ```{r}
  664. fitfreq = 1 / 5.26
  665. stim_dur = 0.1
  666. num_seq_stim = 5
  667. # calculate the delta
  668. delta_freq_fast = fitfreq / (1 + fitfreq * (
  669. num_seq_stim * stim_dur + (num_seq_stim - 1) * 0.032))
  670. delta_freq_slow = fitfreq / (1 + fitfreq * (
  671. num_seq_stim * stim_dur + (num_seq_stim - 1) * 2.048))
  672. delta_freq_rest = 0.01
  673. # create a data table with the expected frequencies:
  674. frequency_expectation = data.table(xintercept = c(
  675. delta_freq_fast, delta_freq_slow, rep(delta_freq_rest, 4)))
  676. ```
  677. We calculate the frequency spectra based on the time courses of regression slope coefficients:
```{r}
library(lomb)
# create a time vector with random gaps:
# 12 TRs per repetition x 15 repetitions = 180 time points, matching num_trs.
# NOTE(review): runif() is not seeded, so the gap placement (and thus the
# exact Lomb-Scargle spectrum) differs between runs -- consider set.seed().
time_concat = as.vector(sapply(0:14, FUN = function(x) seq(0, 1*11, by = 1) + x*100 + round(runif(1)*50)))
# get the frequency spectrum of fast and slow trials:
dt_pred_rest_lsp = dt_pred_conc_slope %>%
  filter(sequence == 1) %>%
  setDT(.) %>%
  # Lomb-Scargle periodogram (0 to 0.3 Hz) per participant and condition:
  .[, by = .(classification, id, tITI), {
    frequency_spectrum = lsp(x = slope, times = time_concat, from = 0, to = 0.3, plot = FALSE)
    list(num_trs = .N, freq_bins = frequency_spectrum$scanned,
         # filter width = number of frequency bins spanning roughly 0.01 Hz:
         filter_width = round(0.01/mean(diff(frequency_spectrum$scanned))),
         power = frequency_spectrum$power
    )}] %>% verify(all(num_trs == 180)) %>% verify(length(unique(filter_width)) == 1) %>%
  # smooth the frequency spectra and frequency bins (moving average):
  .[, by = .(classification, id, tITI), ":=" (
    power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
    freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
  )]
```
  698. ```{r}
  699. # calculate the mean frequency profile across participants:
  700. dt_pred_rest_lsp_mean = dt_pred_rest_lsp %>%
  701. # filter out NA frequency spectrum:
  702. filter(!is.na(power_smooth)) %>%
  703. setDT(.) %>%
  704. # calculate the mean smoothed frequency spectrum across all frequency bins:
  705. .[, by = .(classification, tITI, freq_bins_smooth), .(
  706. num_subs = .N,
  707. mean_spec = mean(power_smooth),
  708. sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
  709. sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
  710. )] %>%
  711. verify(all(num_subs == 32))
  712. ```
  713. #### Figure 5d
```{r}
#colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
# one color per condition; 6 entries to match the 6 rows of
# frequency_expectation used for the dashed lines and text labels:
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(1), "black", grays(2))
fig_freq_lsp = ggplot(
  data = dt_pred_rest_lsp_mean %>% filter(tITI != "Rest Post S1"),
  aes(
    color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  # dashed vertical lines at the expected frequencies:
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = colors) +
  geom_line() +
  # shaded SEM band around the mean spectrum:
  geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  # annotate each expected frequency with its rounded value:
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.02, y = 0.1, label = paste(round(xintercept, 2))),
    color = colors) +
  xlab("Frequency") + ylab("Power") +
  scale_color_manual(name = "", values = colors) +
  scale_fill_manual(name = "", values = colors) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    ylim = c(0, 3), xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_lsp
```
  748. #### Source Data File Fig. 5d
  749. ```{r}
  750. dt_pred_rest_lsp_mean %>% filter(tITI != "Rest Post S1") %>%
  751. select(- classification, -num_subs) %>%
  752. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5d.csv"),
  753. row.names = FALSE)
  754. ```
  755. #### All possible event orders
  756. We calculate the frequency spectrum for all permuted resting state sequences and
  757. sequence trials:
```{r}
# create a time vector with random gaps:
# NOTE(review): runif() is unseeded, so these gaps differ from those used in
# the fixed-order analysis above and between runs -- confirm this is intended.
time_concat = as.vector(sapply(0:14, FUN = function(x) seq(0, 1*11, by = 1) + x*100 + round(runif(1)*50)))
# get the frequency spectrum of fast and slow trials:
dt_pred_rest_lsp_seen = dt_pred_conc_slope %>%
  # calculate the frequency spectrum for each permuted sequence and participant:
  .[, by = .(classification, id, tITI, sequence, seen), {
    frequency_spectrum = lomb::lsp(
      x = slope, times = time_concat, from = 0, to = 0.3, plot = FALSE)
    list(num_trs = .N, freq_bins = frequency_spectrum$scanned,
         # filter width = number of frequency bins spanning roughly 0.01 Hz:
         filter_width = round(0.01/mean(diff(frequency_spectrum$scanned))),
         power = frequency_spectrum$power
    )}] %>%
  # the number of TRs for each stretch of activation patterns should be 180:
  verify(all(num_trs == 180)) %>%
  # there should only be one filtering width per sequence:
  verify(length(unique(filter_width)) == 1) %>%
  # there should be 120 sequence permutations in the resting condition:
  verify(.[, by = .(classification, id, tITI), .(
    num_seq = length(unique(sequence)))]$num_seq %in% c(1, 120)) %>%
  # smooth the frequency spectra and frequency bins (moving average over
  # filter_width bins, i.e. ~0.01 Hz):
  .[, by = .(classification, id, tITI, sequence, seen), ":=" (
    power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
    freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
  )]
```
  784. We calculate the mean frequency spectra for each participant, classification approach,
  785. speed condition, smoothed frequency bin and if the (permuted) rest sequence
  786. was actually seen during the task or not:
```{r}
# calculate the mean frequency profile across participants:
dt_pred_rest_lsp_mean_seen = dt_pred_rest_lsp_seen %>%
  # filter out NA frequency spectrum:
  filter(!is.na(power_smooth)) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
  .[, by = .(classification, id, tITI, freq_bins_smooth), .(
    num_seqs = .N,
    power_smooth = mean(power_smooth)
  )] %>%
  # the number of sequences should be 1 for sequence trials and 120 for rest
  # (all permutations pooled, since `seen` is not a grouping variable here):
  verify(num_seqs %in% c(1, 120)) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins:
  .[, by = .(classification, tITI, freq_bins_smooth), .(
    num_subs = .N,
    mean_spec = mean(power_smooth),
    sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
    sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
  )] %>%
  # verify if data was averaged across the expected number of participants:
  verify(all(num_subs == 32))
```
  811. ```{r, echo=TRUE}
  812. calc_delta <- function(speed_s) {
  813. # input speed_s: sequence speed (ISI), in seconds
  814. fitfreq = 1 / 5.26
  815. stim_dur = 0.1
  816. num_seq_stim = 5
  817. delta = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * speed_s))
  818. return(delta)
  819. }
  820. # create a data table with the expected frequencies:
  821. frequency_expectation = data.table(xintercept = c(
  822. calc_delta(0.032), calc_delta(2.048), 0.01),
  823. tITI = c("32 ms", "2048 ms", "Rest Pre S1"))
  824. ```
  825. #### Figure 6a
```{r, echo=FALSE}
# colors for the dashed expectation lines / labels (3 entries, matching the
# 3 rows of frequency_expectation from the preceding chunk):
colors_all = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
# colors for the two resting-state conditions (pre / post):
colors_rest = c("black", "darkred")
plot_data <- dt_pred_rest_lsp_mean_seen %>%
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1"))
fig_freq_post <- ggplot(data = plot_data, aes(
  color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  # dashed vertical lines at the expected frequencies:
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = colors_all) +
  geom_line() +
  # shaded SEM band around the mean spectrum:
  geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  # annotate each expected frequency with its rounded value:
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.03, y = 0.1, label = paste(round(xintercept, 2))),
    color = colors_all) +
  xlab("Frequency") + ylab("Power") +
  scale_color_manual(name = "", values = colors_rest) +
  scale_fill_manual(name = "", values = colors_rest) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    ylim = c(0, 3), xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_post
```
  860. #### Source Data File Fig. 6a
  861. ```{r}
  862. dt_pred_rest_lsp_mean_seen %>%
  863. filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
  864. select(-classification, -num_subs) %>%
  865. write.csv(., file = file.path(path_sourcedata, "source_data_figure_6a.csv"),
  866. row.names = FALSE)
  867. ```
  868. We calculate the relative power (difference between pre- and post task resting state data) across the entire frequency spectrum:
```{r}
# calculate the mean frequency profile across participants:
dt_pred_rest_lsp_mean_rel_seen = dt_pred_rest_lsp_seen %>%
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
  # recode condition labels into valid column names for the wide format below:
  transform(tITI = ifelse(tITI == "Rest Pre S1", "rest_pre_s1", "rest_post_s1")) %>%
  # filter out NA frequency spectrum:
  filter(!is.na(power_smooth)) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
  .[, by = .(classification, id, tITI, freq_bins_smooth), .(
    num_seqs = .N,
    power_smooth = mean(power_smooth)
  )] %>%
  # the number of sequences should be 1 for sequence trials and 120 for rest
  # (all permutations pooled, since `seen` is not a grouping variable here):
  verify(num_seqs %in% c(1, 120)) %>%
  # transform resting state data from long to wide to calculate relative power:
  # (spread() is superseded by tidyr::pivot_wider() -- kept for compatibility)
  spread(data = ., key = tITI, value = power_smooth) %>%
  # relative power = post-task minus pre-task resting-state power:
  mutate(rel_power = rest_post_s1 - rest_pre_s1) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins:
  .[, by = .(classification, freq_bins_smooth), .(
    num_subs = .N,
    mean_spec = mean(rel_power),
    sem_upper = mean(rel_power) + (sd(rel_power)/sqrt(.N)),
    sem_lower = mean(rel_power) - (sd(rel_power)/sqrt(.N))
  )] %>%
  # verify if data was averaged across the expected number of participants:
  verify(all(num_subs == 32))
```
  899. We determine in which frequency range the peak in difference between pre- and post-task rest is:
  900. ```{r}
  901. increase_slow = round(dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth[which.max(dt_pred_rest_lsp_mean_rel_seen$mean_spec)], 2)
  902. increase_fast = round(dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth[dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth > 0.07][which.max(dt_pred_rest_lsp_mean_rel_seen[dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth > 0.07]$mean_spec)], 2)
  903. increase_slow; increase_fast;
  904. ```
  905. #### Figure 6b
```{r, echo=FALSE}
# colors for the dashed expectation lines / labels (3 entries, matching the
# 3-row frequency_expectation defined above):
colors_all = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
# kept for consistency with figure 6a; the manual scales below have no
# mapped color/fill aesthetic in this plot:
colors_rest = c("black", "darkred")
# NOTE(review): mean_spec is post-task minus pre-task power -- confirm the
# y-axis label "(Pre vs. Post)" reads as intended for that sign convention.
fig_freq_post_rel <- ggplot(data = dt_pred_rest_lsp_mean_rel_seen, aes(
  y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = colors_all) +
  # zero line: no difference between pre- and post-task rest power:
  geom_hline(aes(yintercept = 0)) +
  geom_line() +
  # shaded SEM band around the mean relative spectrum:
  geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.03, y = -0.25, label = paste(round(xintercept, 2))),
    color = colors_all) +
  xlab("Frequency") + ylab("Relative power (Pre vs. Post)") +
  scale_color_manual(name = "", values = colors_rest) +
  scale_fill_manual(name = "", values = colors_rest) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_post_rel
```
  939. #### Source Data File Fig. 6b
  940. ```{r}
  941. dt_pred_rest_lsp_mean_rel_seen %>%
  942. select(-classification, -num_subs) %>%
  943. write.csv(., file = file.path(path_sourcedata, "source_data_figure_6b.csv"),
  944. row.names = FALSE)
  945. ```
  946. #### Frequency spectra of frequent vs. less frequent sequences
We calculate the mean frequency spectra depending on whether the assumed sequences occurred more or less frequently during the task:
```{r}
# calculate the mean frequency profile across participants:
# NOTE(review): this overwrites the dt_pred_rest_lsp_mean_seen computed
# earlier (there without the `seen` grouping and used for Figure 6a), so the
# chunks must run in document order -- consider a distinct name.
dt_pred_rest_lsp_mean_seen = dt_pred_rest_lsp_seen %>%
  # filter out NA frequency spectrum:
  filter(!is.na(power_smooth)) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
  .[, by = .(classification, id, tITI, freq_bins_smooth, seen), .(
    num_seqs = .N,
    power_smooth = mean(power_smooth)
  )] %>%
  # the number of sequences should be 1 for sequence trials, 15 for seen
  # permuted rest repetitions and 105 for unseen sequence repetitions:
  verify(num_seqs %in% c(1, 15, 105)) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins:
  .[, by = .(classification, tITI, freq_bins_smooth, seen), .(
    num_subs = .N,
    mean_spec = mean(power_smooth),
    sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
    sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
  )] %>%
  # verify if data was averaged across the expected number of participants:
  verify(all(num_subs == 32))
```
```{r, echo=FALSE}
# one line color per condition (sequence speeds plus rest conditions):
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(1), "black")
fig_freq_lsp_seen = ggplot(data = dt_pred_rest_lsp_mean_seen, aes(
  color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  facet_wrap(~ seen) +
  # NOTE(review): frequency_expectation has 3 rows but 6 colors are supplied
  # via rep(colors[1:3], 2) (apparently one set per facet panel) -- confirm
  # this recycling is accepted by the installed ggplot2 version.
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = rep(colors[1:3], 2)) +
  geom_line() +
  # shaded SEM band around the mean spectrum:
  geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  # annotate each expected frequency with its rounded value:
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.02, y = 0.1, label = paste(round(xintercept, 2))),
    color = rep(colors[1:3], 2)) +
  xlab("Frequency") + ylab("Power") +
  scale_color_manual(name = "", values = colors) +
  scale_fill_manual(name = "", values = colors) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    ylim = c(0, 3), xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_lsp_seen
```
  1004. ### Relative frequency spectra (fixed order)
We compare the power at the frequencies expected for resting-state, fast, and
slow sequence trials within the resting-state, fast, and slow sequence trial data.
  1007. ```{r}
  1008. fitfreq = 1 / 5.26
  1009. stim_dur = 0.1
  1010. num_seq_stim = 5
  1011. # calculate the delta
  1012. delta_freq_fast = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 0.032))
  1013. delta_freq_slow = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 2.048))
  1014. delta_freq_rest = 0.01
  1015. # create a data table with the expected frequencies:
  1016. frequency_expectation = data.table(xintercept = c(
  1017. delta_freq_fast, delta_freq_slow, rep(delta_freq_rest, 4)))
  1018. ```
```{r}
# Extract, per participant and condition, the smoothed power at the frequency
# bin nearest to each expected frequency (fast / slow / rest):
dt_pred_rest_lsp_exp = dt_pred_rest_lsp %>%
  .[, by = .(classification, id, tITI), .(
    power_index_fast = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_fast))],
    power_index_slow = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_slow))],
    power_index_rest = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_rest))]
  )] %>%
  # melt all index variables into one column:
  # (gather() is superseded by tidyr::pivot_longer() -- kept for compatibility)
  gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>% setDT(.) %>%
  # human-readable labels including the rounded expected frequency:
  .[grepl("index_fast", index), label := paste0("Fast (", round(delta_freq_fast, 2), ")")] %>%
  .[grepl("index_slow", index), label := paste0("Slow (", round(delta_freq_slow, 2), ")")] %>%
  .[grepl("index_rest", index), label := paste0("Rest (", round(delta_freq_rest, 2), ")")] %>%
  setorder(., classification, id, tITI) %>%
  # keep only the "ovr" classifier results:
  filter(classification == "ovr") %>%
  # filter() returns a data.frame; convert back to a data.table:
  setDT(.)
```
  1035. #### Figure 5e
```{r, echo=TRUE}
# bar colors per condition (4 entries; the post-task rest condition is
# filtered out of the plotted data below):
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray", "black")
fig_freq_exp = ggplot(
  data = dt_pred_rest_lsp_exp %>% filter(tITI != "Rest Post S1"),
  aes(y = power, x = fct_rev(label), fill = tITI)) +
  geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
           width = 0.8) +
  # individual participants as jittered dots within each dodged bar:
  geom_point(position = position_jitterdodge(
    jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
    alpha = 1, inherit.aes = TRUE, pch = 21,
    color = "white") +
  stat_summary(fun.data = "mean_se", geom = "errorbar",
               position = position_dodge(0.9), width = 0, color = "black") +
  xlab("Predicted frequency") + ylab("Power") +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 5)) +
  scale_fill_manual(values = colors, name = "") +
  # hide x-axis ticks and line by drawing them in white:
  theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
        legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_exp
```
  1064. ```{r, results="hold"}
  1065. lme_power <- lmerTest::lmer(
  1066. power ~ tITI + (1 | id),
  1067. data = dt_pred_rest_lsp_exp %>%
  1068. filter(tITI != "Rest Post S1" & index == "power_index_fast"),
  1069. control = lcctrl)
  1070. summary(lme_power)
  1071. anova(lme_power)
  1072. emmeans_results = emmeans(lme_power, list(pairwise ~ tITI))
  1073. emmeans_results
  1074. ```
  1075. ```{r}
# Pairwise paired t-tests on power at the predicted fast frequency between
# the fast, slow and pre-task rest conditions, FDR-corrected.
# get the power spectrum of the expected fast frequency in fast data:
power_fast = subset(dt_pred_rest_lsp_exp, index == "power_index_fast" & tITI == "32 ms")$power
# get the power spectrum of the expected fast frequency in slow data:
power_slow = subset(dt_pred_rest_lsp_exp, index == "power_index_fast" & tITI == "2048 ms")$power
# get the power spectrum of the expected fast frequency in rest data:
power_rest_pre = subset(dt_pred_rest_lsp_exp, index == "power_index_fast" & tITI == "Rest Pre S1")$power
# compare fast vs. slow and fast vs. rest:
# NOTE(review): paired = TRUE relies on the three vectors being aligned by
# participant, i.e. on identical row ordering in all three subsets — the
# table is sorted by id upstream, but confirm.
ttest_power_fastvsslow = t.test(power_fast, power_slow, paired = TRUE)
ttest_power_fastvsrestpre = t.test(power_fast, power_rest_pre, paired = TRUE)
ttest_power_slowvsrestpre = t.test(power_slow, power_rest_pre, paired = TRUE)
# FDR-correct the three p-values and round for reporting:
round_pvalues(p.adjust(c(ttest_power_fastvsslow$p.value,
                         ttest_power_fastvsrestpre$p.value,
                         ttest_power_slowvsrestpre$p.value), method = "fdr"))
ttest_power_fastvsslow; ttest_power_fastvsrestpre; ttest_power_slowvsrestpre
  1090. ```
  1091. #### Source Data File Fig. 5e
  1092. ```{r}
  1093. dt_pred_rest_lsp_exp %>% filter(tITI != "Rest Post S1") %>%
  1094. select(-classification, -index) %>%
  1095. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5e.csv"),
  1096. row.names = FALSE)
  1097. ```
  1098. ### Relative frequency spectra (all orders)
  1099. ```{r}
# Extract, per participant / condition / sequence / seen-status, the smoothed
# spectral power at the three model-predicted frequencies, then average
# across sequences within each seen-status.
dt_pred_rest_lsp_exp_seen = dt_pred_rest_lsp_seen %>%
  # pick the power value at the frequency bin closest to each predicted
  # frequency (fast / slow / rest):
  .[, by = .(classification, id, tITI, sequence, seen), .(
    power_index_fast = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_fast))],
    power_index_slow = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_slow))],
    power_index_rest = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_rest))]
  )] %>%
  # melt all index variables into one column:
  gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>%
  setDT(.) %>%
  # attach a human-readable label with the predicted frequency value:
  .[grepl("index_fast", index), label := paste0("Fast (", round(delta_freq_fast, 2), ")")] %>%
  .[grepl("index_slow", index), label := paste0("Slow (", round(delta_freq_slow, 2), ")")] %>%
  .[grepl("index_rest", index), label := paste0("Rest (", round(delta_freq_rest, 2), ")")] %>%
  # average across sequences for seen and unseen sequences:
  .[, by = .(classification, id, tITI, seen, index, label), .(
    num_seqs = .N,
    power = mean(power)
  )] %>%
  # check if the number of sequences matches for sequence (1), seen (15) and
  # unseen (105) sequences in permuted resting state sequences:
  verify(num_seqs %in% c(1, 15, 105)) %>%
  setorder(., classification, id, tITI) %>%
  # restrict to the one-versus-rest classifier:
  filter(classification == "ovr") %>%
  setDT(.)
  1123. ```
  1124. ```{r}
# Bar plot of power at the predicted frequencies, as in Figure 5e, but
# faceted by whether the sequence was seen during the task.
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray", "black")
fig_freq_exp_seen = ggplot(data = dt_pred_rest_lsp_exp_seen, aes(
  y = power, x = fct_rev(label), fill = tITI)) +
  # bars: across-participant mean power per condition:
  geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
           width = 0.8) +
  # one dot per participant, reproducible jitter (fixed seed):
  geom_point(position = position_jitterdodge(
    jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
    alpha = 1, inherit.aes = TRUE, pch = 21,
    color = "white") +
  # separate panels for seen vs. unseen sequences:
  facet_wrap(~ seen) +
  # error bars: standard error of the mean:
  stat_summary(fun.data = "mean_se", geom = "errorbar",
               position = position_dodge(0.9), width = 0, color = "black") +
  xlab("Predicted frequency") + ylab("Power") +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 5)) +
  scale_fill_manual(values = colors, name = "") +
  # "hide" the x-axis line and ticks by drawing them in white:
  theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
        legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_exp_seen
  1151. ```
  1152. We examine the effect of resting-state session (pre- vs. post-task rest) and sequence frequency (less frequent vs. more frequent) on the power in the high frequency range (predicted by our model) indicative of fast sequential reactivation:
  1153. ```{r, results="hold"}
# Mixed model on resting-state data only: session (pre vs. post rest, coded
# in tITI) by seen-status interaction on fast-frequency power, with
# by-participant random slopes for both factors.
lme_power <- lmerTest::lmer(
  power ~ tITI * seen + (tITI + seen | id),
  data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
                  stringr::str_detect(tITI, "Rest")),
  control = lcctrl)
summary(lme_power)
anova(lme_power)
# pairwise session comparisons within each seen-status level:
emmeans_results = emmeans(lme_power, list(pairwise ~ tITI | seen))
emmeans_results
  1163. ```
  1164. ```{r, echo=TRUE, results="hold"}
# Reduced model: main effect of resting-state session (tITI) only, with a
# by-participant random slope for seen-status.
lme_power <- lmerTest::lmer(
  power ~ tITI + (seen | id),
  data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
                  stringr::str_detect(tITI, "Rest")),
  control = lcctrl)
summary(lme_power)
anova(lme_power)
# pairwise comparison of pre- vs. post-task rest:
emmeans_results = emmeans(lme_power, list(pairwise ~ tITI))
emmeans_results
  1174. ```
  1175. ```{r, echo=TRUE, results="hold"}
# Reduced model: main effect of seen-status only, with a by-participant
# random slope for session (tITI).
lme_power <- lmerTest::lmer(
  power ~ seen + (tITI | id),
  data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
                  stringr::str_detect(tITI, "Rest")), control = lcctrl)
summary(lme_power)
anova(lme_power)
# pairwise comparison of seen vs. unseen sequences:
emmeans_results = emmeans(lme_power, list(pairwise ~ seen))
emmeans_results
  1184. ```
  1185. We compare the power in the fast frequency spectrum between pre- and post-task resting-state data:
  1186. ```{r}
# Paired t-tests comparing fast-frequency power between pre- and post-task
# rest, separately for seen and unseen sequences; p-values FDR-adjusted.
# NOTE(review): t.test(formula, paired = TRUE) is deprecated and removed in
# R >= 4.4 — consider refactoring to the two-vector form.
dt_pred_rest_lsp_exp_seen %>%
  # select resting state data and power in the fast frequency range:
  filter(stringr::str_detect(tITI, "Rest") & index == "power_index_fast") %>%
  group_by(index, seen) %>%
  do(broom::tidy(t.test(formula = power ~ tITI, data = ., paired = TRUE))) %>%
  setDT(.) %>%
  .[, "p.value_adjust" := p.adjust(p.value, method = "fdr")] %>%
  .[, "p.value_round" := round_pvalues(p.value)] %>%
  .[, "p.value_adjust_round" := round_pvalues(p.value_adjust)] %>%
  # check if the degrees-of-freedom are n - 1 (n = 32 participants):
  verify(parameter == 32 - 1) %>%
  rmarkdown::paged_table(.)
  1199. ```
  1200. #### Figure 6c
  1201. ```{r}
# Figure 6c: power at the predicted fast frequency in pre- vs. post-task
# rest, split by whether the sequence was seen during the task.
plot_data <- dt_pred_rest_lsp_exp_seen %>%
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
  filter(index == "power_index_fast")
colors = c(brewer.pal(n = 8, name = "RdBu")[6], hcl.colors(5, "Cividis")[c(1)])
fig_freq_exp_post = ggplot(data = plot_data, aes(
  y = power, x = tITI, fill = seen)) +
  # bars: across-participant mean power per session x seen-status cell:
  geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
           width = 0.8) +
  # one dot per participant, reproducible jitter (fixed seed):
  geom_point(position = position_jitterdodge(
    jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
    alpha = 1, inherit.aes = TRUE, pch = 21,
    color = "white") +
  # error bars: standard error of the mean:
  stat_summary(fun.data = "mean_se", geom = "errorbar",
               position = position_dodge(0.9), width = 0, color = "black") +
  xlab("Resting-state data") + ylab("Power at fast frequency (0.17)") +
  #ggtitle("Predicted fast frequency (0.17 Hz)") +
  theme(plot.title = element_text(hjust = 1)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
  scale_fill_manual(values = colors, name = "Sequences") +
  # "hide" the x-axis line and ticks by drawing them in white:
  theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
        legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  guides(fill = guide_legend(title.position = "top", title.hjust = 0.5)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
#theme(legend.title = element_text(size = 10),
# legend.text = element_text(size = 10))
fig_freq_exp_post
  1235. ```
  1236. #### Source Data File Fig. 6c
  1237. ```{r}
  1238. dt_pred_rest_lsp_exp_seen %>%
  1239. filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
  1240. filter(index == "power_index_fast") %>%
  1241. select(-classification, -index, -label, -num_seqs) %>%
  1242. write.csv(., file = file.path(path_sourcedata, "source_data_figure_6c.csv"),
  1243. row.names = FALSE)
  1244. ```
  1245. ### Figure 6
  1246. ```{r}
  1247. plot_grid(fig_freq_post, fig_freq_post_rel, fig_freq_exp_post, nrow = 1, ncol = 3,
  1248. labels = c("a", "b", "c"), rel_widths = c(3, 3, 3))
  1249. ```
  1250. ```{r, echo=TRUE, eval=FALSE}
# Save the combined figure under its working title (chunk has eval=FALSE,
# so this is kept for reference but not executed when knitting).
ggsave(filename = "highspeed_plot_decoding_resting_post.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 9, height = 3)
  1254. ```
  1255. ```{r}
# Save the combined figure as manuscript Figure 6 (same dimensions as above).
ggsave(filename = "wittkuhn_schuck_figure_6.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 9, height = 3)
  1259. ```
  1260. ### Inserting sequence trials into rest
  1261. We create a function to sample sequence trials:
  1262. ```{r}
  1263. subseqsample = function(sequence, seq_size = 12, seq_num = 15) {
  1264. # initialize an empty array for the resulting indices:
  1265. index = rep(NA, length(sequence))
  1266. # calculate how much space would be allocated to gaps:
  1267. total_gaps = length(sequence) - seq_size * seq_num
  1268. # calculate random gaps
  1269. random_gaps = diff(sort(c(0, sample(1:(total_gaps + seq_num), seq_num), total_gaps + seq_num + 1)))
  1270. # we subtract 1 for each element of seq_num we added above:
  1271. random_gaps = random_gaps - 1
  1272. # make sure that the sum of random gaps matches the total gaps:
  1273. if (sum(random_gaps) != total_gaps) {warning("sum of gaps does not match!")}
  1274. # generate a random order of subsequences:
  1275. seq_order = sample(1:seq_num, seq_num)
  1276. # initialize the first starting point of potential sequences:
  1277. cstart = 0
  1278. # loop through all subsequences:
  1279. for (cseq in seq(1, seq_num)) {
  1280. # select the first random gap:
  1281. cgap = random_gaps[cseq]
  1282. # take the starting point and add the current random gap:
  1283. cstart = cstart + cgap + 1 # +1 because gap can be 0
  1284. # calculate the current subsequence index:
  1285. cidx = seq(cstart, (cstart + seq_size - 1))
  1286. # insert the random sequence index into the index array:
  1287. index[cidx] = seq_order[cseq]
  1288. # define a new starting position for the next subsequence:
  1289. cstart = max(cidx)
  1290. }
  1291. # return the indices:
  1292. return(index)
  1293. }
  1294. subseqsample(sequence = seq(1, 233))
  1295. ```
  1296. We randomly sample 15 sequences of 12 TRs each from resting state data and
  1297. compare the mean frequency spectrum across those trials between resting state,
  1298. fast and slow sequence trials.
  1299. ```{r}
# Compare the mean frequency spectrum of decoding slopes between fast trials,
# slow trials, and 15 randomly sampled 12-TR pseudo-trials from pre-task rest.
dt_pred_rest_freq = dt_pred_conc_slope %>%
  # we deselect post-task resting-state data and assume a fixed ordering:
  filter(!(tITI == "Rest Post S1") & sequence == 1) %>%
  # filter for TRs in the forward and backward period only in sequence trials:
  filter(!(tITI %in% c("32 ms", "2048 ms") & !(seq_tr %in% seq(2, 13)))) %>%
  setDT(.) %>%
  # select random chunks of TRs in the resting state data
  # (subseqsample labels 15 non-overlapping 12-TR segments, NA elsewhere):
  .[tITI == "Rest Pre S1", by = .(classification, id), ":=" (trial_tITI = subseqsample(seq_tr))] %>%
  # filter out all NA trials on resting state data
  filter(!(is.na(trial_tITI) & tITI == "Rest Pre S1")) %>%
  setDT(.) %>%
  # assert that all the three conditions (two sequence trials and rest data) are present:
  assert(in_set("32 ms", "2048 ms", "Rest Pre S1"), tITI) %>%
  # calculate the frequency spectrum for every participant, condition and
  # (pseudo-)trial from the z-scored slope time course:
  .[, by = .(classification, id, tITI, trial_tITI), {
    frequency_spectrum = stats::spectrum(
      x = scale(slope), plot = FALSE, detrend = TRUE, demean = FALSE, spans = 5)
    list(
      num_trs = .N,
      freq_bins = frequency_spectrum$freq,
      spectrum = frequency_spectrum$spec
    )}] %>% verify(all(num_trs %in% c(12, 6))) %>%
  # NOTE(review): num_trs of 12 or 6 presumably reflects trial length vs. a
  # shorter condition — confirm against the upstream trial definitions.
  # normalize the frequencies per participant and trial to remove mean differences:
  .[, by = .(classification, id, tITI, trial_tITI), ":=" (spectrum = spectrum/sum(spectrum))] %>%
  # multiply the frequency levels by 1/TR (TR = 1.25 s) to convert to Hz:
  transform(freq_bins = freq_bins * (1/1.25)) %>% setDT(.) %>%
  # calculate the mean frequency spectrum per participant and condition:
  .[, by = .(classification, id, tITI, freq_bins), .(
    num_trials = .N,
    mean_spec = mean(spectrum)
  )] %>% verify(all(num_trials == 15)) %>%
  # calculate the average frequency spectrum across all participants
  # (in data.table's j, mean_spec on the right-hand sides refers to the
  # input column, not the freshly computed aggregate):
  .[, by = .(classification, tITI, freq_bins), .(
    num_subs = .N,
    mean_spec = mean(mean_spec),
    sem_upper = mean(mean_spec) + (sd(mean_spec)/sqrt(.N)),
    sem_lower = mean(mean_spec) - (sd(mean_spec)/sqrt(.N))
  )] %>%
  verify(all(num_subs == 32))
  1339. ```
  1340. We insert a number of events with probabilities scaled at different SNR levels:
  1341. ```{r}
# Simulation: insert SNR-scaled sequence-trial probabilities into pre-task
# resting-state data, for every participant x number-of-inserts x SNR level
# x sequence speed, to test the sensitivity of the slope/frequency measures.
dt_pred_conc = dt_pred_all %>%
  # we add a consecutive counter that will be used to concatenate data:
  .[, by = .(classification, id, tITI, class), ":=" (t = seq(1, .N))] %>%
  # we will focus all further analyses on the one-versus-rest classifier:
  filter(classification == "ovr") %>% setDT(.)
# create separate dataframes for resting state data and sequence trial data:
dt_pred_rest_s1 = subset(dt_pred_conc, tITI == "rest_run-pre_ses-01")
dt_pred_seq_cut = subset(dt_pred_conc, tITI %in% c("32 ms", "2048 ms"))
speed_levels = unique(dt_pred_seq_cut$tITI)
num_speeds = length(speed_levels)
# define the number of SNR levels and sequence inserts:
snr_levels = c(4/5, 1/2, 1/4, 1/8, 0)
snr_levels_char = c("4/5", "1/2", "1/4", "1/8", "0")
num_snrs = length(snr_levels)
num_inserts = 6
insert_length = 12
# get the number of TRs during resting state and warn if incorrect:
num_rest_trs = max(dt_pred_rest_s1$t)
if (num_rest_trs != 233) {warning("number of resting state TRs incorrect")}
num_seq_trials = max(dt_pred_seq_cut$trial_tITI)
if (num_seq_trials != 15) {warning("number of sequence trials incorrect")}
num_subs = length(unique(dt_pred_rest_s1$id))
if (num_subs != 32) {warning("number of participants incorrect")}
# create a vector of seeds to make computations reproducible:
# NOTE(review): the seeds are commented out, so the sampling below is NOT
# reproducible across runs — confirm whether this is intended.
#seeds = seq(1, num_snrs * num_inserts * num_subs * num_speeds)
dt_pred_rest_insert <- list()
count = 1
for (sub in unique(dt_pred_rest_s1$id)) {
  # sample random sequence trials to be cut out depending on the number of inserts:
  cutout_trials = sort(sample(1:num_seq_trials, num_inserts, replace = FALSE))
  # sample random insert sequences depending on the number of inserts (no overlap);
  # twice as many segments are drawn so half can serve as blending segments:
  rest_seq = subseqsample(sequence = seq(1, num_rest_trs), seq_size = insert_length, seq_num = num_inserts * 2)
  subsequence_starts = sort(sapply(X = seq(1, num_inserts * 2), FUN = function(x){which(rest_seq == x)[1]}))
  insert_starts = sort(sample(subsequence_starts, num_inserts, replace = FALSE))
  insert_stops = sort(insert_starts + insert_length - 1)
  blend_starts = sort(setdiff(subsequence_starts, insert_starts))
  blend_stops = sort(blend_starts + insert_length - 1)
  for (ninserts in seq(1, num_inserts)) {
    snr_index = 0
    for (snr in snr_levels) {
      snr_index = snr_index + 1
      for (speed in speed_levels) {
        # create a resting state data subset for the current settings:
        dt_pred_rest_tmp = dt_pred_rest_s1 %>%
          filter(id == sub) %>% setorder(classification, id, t, class) %>%
          mutate(snr = snr) %>% mutate(insert = ninserts) %>% mutate(position_insert = position) %>%
          mutate(speed = speed) %>% mutate(snr_char = snr_levels_char[snr_index]) %>%
          mutate(prob_insert = probability) %>% mutate(prob_insert_blend = probability) %>%
          mutate(insert_index = NA) %>% mutate(insert_start = NA) %>% setDT(.) %>%
          verify(.[, by = .(classification, id, position), .(num_trs = .N)]$num_trs == num_rest_trs)
        # loop through all events to cutout and insert fast sequences:
        for (cevent in 1:ninserts) {
          # create the sequence for sequence inserting:
          insert_seq = seq(insert_starts[cevent], insert_stops[cevent])
          # create the sequence for the blending rest trials:
          blend_seq = seq(blend_starts[cevent], blend_stops[cevent])
          # select the sequence from fast trials:
          dt_pred_seq_cutout = dt_pred_seq_cut %>%
            # filter for specific participant and specific trial:
            filter(id == sub & trial_tITI == cutout_trials[cevent]) %>%
            # filter for specific speed and specific trs:
            filter(tITI == speed & seq_tr %in% seq(2, insert_length + 1)) %>%
            # sort by classification, id, timepoint and class (as rest data, see above):
            setorder(classification, id, t, class) %>%
            # multiply the probability with the current SNR level:
            mutate(prob_snr = probability * snr)
          # insert the snr-adjusted sequence trial probabilities (relies on
          # both tables having identical row ordering by t and class):
          dt_pred_rest_tmp$prob_insert[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr
          # keep the position indices of the inserted sequence trials:
          dt_pred_rest_tmp$position_insert[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$position
          # blend the snr-adjusted sequence trials with data from snr-adjusted rest trials:
          dt_pred_rest_tmp$prob_insert_blend[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr +
            dt_pred_rest_tmp$probability[dt_pred_rest_tmp$t %in% blend_seq] * (1 - snr)
          # create a new column with the (SNR-adjusted) sequence probabilities;
          # NOTE(review): prob_seq, prob_seq_snr and insert_num do not exist
          # before the first subassignment — R creates them NA-filled, which
          # works but is fragile:
          dt_pred_rest_tmp$prob_seq[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$probability
          dt_pred_rest_tmp$prob_seq_snr[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr
          # include indices that indicate where data was inserted:
          dt_pred_rest_tmp$insert_index[dt_pred_rest_tmp$t %in% insert_seq] = "inserted"
          dt_pred_rest_tmp$insert_num[dt_pred_rest_tmp$t %in% insert_seq] = cevent
          dt_pred_rest_tmp$insert_start[dt_pred_rest_tmp$t %in% min(insert_seq)] = 1
        }
        dt_pred_rest_insert[[count]] = dt_pred_rest_tmp
        count = count + 1
      }
    }
  }
}
# combine data in one datatable:
dt_pred_rest_insert <- do.call("rbind", dt_pred_rest_insert)
# calculate the differences in probability:
dt_pred_rest_insert$prob_diff = dt_pred_rest_insert$probability - dt_pred_rest_insert$prob_insert_blend
  1433. ```
  1434. Plot differences between inserted sequence speeds at different SNR levels:
  1435. ```{r}
# Plot the SNR-scaled probabilities of one inserted fast-sequence trial for
# the example participant, one line per SNR level, faceted by stimulus class.
cols_order = colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5)
plot_data = subset(dt_pred_rest_insert, id == example_id) %>%
  filter(insert_index == "inserted") %>% filter(speed == "32 ms") %>%
  filter(insert == 1) %>%
  # order SNR levels from high to low for the legend:
  transform(snr_char = factor(as.factor(snr_char), levels = c("4/5", "1/2", "1/4", "1/8", "0"))) %>% setDT(.)
fig_prob_diff_snr = ggplot(data = plot_data, aes(
  x = as.factor(t), y = prob_seq_snr, color = snr_char, group = snr_char)) +
  geom_line() + geom_point(alpha = 0.3) +
  facet_wrap(~ class) + ylim(c(0,1)) +
  scale_color_manual(values = cols_order, name = "SNR level") +
  ylab("Probability (%)") + xlab("Time (TRs)") +
  theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
  theme(legend.position = c(1, 0), legend.justification = c(1, 0)) +
  # relabel the absolute rest TR numbers as trial-relative TRs 1-12,
  # showing only the first and last label:
  scale_x_discrete(labels = label_fill(seq(1, 12, 1), mod = 11),
                   breaks = seq(min(plot_data$t), max(plot_data$t), 1)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_prob_diff_snr
  1459. ```
  1460. Plot differences in probability between true and augmented rest data.
  1461. ```{r}
  1462. plot_data = subset(dt_pred_rest_insert, id == example_id & speed == "32 ms") %>%
  1463. filter(snr_char %in% c("4/5", "1/4") & insert == c(2, 6)) %>%
  1464. mutate(snr_char = paste("SNR level:", snr_char)) %>%
  1465. mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts")))
  1466. # create data frame to plot rectangles where trials were inserted:
  1467. plot_rectangles = plot_data %>%
  1468. filter(insert_start == 1) %>%
  1469. mutate(xmin = t) %>%
  1470. mutate(xmax = ifelse(speed == "32 ms", xmin + 11, xmin + 11)) %>%
  1471. mutate(ymin = -100, ymax = 100)
  1472. fig_inserts = ggplot(data = plot_data, mapping = aes(
  1473. x = as.numeric(t), y = as.numeric(prob_diff * 100), group = as.factor(position))) +
  1474. geom_hline(aes(yintercept = 0), color = "gray") +
  1475. geom_line(mapping = aes(color = as.factor(position))) +
  1476. facet_grid(vars(as.factor(insert)), vars(fct_rev(as.factor(snr_char)))) +
  1477. geom_rect(data = plot_rectangles, aes(
  1478. xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax),
  1479. alpha = 0.2, inherit.aes = FALSE, show.legend = FALSE, fill = "gray") +
  1480. theme(legend.position = "top", legend.direction = "horizontal",
  1481. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  1482. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  1483. xlab("Time (TRs)") + ylab("Difference in probability (%)\nof sequence-free vs. -inserted rest") +
  1484. scale_colour_manual(name = "Serial position", values = color_events) +
  1485. coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
  1486. xlim = c(0, 250), ylim = c(-100, 100)) +
  1487. guides(color = guide_legend(nrow = 1)) +
  1488. scale_x_continuous(labels = label_fill(seq(0, 250, 25), mod = 5),
  1489. breaks = seq(0, 250, 25)) +
  1490. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  1491. theme(panel.border = element_blank(), axis.line = element_line()) +
  1492. theme(axis.line = element_line(colour = "black"),
  1493. panel.grid.major = element_blank(),
  1494. panel.grid.minor = element_blank(),
  1495. panel.border = element_blank(),
  1496. panel.background = element_blank())
  1497. fig_inserts
  1498. ```
  1499. We calculate the slopes for each speed, snr and insert level.
Attention: this step is computationally expensive because a large number of linear models are estimated — expect roughly 13 minutes of run time.
  1502. ```{r}
# Sanity check: position and position_insert may only differ at time points
# where sequence data was actually inserted.
dt_pred_rest_insert %>%
  verify(.[position != position_insert,]$insert_index == "inserted")
  1505. ```
  1506. ```{r}
# For every TR of the augmented rest data, fit a linear regression of decoded
# probability on serial position (one model per TR across the 5 classes) for
# both the original and the sequence-inserted probabilities. Slow: one lm()
# per id x speed x insert x SNR x TR combination (~13 min).
start_time = Sys.time()
# calculate the slope for each inserted and snr-adjusted rest data segment:
dt_pred_rest_insert_slope = dt_pred_rest_insert %>% setDT(.) %>%
  # calculate the regression slopes for each id, trial, speed, snr, insert and TR:
  .[, by = .(id, classification, speed, trial_tITI, insert, snr, snr_char, t, insert_num), .(
    num_events = .N,
    mean_position = mean(position),
    mean_position_insert = mean(position_insert),
    # slopes are sign-flipped so that forward order yields positive values:
    slope_rest = coef(lm(probability ~ position))[2] * (-1),
    slope_insert = coef(lm(prob_insert_blend ~ position_insert))[2] * (-1),
    sd_prob_rest = sd(probability),
    sd_prob_insert = sd(prob_insert_blend)
    # verify that the slope was calculated across five events with mean position of 3:
  )] %>% verify(all(num_events == 5)) %>% verify(all(mean_position == 3)) %>%
  verify(all(mean_position_insert == 3)) %>%
  # sort the data table by speed, trial, number of inserts, snr level and time:
  setorder(id, speed, trial_tITI, insert, snr, snr_char, t) %>%
  # filter for one-versus-rest classification only:
  filter(classification == "ovr") %>% setDT(.)
end_time = Sys.time()
print(end_time - start_time)
  1528. ```
  1529. Plot slopes with inserts:
  1530. ```{r}
  1531. colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
  1532. plot_rest = subset(dt_pred_rest_insert_slope, id == example_id) %>%
  1533. filter(snr_char %in% c("0", "4/5") & insert == c(6)) %>%
  1534. mutate(snr_char = paste("SNR level:", snr_char)) %>%
  1535. mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts")))
  1536. # create data frame to plot rectangles where trials were inserted:
  1537. plot_rectangles = plot_rest %>%
  1538. filter(!is.na(insert_num)) %>% setDT(.) %>%
  1539. .[, by = .(id, classification, speed, insert, snr, snr_char, insert_num), .(
  1540. xmin = min(t), xmax = max(t), ymin = -0.25, ymax = 0.25
  1541. )]
  1542. plot_inserts = plot_rest %>%
  1543. filter(!is.na(insert_num)) %>% setDT(.)
  1544. # plot the
  1545. fig_inserts_slopes = ggplot(data = plot_rest, mapping = aes(
  1546. x = as.numeric(t), y = as.numeric(slope_rest), color = fct_rev(speed))) +
  1547. geom_hline(aes(yintercept = 0), color = "black") +
  1548. geom_line(aes(color = "rest")) +
  1549. facet_grid(vars(as.factor(insert)), vars(as.factor(snr_char))) +
  1550. #facet_wrap(~ speed, nrow = 2, ncol = 1) +
  1551. facet_grid(vars(fct_rev(as.factor(speed))), vars(fct_rev(as.factor(snr_char)))) +
  1552. geom_rect(data = plot_rectangles, aes(
  1553. xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, fill = speed),
  1554. alpha = 0.2, inherit.aes = FALSE, show.legend = FALSE) +
  1555. geom_line(data = plot_inserts, aes(
  1556. y = slope_insert, color = speed, group = interaction(insert_num, speed))) +
  1557. theme(legend.position = "top", legend.direction = "horizontal",
  1558. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  1559. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  1560. xlab("Time (TRs)") + ylab("Regression slope") +
  1561. scale_colour_manual(name = "Speed", values = colors) +
  1562. scale_fill_manual(name = "Speed", values = colors) +
  1563. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(0, 250)) +
  1564. guides(color = guide_legend(nrow = 1)) +
  1565. scale_x_continuous(labels = label_fill(seq(0, 250, 25), mod = 5), breaks = seq(0, 250, 25)) +
  1566. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  1567. theme(axis.line = element_line(colour = "black"),
  1568. panel.grid.major = element_blank(),
  1569. panel.grid.minor = element_blank(),
  1570. panel.border = element_blank(),
  1571. panel.background = element_blank())
  1572. fig_inserts_slopes
  1573. ```
  1574. ```{r}
# For a chosen difference variable (slope- or SD-based), average across all
# augmented rest TRs per participant/condition, then t-test the augmented-
# minus-original difference against 0 per speed x insert x SNR cell, with
# FDR correction across the 30 cells within each speed.
#
# Arguments:
#   dt:       data.table of TR-wise slopes/SDs (dt_pred_rest_insert_slope).
#   variable: name (string) of the difference column to test, either
#             "mean_slope_diff" or "mean_sd_diff".
# Returns a data.table with one row per classification x speed x insert x
# snr cell, including raw/adjusted/rounded p-values and SEM bounds.
insert_stat_func = function(dt, variable) {
  dt_pred_rest_insert_slope_stat = dt %>%
    # calculate mean slope coefficients across all augmented rest TRs:
    .[, by = .(id, classification, speed, insert, snr, snr_char), .(
      num_trs = .N,
      mean_slope_rest = mean(abs(slope_rest)),
      mean_slope_insert = mean(abs(slope_insert)),
      mean_sd_prob_rest = mean(sd_prob_rest),
      mean_sd_prob_insert = mean(sd_prob_insert)
      # num_rest_trs is taken from the calling environment (global):
    )] %>% verify(all(num_trs == num_rest_trs)) %>%
    # calculate the difference between the inserted and sequence-free rest data:
    mutate(mean_slope_diff = mean_slope_insert - mean_slope_rest) %>%
    mutate(mean_sd_diff = mean_sd_prob_insert - mean_sd_prob_rest) %>% setDT(.) %>%
    # conduct a t-test comparing sequence-free vs. sequence-including rests:
    .[, by = .(classification, speed, insert, snr, snr_char), {
      ttest_results = t.test(get(variable), mu = 0, alternative = "two.sided")
      list(
        num_subs = .N,
        mean_variable_diff = mean(get(variable)),
        pvalue = ttest_results$p.value,
        tvalue = round(ttest_results$statistic, 2),
        df = ttest_results$parameter,
        sem_upper = mean(get(variable)) + (sd(get(variable))/sqrt(.N)),
        sem_lower = mean(get(variable)) - (sd(get(variable))/sqrt(.N))
      )}] %>%
    verify(all(num_subs == 32)) %>%
    verify(all((num_subs - df) == 1)) %>%
    # adjust p-values for multiple comparisons:
    # check if the number of comparisons matches expectations:
    .[, by = .(classification, speed), ":=" (
      num_comp = .N,
      pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
    )] %>%
    # 30 comparisons = 6 insert levels x 5 SNR levels per speed:
    verify(all(num_comp == 30, na.rm = TRUE)) %>%
    # round the original p-values according to the apa standard:
    mutate(pvalue_round = round_pvalues(pvalue)) %>%
    # round the adjusted p-value:
    mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
    # create new variable indicating significance below 0.05
    mutate(significance = ifelse(pvalue_adjust < 0.05, "*", "")) %>%
    # log-transform the p-values:
    mutate(pvalue_log = log(pvalue_adjust, base = 20)) %>%
    # adjust the ordering of the snr levels:
    transform(snr_char = factor(snr_char, levels = c("0", "1/8", "1/4", "1/2", "4/5"))) %>%
    # set order:
    setorder(classification, speed, insert, snr) %>% setDT(.)
  return(dt_pred_rest_insert_slope_stat)
}
# run the statistics once for the slope-based and once for the SD-based measure:
snr_insert_stat_slope = insert_stat_func(dt = dt_pred_rest_insert_slope, variable = "mean_slope_diff")
snr_insert_stat_sd = insert_stat_func(dt = dt_pred_rest_insert_slope, variable = "mean_sd_diff")
# report the cells that remain significant after FDR correction:
snr_insert_stat_slope %>% filter(pvalue_adjust < 0.05)
snr_insert_stat_sd %>% filter(pvalue_adjust < 0.05)
  1628. ```
  1629. #### Figure 5f / 5g
  1630. Plot average slope regression coefficients and p-values:
  1631. ```{r}
# Color ramp for the five SNR levels (blue -> red, reversed), used for both
# line color and ribbon fill in the plots below:
cols_order = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
# Plot one insertion-analysis measure against the number of inserted sequence
# trials, one line per SNR level, faceted by sequence speed.
#   dt: stats table with columns insert, snr_char, speed, sem_lower, sem_upper
#       and the measure selected via `variable`.
#   variable: "pvalue_log", "mean_slope_diff", or "mean_sd_diff". The latter
#       two are remapped to the column name "mean_variable_diff" -- presumably
#       insert_stat_func stores the selected measure under that shared name;
#       TODO confirm against insert_stat_func.
plot_snr_insert = function(dt, variable) {
if (variable == "pvalue_log") {
yrange = seq(-10, 0, 1)
ylabel = "p-value\n(base-20 log-transformed)"
# dashed reference line at log20(p) = -1, i.e. p = 0.05:
yintercept = -1
mod = 2
# push the SEM ribbon far outside the plotted y-range so it is effectively
# invisible for the p-value panel (no meaningful SEM for p-values):
dt$sem_upper = 10; dt$sem_lower = 10;
}
else if (variable == "mean_slope_diff") {
variable = "mean_variable_diff"
yrange = seq(-0.007, 0.003, 0.001)
mod = 2
ylabel = "Regression slope\n(relative to pre-task rest)"
# reference line at zero, i.e. no change relative to pre-task rest:
yintercept = 0
}
else if (variable == "mean_sd_diff") {
variable = "mean_variable_diff"
yrange = seq(-0.03, 0.01, 0.01)
mod = 1
ylabel = "SD of probability\n(relative to pre-task rest)"
yintercept = 0
}
# lines + square points per SNR level, SEM ribbon, faceted by speed
# (fct_rev so the faster condition appears first):
ggplot(data = dt, mapping = aes(
x = as.factor(insert), y = as.numeric(get(variable)), group = as.factor(snr_char),
color = as.factor(snr_char), fill = as.factor(snr_char))) +
facet_wrap(~ fct_rev(as.factor(speed))) +
geom_hline(aes(yintercept = yintercept), color = "black", linetype = "dashed") +
geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
geom_line() + geom_point(pch = 15) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
xlab("Number of inserted sequence trials") + ylab(ylabel) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(min(yrange), max(yrange))) +
guides(color = guide_legend(nrow = 1)) +
scale_color_manual(values = cols_order, name = "SNR level") +
scale_fill_manual(values = cols_order, name = "SNR level") +
# label_fill (project helper) thins the axis labels by a factor of `mod`:
scale_y_continuous(labels = label_fill(yrange, mod = mod), breaks = yrange) +
theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
}
# Build the four panels: p-values and data, for slope and SD measures:
fig_snr_pmat_slope = plot_snr_insert(dt = snr_insert_stat_slope, variable = "pvalue_log")
fig_snr_data_slope = plot_snr_insert(dt = snr_insert_stat_slope, variable = "mean_slope_diff")
fig_snr_pmat_sd = plot_snr_insert(dt = snr_insert_stat_sd, variable = "pvalue_log")
fig_snr_data_sd = plot_snr_insert(dt = snr_insert_stat_sd, variable = "mean_sd_diff")
fig_snr_pmat_slope; fig_snr_data_slope; fig_snr_pmat_sd; fig_snr_data_sd;
  1683. ```
  1684. #### Source Data File Fig. 5f
  1685. ```{r}
  1686. snr_insert_stat_sd %>%
  1687. select(-num_subs, -classification, -pvalue, -tvalue, -df, -significance,
  1688. -num_comp, -pvalue_adjust, -pvalue_round, -pvalue_adjust_round) %>%
  1689. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5f.csv"),
  1690. row.names = FALSE)
  1691. ```
  1692. #### Source Data File Fig. 5g
  1693. ```{r}
  1694. snr_insert_stat_sd %>%
  1695. select(-num_subs, -classification, -pvalue, -tvalue, -df, -significance,
  1696. -num_comp, -pvalue_adjust, -pvalue_round, -pvalue_adjust_round) %>%
  1697. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5g.csv"),
  1698. row.names = FALSE)
  1699. ```
  1700. ### Frequency spectra (inserted)
  1701. #### Frequency spectra of augmented rest
  1702. We calculate the frequency spectra for sequence-free and sequence-inserted
  1703. (fast and slow sequences) rest:
  1704. ```{r}
# Baseline subtracted from the power ratio below so that 0 means "same power
# as the SNR-0 (sequence-free) rest condition":
baseline = 1
# get frequency spectrum of fast and slow sequence data:
dt_pred_rest_insert_freq = dt_pred_rest_insert_slope %>%
# calculate the lomb scargle frequency spectra for every snr and insert level
# (lsp from the lomb package; frequencies 0-0.3, oversampling factor 2):
.[, by = .(classification, id, speed, insert, snr, snr_char), {
frequency_spectrum = lsp(x = slope_insert, times = 1:.N, from = 0, to = 0.3, plot = FALSE, ofac = 2)
list(num_trs = .N, freq_bins = frequency_spectrum$scanned, power = frequency_spectrum$power,
# width (in bins) of a ~0.05-wide moving-average smoothing window:
filter_width = round(0.05/mean(diff(frequency_spectrum$scanned)))
)}] %>% verify(all(num_trs == 233)) %>% verify(length(unique(filter_width)) == 1) %>%
# smooth the frequency spectra (moving average via stats::filter; bins at
# the spectrum edges become NA):
.[, by = .(classification, id, speed, insert, snr, snr_char), ":=" (
power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
)] %>%
# add a counter for the frequency bins within each spectrum:
.[, by = .(classification, id, speed, snr, snr_char, insert), ":=" (bin = 1:.N)] %>%
# verify that the number of frequency bins is the same for all participants, snr, and insert levels:
verify(length(unique(.[, by = .(classification, id, speed, insert, snr), .(
num_bins = length(unique(bin)))]$num_bins)) == 1) %>%
# calculate power of every snr level relative to the snr 0 level
# (ratio minus baseline, so 0 = no change vs. sequence-free rest).
# NOTE(review): the as.vector(sapply(...)) assignment assumes rows within
# each group are ordered by snr, then bin -- confirm upstream ordering.
.[, by = .(classification, id, speed, insert), ":=" (
power_smooth_rel = as.vector(sapply(X = sort(unique(snr)), FUN = function(x){
power_smooth[snr == x]/power_smooth[snr == 0]}) - baseline)
)]
# calculate the mean (+/- SEM across participants) smoothed relative power
# for every snr and insert level, per frequency bin:
dt_pred_rest_insert_freq_mean = dt_pred_rest_insert_freq %>%
.[, by = .(classification, speed, snr, snr_char, insert, freq_bins_smooth, bin), .(
num_subs = .N,
mean_power_smooth_rel = mean(power_smooth_rel),
sem_upper = mean(power_smooth_rel) + (sd(power_smooth_rel)/sqrt(.N)),
sem_lower = mean(power_smooth_rel) - (sd(power_smooth_rel)/sqrt(.N))
)] %>% verify(all(num_subs == 32))
  1737. ```
  1738. #### Figure 5h
  1739. ```{r}
# Color ramp for the five SNR levels (blue -> red), matching Figures 5f/5g:
colors_snr = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
# Assemble the Fig. 5h plotting data: drop NA bins produced by the smoothing
# at the spectrum edges and keep only the 2- and 6-insert conditions:
plot_data = dt_pred_rest_insert_freq_mean %>%
filter(!is.na(mean_power_smooth_rel)) %>%
filter(insert %in% c(2, 6)) %>%
# singular/plural facet label; after the filter above insert is never 1,
# so the "inserts" branch always applies here:
mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts"))) %>%
# adjust the ordering of the snr levels:
transform(snr_char = factor(snr_char, levels = c("0", "1/8", "1/4", "1/2", "4/5"))) %>%
setDT(.)
# half-width of the shaded expected-frequency bands (x-axis units):
freq_range = 0.01
num_plot_inserts = length(unique(plot_data$insert))
num_speed_inserts = length(unique(plot_data$speed))
# NOTE(review): this overwrites the global frequency_expectation in place;
# any later chunk sees only its first three rows (which also makes the
# [1:3, ] on the next line redundant) -- confirm this is intended.
frequency_expectation <- frequency_expectation[1:3,]
# one shaded band per expected frequency, replicated across all facet
# (insert x speed) combinations:
frequency_expectation_snr = frequency_expectation[1:3,] %>%
mutate(colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")) %>%
.[rep(seq_len(nrow(.)), each = num_plot_inserts * num_speed_inserts), ] %>%
mutate(xmax = xintercept + freq_range, xmin = xintercept - freq_range) %>%
mutate(speed = rep(unique(plot_data$speed), num_plot_inserts * nrow(frequency_expectation))) %>%
# NOTE(review): 'each = 2' hard-codes the number of speed levels;
# presumably this should be num_speed_inserts -- confirm:
mutate(insert = rep(rep(unique(plot_data$insert), each = 2), nrow(frequency_expectation))) %>%
transform(xintercept = ifelse(colors == "gray", 0.03, xintercept)) %>%
# the gray (rest-frequency) band is not drawn:
filter(colors != "gray")
# Figure 5h: relative power spectra per SNR level, with shaded rectangles
# marking the expected sequence frequencies, faceted by insert count x speed:
fig_freq_lsp_insert = ggplot(data = plot_data, aes(
color = as.factor(snr_char), y = as.numeric(mean_power_smooth_rel), x = as.numeric(freq_bins_smooth))) +
geom_rect(data = frequency_expectation_snr, aes(
xmin = xmin, xmax = xmax, ymin = -0.2, ymax = 0.3),
alpha = 0.1, inherit.aes = FALSE, show.legend = FALSE, fill = frequency_expectation_snr$colors) +
#geom_vline(data = frequency_expectation_snr, aes(xintercept = xintercept),
# linetype = "dashed", color = frequency_expectation_snr$colors) +
geom_line() +
facet_grid(vars(insert), vars(fct_rev(as.factor(speed)))) +
# SEM ribbon around each SNR level's mean spectrum:
geom_ribbon(aes(fill = snr_char, ymin = sem_lower, ymax = sem_upper),
alpha = 0.3, color = NA) +
#geom_text(data = frequency_expectation_snr, aes(
# x = xintercept + 0.03, y = -0.15, label = paste(round(xintercept, 2), "Hz")),
# color = frequency_expectation_snr$colors, size = 3) +
xlab("Frequency") + ylab("Power\n(relative to pre-task rest)") +
scale_color_manual(name = "SNR level", values = colors_snr, guide = FALSE) +
scale_fill_manual(name = "SNR level", values = colors_snr, guide = FALSE) +
theme(legend.position = "top", legend.direction = "horizontal",
legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
xlim = c(0, 0.3), ylim = c(-0.2, 0.3)) +
#guides(color = guide_legend(nrow = 1)) +
theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
fig_freq_lsp_insert
  1790. ```
  1791. #### Source Data Fig. 5h
  1792. ```{r}
  1793. plot_data %>%
  1794. select(-classification, -bin, -num_subs) %>%
  1795. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5h.csv"),
  1796. row.names = FALSE)
  1797. ```
  1798. #### Expected frequency ranges
  1799. ```{r}
# For each participant / speed / SNR / insert level, average the relative
# power inside the expected frequency bands (fast, slow, rest).
# delta_freq_fast/slow/rest and freq_range are defined in earlier chunks.
dt_pred_rest_lsp_insert_exp = dt_pred_rest_insert_freq %>%
.[, by = .(classification, id, speed, snr, snr_char, insert), {
# indices of the smoothed frequency bins closest to each band edge:
fast_min = which.min(abs(freq_bins_smooth - delta_freq_fast + freq_range))
fast_max = which.min(abs(freq_bins_smooth - delta_freq_fast - freq_range))
slow_min = which.min(abs(freq_bins_smooth - delta_freq_slow + freq_range))
slow_max = which.min(abs(freq_bins_smooth - delta_freq_slow - freq_range))
rest_min = which.min(abs(freq_bins_smooth - 0.03 + freq_range))
rest_max = which.min(abs(freq_bins_smooth - 0.03 - freq_range))
list(
# band edges in frequency units (for the source-data export):
fast_min = freq_bins_smooth[fast_min], fast_max = freq_bins_smooth[fast_max],
slow_min = freq_bins_smooth[slow_min], slow_max = freq_bins_smooth[slow_max],
# mean relative power within each expected band:
power_index_fast = mean(power_smooth_rel[fast_min:fast_max]),
power_index_slow = mean(power_smooth_rel[slow_min:slow_max]),
power_index_rest = mean(power_smooth_rel[rest_min:rest_max])
)}] %>%
# melt all index variables into one column:
gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>% setDT(.) %>%
# label each index with its expected frequency (rounded to 2 decimals):
.[grepl("index_fast", index), label := paste0(round(delta_freq_fast, 2), "")] %>%
.[grepl("index_slow", index), label := paste0(round(delta_freq_slow, 2), "")] %>%
.[grepl("index_rest", index), label := paste0(round(delta_freq_rest, 2), "")] %>%
setorder(., classification, id, speed, snr, insert) %>%
# filter for specific insert levels here:
filter(insert %in% c(2, 6)) %>% setDT(.)
  1823. ```
  1824. ```{r}
# Test, for every speed / insert / SNR level / frequency index, whether the
# mean relative power in the expected band differs from zero (one-sample,
# two-sided t-tests; FDR-corrected within classification x speed).
dt_pred_rest_lsp_insert_exp_mean = dt_pred_rest_lsp_insert_exp %>%
# filter out the irrelevant rest power index:
filter(index != "power_index_rest") %>% setDT(.) %>%
# compare the mean power in the expected frequency for every SNR and insert level:
.[, by = .(classification, speed, insert, snr, snr_char, index), {
ttest_results = t.test(power, mu = 0, alternative = "two.sided", paired = FALSE)
list(
num_subs = .N,
pvalue = ttest_results$p.value,
tvalue = ttest_results$statistic,
df = ttest_results$parameter,
# effect size: one-sample Cohen's d against mu = 0:
cohens_d = (mean(power) - 0)/sd(power)
)}] %>% verify(all(num_subs == 32)) %>% verify((num_subs - df) == 1) %>%
# adjust p-values for multiple comparisons:
# check if the number of comparisons matches expectations:
.[, by = .(classification, speed), ":=" (
num_comp = .N,
pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
)] %>%
# round the original p-values according to the apa standard:
mutate(pvalue_round = round_pvalues(pvalue)) %>%
# round the adjusted p-value:
mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
# create new variable indicating significance below 0.05
mutate(significance = ifelse(pvalue_adjust < 0.05, "*", ""))
# filter for all p-values below the significance level:
# NOTE(review): this filters on the UNadjusted p-value, unlike the
# pvalue_adjust < 0.05 filters used elsewhere -- confirm this is intended.
dt_pred_rest_lsp_insert_exp_mean %>% filter(pvalue < 0.05)
  1852. ```
  1853. #### Figure 5i
  1854. ```{r}
  1855. cols_order = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
  1856. plot_data = dt_pred_rest_lsp_insert_exp %>%
  1857. # add to inserts label:
  1858. mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts"))) %>%
  1859. # adjust the ordering of the snr levels:
  1860. transform(snr_char = factor(snr_char, levels = c("0", "1/4", "1/8", "1/2", "4/5"))) %>%
  1861. # filter out the irrelvant rest power index:
  1862. filter(index != "power_index_rest") %>% setDT(.)
  1863. fig_freq_exp_insert = ggplot(data = plot_data, aes(
  1864. y = power, x = fct_rev(label), fill = snr_char, color = snr_char, group = snr)) +
  1865. geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
  1866. width = 0.8) +
  1867. geom_point(position = position_jitterdodge(
  1868. jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
  1869. alpha = 0.1, inherit.aes = TRUE, pch = 16) +
  1870. facet_grid(vars(insert), vars(fct_rev(as.factor(speed)))) +
  1871. stat_summary(fun.data = "mean_se", geom = "errorbar",
  1872. position = position_dodge(0.9), width = 0, color = "black") +
  1873. xlab("Predicted frequency") + ylab("Power\n(relative to pre-task rest)") +
  1874. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-0.1, 0.3)) +
  1875. scale_fill_manual(values = cols_order, name = "SNR level", guide = FALSE) +
  1876. scale_color_manual(values = cols_order, name = "SNR level", guide = FALSE) +
  1877. theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  1878. theme(legend.position = "top", legend.direction = "horizontal",
  1879. legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
  1880. legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  1881. #guides(color = guide_legend(nrow = 1)) +
  1882. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  1883. theme(axis.line = element_line(colour = "black"),
  1884. panel.grid.major = element_blank(),
  1885. panel.grid.minor = element_blank(),
  1886. panel.border = element_blank(),
  1887. panel.background = element_blank())
  1888. fig_freq_exp_insert
  1889. ```
  1890. #### Source Data Fig. 5i
  1891. ```{r}
  1892. plot_data %>%
  1893. select(-classification, -fast_min, -fast_max, -slow_min, -slow_max) %>%
  1894. write.csv(., file = file.path(path_sourcedata, "source_data_figure_5i.csv"),
  1895. row.names = FALSE)
  1896. ```
  1897. ### Figure 5
  1898. ```{r}
  1899. panel_1 = plot_grid(fig_sd_prob, fig_mean_beta_abs, ncol = 2, labels = c("a", "b"))
  1900. panel_2 = plot_grid(fig_prob_slope, ncol = 1, labels = c("c"))
  1901. panel_3 = plot_grid(fig_freq_lsp, fig_freq_exp, ncol = 2, labels = c("d", "e"))
  1902. panel_4 = plot_grid(fig_snr_data_sd, fig_snr_pmat_sd, labels = c("f", "g"))
  1903. panel_5 = plot_grid(fig_freq_lsp_insert, fig_freq_exp_insert, labels = c("h", "i"))
  1904. plot_grid(plot_grid(panel_1, panel_2, ncol = 2), panel_3, panel_4, panel_5, ncol = 1)
  1905. ```
  1906. ```{r, eval=FALSE}
  1907. ggsave(filename = "highspeed_plot_decoding_rest.pdf",
  1908. plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  1909. dpi = "retina", width = 10, height = 12)
  1910. ```
  1911. ```{r}
  1912. ggsave(filename = "wittkuhn_schuck_figure_5.pdf",
  1913. plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  1914. dpi = "retina", width = 10, height = 12)
  1915. ```
  1916. ## Supplementary Figure
  1917. ```{r}
  1918. plot_grid(fig_prob_time_seq, fig_prob_diff_snr, fig_inserts, fig_inserts_slopes,
  1919. fig_snr_pmat_slope, fig_snr_data_slope, labels = "auto", ncol = 2, nrow = 3)
  1920. ```
  1921. ```{r, eval=FALSE}
  1922. ggsave(filename = "highspeed_plot_decoding_rest_supplement.pdf",
  1923. plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  1924. dpi = "retina", width = 10, height = 10)
  1925. ```