Scheduled service maintenance on November 22


On Friday, November 22, 2024, between 06:00 CET and 18:00 CET, GIN services will undergo planned maintenance. Extended service interruptions should be expected. We will try to keep downtimes to a minimum, but recommend that users avoid critical tasks, large data uploads, or DOI requests during this time.

We apologize for any inconvenience.

highspeed-analysis-resting.Rmd 97 KB

<!-- NOTE: the following was a line-number gutter captured from the repository file viewer; it is not part of the R Markdown document. -->
  1. ## Resting-state
  2. ### Preparation
  3. #### Initialization
  4. We load the data and relevant functions:
  5. ```{r, warning=FALSE, message=FALSE, echo=TRUE}
  6. # find the path to the root of this project:
  7. if (!requireNamespace("here")) install.packages("here")
  8. if ( basename(here::here()) == "highspeed" ) {
  9. path_root = here::here("highspeed-analysis")
  10. } else {
  11. path_root = here::here()
  12. }
  13. # source all relevant functions from the setup R script:
  14. source(file.path(path_root, "code", "highspeed-analysis-setup.R"))
  15. # list of participants with chance performance on sequence and repetition trials:
  16. chance_performer = c("sub-24", "sub-31", "sub-37", "sub-40")
  17. ```
  18. #### Prepare sequence events data
  19. We select all events files for sequence trials when the stimuli were presented:
  20. ```{r, echo=TRUE}
  21. # create a subset of the events data table only including sequence task events:
  22. dt_events_seq = subset(dt_events, condition == "sequence" & trial_type == "stimulus")
  23. rmarkdown::paged_table(dt_events_seq)
  24. ```
  25. #### Prepare resting decoding data
  26. We prepare the resting state data for analysis:
  27. ```{r, results = "hold"}
# Build `dt_pred_rest`: resting-state classifier probabilities with randomly
# assigned serial "positions" per class (rest has no true event order).
# NOTE(review): `mapvalues` presumably comes from plyr, loaded via the sourced
# setup script -- confirm. `sample()` makes the position assignment random, so
# results depend on the RNG state at this point in the document.
  28. # create a subset that only includes the resting state data:
  29. dt_pred_rest = dt_pred %>%
  30. # filter for rest only data that was created by the union mask:
  31. filter(condition == "rest" & class != "other" & mask == "union") %>%
  32. # sort the data frame by classification, session, etc.:
  33. setorder(id, classification, session, run_study, seq_tr, class) %>%
  34. # add random positions for the events detected during resting state:
  35. # group by classification and participants to generate class specific indices:
  36. group_by(classification, id) %>%
  37. # create class specific position indices randomly for every participant:
  38. group_modify( ~ { .x %>% mutate(position = mapvalues(
  39. .x$class, from = unique(.x$class), to = sample(length(unique(.x$class)))))}) %>%
  40. ungroup %>%
  41. transform(position = as.numeric(position)) %>%
  42. setDT(.) %>%
  43. # there should be a unique position for every class in every participant:
  44. verify(all(.[, by = .(classification, id, class), .(
  45. unique_positions = length(unique(position))
  46. )]$unique_positions == 1)) %>%
  47. # verify that every class is represented by all indices across participants:
  48. verify(all(.[, by = .(classification, class), .(
  49. unique_positions = length(unique(position))
  50. )]$unique_positions == 5)) %>%
  51. # check that the number of TRs for each resting state run is correct
  52. verify(all(.[, by = .(classification, id, run_study, session, class), .(
  53. num_trs = .N)]$num_trs == 233))
  54. ```
  55. We did not acquire pre-task resting-state data in Session 1.
  56. We filter for these participants below:
  57. ```{r, results = "hold"}
  58. # create an array with all participant ids who do not have all four resting state runs:
  59. ids_no_pre_s1 = unique(
  60. (dt_pred_rest %>% setDT(.) %>%
  61. .[, by = .(id, session), .(num_runs = length(unique(run_study)))] %>%
  62. filter(!(num_runs == 2)))$id)
  63. # print the ids of the four participants without pre-task session 1 rest:
  64. print(ids_no_pre_s1)
  65. ```
  66. We select data from all remaining participants with pre-task session 1 rest:
  67. ```{r, results = "hold"}
# Build `dt_pred_rest_s1`: resting-state data restricted to participants with
# complete data, with run labels made unique across sessions.
  68. dt_pred_rest_s1 = dt_pred_rest %>%
  69. # exclude participants without session 1 pre-task rest:
  70. filter(!(id %in% c(ids_no_pre_s1))) %>%
  71. # exclude participants who performed below chance in seq and rep trials:
  72. filter(!(id %in% c(chance_performer))) %>%
  73. # filter for pre-task resting state data in session 1 (disabled alternative):
  74. #filter(run_study == "run-pre" & session == "ses-01") %>%
  75. # filter for the one-versus-rest classification approach:
  76. filter(classification == "ovr") %>%
  77. # filter rest data to the length of concatenated sequence data (disabled):
  78. #filter(seq_tr %in% seq(1, 180)) %>%
  79. # check if the number of participants is correct:
  80. verify(length(unique(id)) == 32) %>%
  81. setDT(.) %>%
  82. # create unique run indices through the combination of run and session labels:
  83. .[grepl("run-pre", run_study, fixed = TRUE) & session == "ses-01",
  84. run_study := "run-pre_ses-01"] %>%
  85. .[grepl("run-pre", run_study, fixed = TRUE) & session == "ses-02",
  86. run_study := "run-pre_ses-02"] %>%
  87. .[grepl("run-post", run_study, fixed = TRUE) & session == "ses-01",
  88. run_study := "run-post_ses-01"] %>%
  89. .[grepl("run-post", run_study, fixed = TRUE) & session == "ses-02",
  90. run_study := "run-post_ses-02"] %>%
  91. setDT(.)
  92. ```
  93. #### Prepare sequence decoding data
  94. We prepare the sequence trial data that will be combined with the resting state data:
  95. ```{r}
# Build `dt_pred_seq`: sequence-trial decoding data aligned to the resting
# data (same participants, serial positions added, forward/backward TRs only).
# NOTE(review): `get_pos()` is a project helper defined in the sourced setup
# script; its exact return contract is not visible here -- confirm.
  96. # create a list of all participants that are excluded from the resting state analysis:
  97. sub_exclude_rest = unique(dt_events_seq$subject[
  98. !(dt_events_seq$subject %in% dt_pred_rest_s1$id)])
  99. # create a subset of the decoding data only including the sequence task data:
  100. dt_pred_seq = dt_pred %>%
  101. # keep sequence stimuli classified with the cross-validated (cv) mask only:
  102. filter(condition == "sequence" & class != "other" & mask == "cv" & stim != "cue") %>%
  103. setDT(.) %>%
  104. # add the serial position, change trial and target cue to the sequence data table:
  105. .[, c("position", "change", "trial_cue", "trial_cue_position") := get_pos(
  106. .SD, dt_events_seq), by = .(id, trial, class),
  107. .SDcols = c("id", "trial", "class")] %>%
  108. # remove columns not needed to align data table with resting state data:
  109. mutate(change = NULL, trial_cue = NULL, trial_cue_position = NULL) %>%
  110. # rename the speed condition to include the "ms" label and express in milliseconds:
  111. transform(tITI = paste(as.character(as.numeric(tITI) * 1000), "ms")) %>%
  112. # remove participants that should be excluded from resting state analysis:
  113. filter(!(id %in% sub_exclude_rest)) %>%
  114. setDT(.) %>%
  115. # only select TRs of the forward and backward period
  116. filter(seq_tr %in% seq(2, 13)) %>%
  117. setDT(.) %>%
  118. # check if the number of TRs for each participant is correct:
  119. verify(.[, by = .(classification, id, tITI, trial_tITI, class), .(
  120. num_trs = .N)]$num_trs == 12)
  121. ```
  122. #### Combine sequence and resting data
  123. We combine the sequence and resting state decoding data in one data table:
  124. ```{r, echo=TRUE}
# Combine sequence and resting-state decoding data into `dt_pred_all`,
# keeping only the 32 ms / 2048 ms speeds and the selected rest runs.
  125. #c("run-pre_ses-01", "run-post_ses-01", "run-pre_ses-02", "run-post_ses-02")
  126. deselect_rest_run <- c("run-pre_ses-02", "run-post_ses-02")
  127. dt_pred_all = rbind(dt_pred_seq, dt_pred_rest_s1) %>%
  128. # filter for pre-task resting state of session 1 only:
  129. filter(!(run_study %in% deselect_rest_run)) %>%
  130. # reorder the factor levels of the combined data table to sort in ascending order:
# NOTE(review): the index c(3,5,1,4,2,6) assumes a fixed (alphabetical) level
# order of the tITI labels -- confirm this still holds if labels ever change.
  131. transform(tITI = factor(tITI, levels(as.factor(tITI))[c(3,5,1,4,2,6)])) %>%
  132. # concatenate all session 1 pre-task resting state scans and add counter:
  133. filter(tITI %in% c("32 ms", "2048 ms", "rest")) %>%
  134. setDT(.) %>%
  135. # adjust resting state grouping variable to accommodate multiple runs:
  136. .[tITI == "rest", tITI := paste0(tITI, "_", run_study)] %>%
  137. # filter out one-versus-rest classification trials only:
  138. filter(classification == "ovr") %>%
  139. setDT(.) %>%
  140. # sort the data table:
  141. setorder(id, condition, tITI, trial_tITI, seq_tr, position) %>%
  142. setDT(.)
  143. ```
  144. We use a short function to run some checks on the combined data table:
  145. ```{r, echo=TRUE}
# checkdata(): run assertr sanity checks on the combined data table and return
# the (unchanged) table, so the call can be wrapped in rmarkdown::paged_table().
  146. checkdata = function(dt){
  147. library(data.table); library(tidyverse);
  148. dt %>%
  149. # check the number of trs in all resting state data:
  150. verify(.[stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, class), .(
  151. num_trs = .N
  152. )]$num_trs == 233) %>%
  153. # check the number of trs in all sequence trial data:
  154. verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, trial_tITI, class), .(
  155. num_trs = .N
  156. )]$num_trs == 12) %>%
  157. # check the number of unique trials for each sequence speed condition:
  158. verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI), .(
  159. num_trials = length(unique(trial_tITI))
  160. )]$num_trials == 15) %>%
  161. # check the number of speed conditions in sequence trials
  162. verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id), .(
  163. num_speeds = length(unique(tITI))
  164. )]$num_speeds == 2) %>%
  165. # check the number of speed conditions in resting state data:
  166. verify(.[stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI), .(
  167. num_speeds = length(unique(tITI))
  168. )]$num_speeds == 1) %>%
  169. # check the mask name in resting state data:
  170. verify(all(.[stringr::str_detect(tITI, "rest"), .(name_mask = unique(mask))]$name_mask == "union")) %>%
  171. # check the mask name in sequence trial data:
  172. verify(all(.[!stringr::str_detect(tITI, "rest"), .(name_mask = unique(mask))]$name_mask == "cv")) %>%
# NOTE(review): the final verify repeats the TR-count check from above almost
# verbatim; the comment mentions "run study", so a different check may have
# been intended here -- confirm.
  173. # check if the number of items per run study is as expected:
  174. verify(all(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, trial_tITI, class), .(
  175. num_trs = .N
  176. )]$num_trs == 12))
  177. }
  178. # check the current data table:
  179. rmarkdown::paged_table(checkdata(dt_pred_all))
  180. ```
  181. ### Decoding probabilities
  182. We draw a random example participant used for plotting:
  183. ```{r}
  184. set.seed(3)
  185. example_id = sample(unique(dt_pred_all$id), 1)
  186. ```
  187. We concatenate all fast and slow sequence trials and resting state:
  188. ```{r}
  189. dt_pred_conc = dt_pred_all %>%
  190. # we add a consecutive counter that will be used to concatenate data:
  191. .[, by = .(classification, id, tITI, class), ":=" (t = seq(1, .N))] %>%
  192. # omit the last TRs in the resting state data:
  193. filter(!(stringr::str_detect(tITI, "rest") & seq_tr > 180)) %>%
  194. # we will focus all further analyses on the one-versus-rest classifier:
  195. filter(classification == "ovr") %>%
  196. setDT(.) %>%
  197. # verify that both resting and sequence trial data are restricted to 180 TRs:
  198. verify(.[, by = .(classification, id, tITI, class, tITI), .(
  199. num_trs = .N)]$num_trs == 180) %>%
  200. setDT(.)
  201. ```
  202. We plot the probabilities of all concatenated sequence trials and the session 1
  203. resting state data (separately for each speed condition):
  204. ```{r, echo=FALSE}
# Plot the decoding probability time courses of the example participant,
# one panel per condition (fast, slow, concatenated rest).
# NOTE(review): x is t * 1.25 although the axis label says "Time (TRs)";
# 1.25 presumably converts TRs to seconds -- confirm label vs. scaling.
  205. fig_prob_time_seq = ggplot(data = subset(dt_pred_conc, id == example_id), mapping = aes(
  206. x = as.numeric(t) * 1.25, y = as.numeric(probability * 100), group = as.factor(position))) +
  207. geom_line(mapping = aes(color = as.factor(position))) +
  208. facet_wrap(~ as.factor(tITI)) +
  209. theme(legend.position = "top", legend.direction = "horizontal",
  210. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  211. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  212. xlab("Time (TRs)") + ylab("Probability (%)") +
  213. coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
  214. ylim = c(0, 100), xlim = c(0, 200)) +
  215. theme(panel.border = element_blank(), axis.line = element_line()) +
  216. theme(axis.line = element_line(colour = "black"),
  217. panel.grid.major = element_blank(),
  218. panel.grid.minor = element_blank(),
  219. panel.border = element_blank(),
  220. panel.background = element_blank()) +
# NOTE(review): `guide = FALSE` is deprecated in recent ggplot2 (use
# guide = "none"); kept as-is to preserve behavior on the pinned version.
  221. scale_colour_manual(name = "Serial event", values = color_events, guide = FALSE) +
  222. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  223. guides(color = guide_legend(nrow = 1))
  224. fig_prob_time_seq
  225. ```
  226. ### Calculating sequentiality
  227. We create a function to determine whether a certain sequential combination of the task stimuli has been presented to the participants on sequence trials or not:
  228. ```{r}
# did_you_see_that(): return 1 if the stimulus sequence `seq` (a character
# vector of five labels in serial-position order) was shown to the single
# participant contained in `dt_events_seq`, and 0 otherwise.
# NOTE(review): the parameter name `seq` shadows base::seq inside this
# function body; calls written as seq(...) would still dispatch to the
# base function, but the name is unfortunate.
  229. did_you_see_that <- function(seq, dt_events_seq) {
  230. dt_events_seq_sub <- dt_events_seq %>%
  231. #filter(subject == sub) %>%
  232. setDT(.) %>%
  233. # calculate if the input sequence was seen for every trial:
  234. .[, by = .(subject, interval_time, trial), .(
  235. num_events = .N,
  236. seen = as.numeric(all(stim_label[serial_position] == seq))
  237. )] %>%
  238. # check if the number of events per sequence is 5:
  239. verify(num_events == 5) %>%
  240. # the number of seen sequences should be either 0 (not seen) or 5 (seen five
  241. # times for the five different speed conditions):
  242. verify(.[, by = .(subject), .(sum_seen = sum(seen))]$sum_seen %in% c(0, 5)) %>%
  243. # for each speed condition, the number should be 1 (seen) or 0 (not seen):
  244. verify(.[, by = .(subject, interval_time), .(sum_seen = sum(seen))]$sum_seen %in% c(0, 1))
  245. # return if sequence was seen (1) or not (0):
  246. seen <- as.numeric(sum(dt_events_seq_sub$seen) == 5)
  247. return(seen)
  248. }
  249. ```
  250. We verify that the function works properly:
  251. ```{r, message=FALSE}
  252. sequences = permn(1:5, sort = TRUE)
  253. # list of all possible stimuli:
  254. seq <- c("cat", "chair", "face", "house", "shoe")
  255. # select one example participant:
  256. sub = "sub-14"
  257. # determine if participant saw 15 combinations of the five stimuli:
  258. sum_seq = sum(unlist(lapply(X = sequences, function(x)
  259. did_you_see_that(seq = seq[x], subset(dt_events_seq, subject == sub)))))
  260. verify(data.table(sum_seq), sum_seq == 15)
  261. ```
  262. We calculate the regression slopes for every TR and assumed sequence in resting-state data:
  263. ```{r}
# For every resting-state TR, regress the five class probabilities on each of
# the 120 permuted position orderings and store the slope: the permutation-
# based sequentiality measure for rest. This fits one lm() per permutation
# and TR, so this chunk is computationally heavy (hence the timing below).
  264. # create an array with all the stimulus labels:
  265. stimuli <- c("cat", "chair", "face", "house", "shoe")
  266. # register the computation start time:
  267. start_time <- Sys.time()
  268. # calculate sd probability, slope for every permutation of a sequence:
  269. dt_pred_conc_slope_rest = dt_pred_conc %>%
  270. # filter for resting state analyses only:
  271. filter(stringr::str_detect(tITI, "rest")) %>%
  272. setDT(.) %>%
  273. # calculate sd probability, slope and if sequence was seen or not:
  274. .[, by = .(id, classification, tITI, trial_tITI, seq_tr), .(
  275. # calculate the number of events per TR and the mean position:
  276. num_events = .N,
  277. mean_position = mean(position),
  278. # calculate the standard deviation of the probabilities:
  279. sd_prob = sd(probability),
# the slope sign is flipped (* -1), matching the sequence-trial slope
# calculation in the next chunk:
  280. # calculate the slope for each permuted sequence:
  281. slope = unlist(lapply(sequences, function(x) coef(lm(probability ~ x))[2] * (-1))),
  282. # determine if the permuted sequence was actually seen by the subject:
  283. seen = unlist(lapply(sequences, function(x)
  284. did_you_see_that(seq = stimuli[x], subset(dt_events_seq, subject == unique(id)))))
  285. )] %>%
  286. # add a counter for all sequential combinations:
  287. .[, by = .(id, classification, tITI, trial_tITI, seq_tr), "sequence" := seq(1,.N)]
  288. # stop the counter and print the calculation time:
  289. end_time = Sys.time()
  290. print(end_time - start_time)
  291. ```
  292. We calculate the regression slopes for sequence data:
  293. ```{r}
# For every sequence-trial TR, regress probability on the true serial
# position; sequence trials have a single (actually seen) ordering, so
# `seen` and `sequence` are constant.
  294. dt_pred_conc_slope_seq = dt_pred_conc %>%
  295. # filter for sequence data only:
  296. filter(!stringr::str_detect(tITI, "rest")) %>%
  297. base::droplevels() %>%
  298. setDT(.) %>%
  299. # calculate the slope for every participant, speed and TR:
  300. .[, by = .(id, classification, tITI, trial_tITI, seq_tr), .(
  301. # calculate the number of events per TR and the mean position:
  302. num_events = .N,
  303. mean_position = mean(position),
  304. # calculate the standard deviation of the probabilities:
  305. sd_prob = sd(probability),
  306. # calculate the slope (sign flipped, as for the resting-state slopes):
  307. slope = coef(lm(probability ~ position))[2] * (-1)
  308. )] %>%
  309. # add variables if sequence was seen or not and sequence counter:
  310. .[, "seen" := 1] %>%
  311. .[, "sequence" := 1]
  312. ```
  313. We combine the resting state and sequence trial slope data:
  314. ```{r}
# Combine rest and sequence slopes; add time counters, trial-start markers,
# and readable labels for the resting-state runs.
  315. dt_pred_conc_slope = rbind(dt_pred_conc_slope_seq, dt_pred_conc_slope_rest) %>%
  316. # verify that there are 5 events per TR and their mean position is 3 (mean of 1:5):
  317. verify(all(num_events == 5)) %>%
  318. verify(all(mean_position == 3)) %>%
  319. # sort the columns by classification approach, id, speed, trial and TR:
  320. setorder(classification, id, tITI, trial_tITI, seq_tr) %>%
  321. # add trial counter for every speed condition (and sequence):
  322. .[stringr::str_detect(tITI, "rest"),
  323. by = .(classification, id, tITI, sequence), ":=" (t = seq(1, .N))] %>%
  324. .[!stringr::str_detect(tITI, "rest"),
  325. by = .(classification, id, tITI), ":=" (t = seq(1, .N))] %>%
  326. # group by classification, id, speed and trial and calculate step sizes:
  327. .[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI),
  328. trial_start := trial_tITI - shift(trial_tITI)] %>%
  329. # create a variable that signifies the trial starts for sequence trials:
  330. transform(trial_start = ifelse(
  331. is.na(trial_start) & !stringr::str_detect(tITI, "rest"), 1, trial_start)) %>%
  332. # create new variable names for resting state data:
  333. .[tITI == "rest_run-pre_ses-01", tITI := "Rest Pre S1"] %>%
  334. .[tITI == "rest_run-post_ses-01", tITI := "Rest Post S1"] %>%
  335. .[tITI == "rest_run-pre_ses-02", tITI := "Rest Pre S2"] %>%
  336. .[tITI == "rest_run-post_ses-02", tITI := "Rest Post S2"] %>%
  337. # sort the factor levels of the speed conditions:
  338. transform(tITI = factor(tITI, levels = c(
  339. "32 ms", "2048 ms", "Rest Pre S1", "Rest Post S1",
  340. "Rest Pre S2", "Rest Post S2"))) %>%
  341. # rename the factor for seen (1) and unseen (0) sequences:
  342. .[, seen := ifelse(seen == 1, "More frequent", "Less frequent")] %>%
  343. setDT(.)
  344. ```
  345. ### Mean slope coefficients
  346. #### Fixed event order
  347. We calculate the mean slope coefficients assuming a fixed ordering of sequence events for sequence trials:
  348. ```{r}
# Average the fixed-order (sequence == 1) slopes across participants for
# every condition and time point.
  349. # calculate the mean slope across participants for each time point:
  350. dt_pred_conc_slope_mean <- dt_pred_conc_slope %>%
  351. filter(sequence == 1) %>%
  352. setDT(.) %>%
  353. .[, by = .(classification, tITI, t, trial_start), .(
  354. num_subs = .N,
  355. mean_slope = mean(slope),
# standard error of the mean, expressed as upper/lower ribbon bounds:
  356. sem_upper = mean(slope) + (sd(slope)/sqrt(.N)),
  357. sem_lower = mean(slope) - (sd(slope)/sqrt(.N))
  358. )] %>%
  359. verify(all(num_subs == 32)) %>%
  360. filter(classification == "ovr") %>%
  361. # sort the factor levels of the speed conditions:
  362. transform(tITI = factor(tITI, levels = c(
  363. "2048 ms", "32 ms", "Rest Pre S1", "Rest Post S1",
  364. "Rest Pre S2", "Rest Post S2"))) %>%
  365. setDT(.)
  366. ```
  367. Plot the mean regression coefficient separately for
  368. all sequence speed conditions and the pre-task resting state data.
  369. Trial boundaries (for sequence trials) are shown as gray vertical lines.
  370. ```{r, echo=FALSE}
# Plot the mean regression slope over time, one panel per condition, with
# SEM ribbons and dashed lines at sequence-trial starts.
  371. grays = colorRampPalette(c("lightgray", "black"))
  372. #colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
  373. colors = c(hcl.colors(5, "Cividis")[c(5,1)], grays(4))
# NOTE(review): at this point the rest conditions are labeled "Rest ..."
# (capital R), so str_detect(tITI, "rest") matches nothing; the
# filter(trial_start == 1) alone presumably already drops all rest rows
# (their trial_start should be NA) -- confirm this is intended.
  374. plot_trial_starts = dt_pred_conc_slope_mean %>%
  375. filter(trial_start == 1, !stringr::str_detect(tITI, "rest"))
  376. fig_prob_slope = ggplot(
  377. data = dt_pred_conc_slope_mean %>% filter(tITI != "Rest Post S1"),
  378. mapping = aes(
  379. x = as.numeric(t), y = as.numeric(mean_slope), color = as.factor(tITI),
  380. fill = as.factor(tITI))) +
  381. facet_wrap(~ as.factor(tITI), ncol = 1) +
  382. geom_vline(data = plot_trial_starts, aes(xintercept = t), linetype = "dashed", alpha = 0.1) +
  383. geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
  384. geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
  385. geom_line() +
  386. xlab("Time (TRs)") + ylab("Regression slope") +
  387. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(1, 200)) +
  388. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  389. scale_color_manual(values = colors, guide = FALSE) +
  390. scale_fill_manual(values = colors, guide = FALSE) +
  391. scale_y_continuous(labels = label_fill(seq(-0.1, 0.1, 0.05), mod = 2),
  392. breaks = seq(-0.1, 0.1, 0.05)) +
  393. theme(panel.border = element_blank(), axis.line = element_line()) +
  394. theme(axis.line = element_line(colour = "black"),
  395. panel.grid.major = element_blank(),
  396. panel.grid.minor = element_blank(),
  397. panel.border = element_blank(),
  398. panel.background = element_blank())
  399. fig_prob_slope
  400. ```
  401. We calculate the mean regression slopes and compare sequence and rest:
  402. ```{r}
# Per-participant summary statistics of the fixed-order slopes: mean absolute
# slope, absolute mean slope, and mean SD of the class probabilities.
  403. # calculate mean regression slopes for fast and slow sequence trials and rest:
  404. dt_pred_conc_slope_stat = dt_pred_conc_slope %>%
  405. filter(sequence == 1) %>%
  406. setDT(.) %>%
  407. .[, by = .(classification, id, tITI), .(
  408. num_trs = .N,
  409. mean_abs_slope = mean(abs(slope)),
  410. abs_mean_slope = abs(mean(slope)),
  411. mean_sd_prob = mean(sd_prob)
  412. )] %>% verify(all(num_trs == 180)) %>%
  413. filter(classification == "ovr") %>%
  414. transform(tITI = factor(tITI, levels = c(
  415. "32 ms", "2048 ms", "Rest Pre S1", "Rest Post S1",
  416. "Rest Pre S2", "Rest Post S2"))) %>%
  417. setDT(.)
  418. ```
  419. ```{r, results=hold}
  420. # compare mean probabilities between resting state and fast sequence data:
  421. mean_sd_rest_pre = subset(dt_pred_conc_slope_stat, tITI == "Rest Pre S1")$mean_sd_prob
  422. mean_sd_rest_post = subset(dt_pred_conc_slope_stat, tITI == "Rest Post S1")$mean_sd_prob
  423. mean_sd_fast = subset(dt_pred_conc_slope_stat, tITI == "32 ms")$mean_sd_prob
  424. mean_sd_slow = subset(dt_pred_conc_slope_stat, tITI == "2048 ms")$mean_sd_prob
  425. fast_vs_rest_pre = t.test(x = mean_sd_rest_pre, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
  426. fast_vs_rest_post = t.test(x = mean_sd_rest_post, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
  427. fast_vs_slow = t.test(x = mean_sd_slow, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
  428. slow_vs_rest_pre = t.test(x = mean_sd_rest_pre, y = mean_sd_slow, alternative = "two.sided", paired = TRUE)
  429. rest_pre_vs_rest_post = t.test(x = mean_sd_rest_pre, y = mean_sd_rest_post, alternative = "two.sided", paired = TRUE)
  430. print(round(c(mean(mean_sd_rest_pre), mean(mean_sd_fast), mean(mean_sd_slow)), 2))
  431. round_pvalues(p.adjust(
  432. c(fast_vs_rest_pre$p.value, fast_vs_slow$p.value, slow_vs_rest_pre$p.value,
  433. rest_pre_vs_rest_post$p.value, fast_vs_rest_post$p.value), method = "fdr"))
  434. cohensd_rest = round((mean(mean_sd_rest_pre) - mean(mean_sd_fast)) / sd(mean_sd_rest_pre - mean_sd_fast), 2)
  435. cohensd_slow = round((mean(mean_sd_slow) - mean(mean_sd_fast)) / sd(mean_sd_slow - mean_sd_fast), 2)
  436. print(cohensd_rest); print(cohensd_slow)
  437. fast_vs_rest_pre; fast_vs_slow
  438. ```
  439. Plot the mean absolute regression slope for all speed conditions
  440. and pre-task resting state data:
  441. ```{r}
  442. plot_means = function(data, variable, ylabel, ylim){
  443. #colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
  444. colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(4))
  445. fig_mean_beta = ggplot(
  446. data = data %>% filter(tITI != "Rest Post S1"),
  447. aes(
  448. y = get(variable), x = tITI, fill = tITI)) +
  449. geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9)) +
  450. geom_point(position = position_jitter(width = 0.1, height = 0, seed = 2),
  451. alpha = 1, inherit.aes = TRUE, pch = 21,
  452. color = "white") +
  453. stat_summary(fun.data = mean_se, geom = "errorbar",
  454. position = position_dodge(0.9), width = 0, color = "black") +
  455. xlab("Data") + ylab(ylabel) +
  456. coord_capped_cart(left = "both", expand = TRUE, ylim = ylim) +
  457. scale_fill_manual(values = colors, guide = FALSE) +
  458. theme(axis.ticks.x = element_blank(), axis.line.x = element_blank(),
  459. axis.title.x = element_blank()) +
  460. theme(axis.text.x = element_text(angle = 45, hjust = 0.9)) +
  461. theme(panel.border = element_blank(), axis.line.y = element_line()) +
  462. theme(axis.line.y = element_line(colour = "black"),
  463. panel.grid.major = element_blank(),
  464. panel.grid.minor = element_blank(),
  465. panel.background = element_blank())
  466. }
  467. fig_mean_beta = plot_means(data = dt_pred_conc_slope_stat, variable = "abs_mean_slope",
  468. ylabel = "Absolute mean regression slope", ylim = c(0, 0.02))
  469. fig_mean_beta_abs = plot_means(data = dt_pred_conc_slope_stat, variable = "mean_abs_slope",
  470. ylabel = "Regression slope (abs)", ylim = c(0, 0.1))
  471. fig_sd_prob = plot_means(data = dt_pred_conc_slope_stat, variable = "mean_sd_prob",
  472. ylabel = "SD of probability", ylim = c(0, 0.4))
  473. fig_mean_beta; fig_mean_beta_abs; fig_sd_prob;
  474. ```
  475. #### All possible event orders
  476. We calculate the mean slope for every time point (for every TR) depending on
  477. if a sequence was actually seen or not (relevant for resting state data).
  478. Note, that by this definition, all sequences on sequence trials were "seen".
  479. ```{r}
# calculate the mean slope across participants for each time point:
# Input: dt_pred_conc_slope holds one regression slope per participant,
# condition, time point (t) and (for permuted rest data) candidate sequence.
dt_pred_conc_slope_mean_seen <- dt_pred_conc_slope %>%
  # average slopes for each participant and each time point:
  .[, by = .(id, classification, tITI, t, trial_start, seen), .(
    num_seq = .N,
    mean_slope_sub = mean(slope)
  )] %>%
  # number of sequences will be 1 (sequence conditions), 15 (seen permuted
  # sequence condition), or 105 (not seen permuted sequence conditions):
  verify(num_seq %in% c(1, 15, 105)) %>%
  # calculate the mean slope across participants (including SEM):
  .[, by = .(classification, tITI, t, trial_start, seen), .(
    num_subs = .N,
    mean_slope = mean(mean_slope_sub),
    sem_upper = mean(mean_slope_sub) + (sd(mean_slope_sub)/sqrt(.N)),
    sem_lower = mean(mean_slope_sub) - (sd(mean_slope_sub)/sqrt(.N))
  )] %>%
  # verify that data was averaged across the correct number of participants:
  verify(all(num_subs == 32)) %>%
  # keep only the one-versus-rest classification approach:
  filter(classification == "ovr") %>%
  # sort factor levels of the speed condition:
  transform(tITI = factor(tITI, levels = c(
    "2048 ms", "32 ms", "Rest Pre S1", "Rest Post S1",
    "Rest Pre S2", "Rest Post S2"))) %>%
  # filter()/transform() return a data.frame, so convert back to data.table:
  setDT(.)
  505. ```
  506. ```{r, echo = FALSE}
  507. grays = colorRampPalette(c("lightgray", "black"))
  508. #colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
  509. colors = c(hcl.colors(5, "Cividis")[c(5,1)], grays(4))
  510. plot_trial_starts = dt_pred_conc_slope_mean_seen %>%
  511. filter(trial_start == 1, !stringr::str_detect(tITI, "rest"))
  512. fig_prob_slope_seen = ggplot(data = dt_pred_conc_slope_mean, mapping = aes(
  513. x = as.numeric(t), y = as.numeric(mean_slope), color = as.factor(tITI),
  514. fill = as.factor(tITI))) +
  515. facet_grid(vars(as.factor(tITI)), vars(as.factor(seen))) +
  516. geom_vline(data = plot_trial_starts, aes(xintercept = t), linetype = "dashed", alpha = 0.1) +
  517. geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
  518. geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
  519. geom_line() +
  520. xlab("Time (TRs)") + ylab("Regression slope") +
  521. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(1, 200)) +
  522. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  523. scale_color_manual(values = colors, guide = FALSE) +
  524. scale_fill_manual(values = colors, guide = FALSE) +
  525. scale_y_continuous(labels = label_fill(seq(-0.1, 0.1, 0.05), mod = 2),
  526. breaks = seq(-0.1, 0.1, 0.05)) +
  527. theme(panel.border = element_blank(), axis.line = element_line()) +
  528. theme(axis.line = element_line(colour = "black"),
  529. panel.grid.major = element_blank(),
  530. panel.grid.minor = element_blank(),
  531. panel.border = element_blank(),
  532. panel.background = element_blank())
  533. fig_prob_slope_seen
  534. ```
  535. We calculate the mean absolute and non-absolute slopes for sequence and rest trials
  536. depending on whether the permuted sequence was actually seen or not:
  537. ```{r}
# calculate mean regression slopes for fast and slow sequence trials and rest:
dt_pred_conc_slope_stat_seen = dt_pred_conc_slope %>%
  # summary statistics per participant, condition and seen/unseen status:
  .[, by = .(classification, id, tITI, seen), .(
    num_trs = .N,
    mean_slope = mean(slope),
    mean_abs_slope = mean(abs(slope)),
    abs_mean_slope = abs(mean(slope)),
    # mean magnitude of the positive-only and negative-only slopes:
    abs_pos_slope = mean(abs(slope[slope > 0])),
    abs_neg_slope = mean(abs(slope[slope < 0])),
    mean_sd_prob = mean(sd_prob)
  )] %>%
  # check if the average slope was calculated across the correct number of TRs
  # which should be 180 for sequence trials and 180 times number of sequences
  # not seen (105) and 180 times sequences seen (15) in permuted rest data:
  verify(all(num_trs %in% c(180, 180 * (120 - 15), 180 * 15))) %>%
  # keep only the one-versus-rest classification approach:
  filter(classification == "ovr") %>%
  setDT(.)
  555. ```
```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
# Mixed model: does the SD of decoding probabilities differ between speed
# conditions? Random intercept and condition slope per participant.
lme_sd_prob <- lmerTest::lmer(
  sd_prob ~ tITI + (tITI | id),
  data = dt_pred_conc_slope, control = lcctrl
)
rmarkdown::paged_table(anova(lme_sd_prob))
# Pairwise condition contrasts on the estimated marginal means:
emmeans_results = emmeans(lme_sd_prob, list(pairwise ~ tITI))
emmeans_results
  564. ```
```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
# Mixed model on the participant-level mean SD of probabilities
# (random intercept per participant only):
lme_rest_sd = lmerTest::lmer(
  mean_sd_prob ~ tITI + (1 | id),
  data = dt_pred_conc_slope_stat, control = lcctrl)
rmarkdown::paged_table(anova(lme_rest_sd))
# Pairwise contrasts between conditions, with rounded p-values:
emmeans_results = emmeans(lme_rest_sd, list(pairwise ~ tITI))
emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
emmeans_results
  573. ```
```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
# Mixed model on the participant-level mean absolute regression slope
# (random intercept per participant only):
lme_rest_slope = lmerTest::lmer(
  mean_abs_slope ~ tITI + (1 | id),
  data = dt_pred_conc_slope_stat, control = lcctrl)
rmarkdown::paged_table(anova(lme_rest_slope))
# Pairwise contrasts between conditions, with rounded p-values:
emmeans_results = emmeans(lme_rest_slope, list(pairwise ~ tITI))
emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
emmeans_results
  582. ```
  583. ```{r}
  584. plot_means_seen = function(data, variable, ylabel, ylim){
  585. #colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
  586. grays = colorRampPalette(c("lightgray", "black"))
  587. colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(4))
  588. fig_mean_beta = ggplot(data = data, aes(
  589. y = get(variable), x = tITI, fill = tITI)) +
  590. facet_wrap(~ seen) +
  591. geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9)) +
  592. geom_point(position = position_jitter(width = 0.1, height = 0, seed = 2),
  593. alpha = 1, inherit.aes = TRUE, pch = 21,
  594. color = "white") +
  595. stat_summary(fun.data = mean_se, geom = "errorbar",
  596. position = position_dodge(0.9), width = 0, color = "black") +
  597. xlab("Data") + ylab(ylabel) +
  598. coord_capped_cart(left = "both", expand = TRUE, ylim = ylim) +
  599. scale_fill_manual(values = colors, guide = FALSE) +
  600. theme(axis.ticks.x = element_blank(), axis.line.x = element_blank(),
  601. axis.title.x = element_blank()) +
  602. theme(axis.text.x = element_text(angle = 45, hjust = 0.9)) +
  603. theme(panel.border = element_blank(), axis.line.y = element_line()) +
  604. theme(axis.line.y = element_line(colour = "black"),
  605. panel.grid.major = element_blank(),
  606. panel.grid.minor = element_blank(),
  607. panel.background = element_blank())
  608. }
  609. fig_mean_beta_seen = plot_means_seen(
  610. data = dt_pred_conc_slope_stat_seen, variable = "abs_mean_slope",
  611. ylabel = "Absolute mean regression slope", ylim = c(-0.01, 0.02))
  612. fig_mean_beta_abs_seen = plot_means_seen(
  613. data = dt_pred_conc_slope_stat_seen, variable = "mean_abs_slope",
  614. ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
  615. fig_sd_prob_seen = plot_means_seen(
  616. data = dt_pred_conc_slope_stat_seen, variable = "mean_sd_prob",
  617. ylabel = "SD of probability", ylim = c(0, 0.4))
  618. fig_mean_pos_abs_seen = plot_means_seen(
  619. data = dt_pred_conc_slope_stat_seen, variable = "abs_pos_slope",
  620. ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
  621. fig_mean_neg_abs_seen = plot_means_seen(
  622. data = dt_pred_conc_slope_stat_seen, variable = "abs_neg_slope",
  623. ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
  624. fig_mean_slope_seen = plot_means_seen(
  625. data = dt_pred_conc_slope_stat_seen, variable = "mean_slope",
  626. ylabel = "Absolute mean regression slope", ylim = c(-0.01, 0.02))
  627. fig_mean_beta_seen; fig_mean_beta_abs_seen; fig_sd_prob_seen;
  628. fig_mean_pos_abs_seen; fig_mean_neg_abs_seen; fig_mean_slope_seen
  629. ```
  630. ### Frequency spectrum analyses
  631. #### Fixed event order
  632. We create a data table with frequency expectations:
  633. ```{r}
# Expected event frequencies for each sequence speed, derived from the
# assumed single-event response frequency (1 / 5.26 s -- TODO confirm the
# source of the 5.26 s response duration):
fitfreq = 1 / 5.26
stim_dur = 0.1  # stimulus duration, in seconds
num_seq_stim = 5  # number of stimuli per sequence
# calculate the delta (expected frequency) for the 32 ms and 2048 ms speeds:
delta_freq_fast = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 0.032))
delta_freq_slow = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 2.048))
delta_freq_rest = 0.01  # assumed baseline frequency for rest -- TODO confirm
# create a data table with the expected frequencies (one row per condition
# plotted later; the rest value is repeated for the four rest runs):
frequency_expectation = data.table(xintercept = c(
  delta_freq_fast, delta_freq_slow, rep(delta_freq_rest, 4)))
  644. ```
  645. We calculate the frequency spectra based on the time courses of regression slope coefficients:
  646. ```{r}
library(lomb)
# create a time vector with random gaps: 15 segments of 12 TRs (= 180 time
# points) offset by 100 plus a random shift, mimicking concatenated,
# non-contiguous data stretches.
# NOTE(review): runif() is used without a fixed seed, so exact gap placement
# differs between knits -- TODO consider set.seed() for reproducibility.
time_concat = as.vector(sapply(0:14, FUN = function(x) seq(0, 1*11, by = 1) + x*100 + round(runif(1)*50)))
# get the frequency spectrum of fast and slow trials:
dt_pred_rest_lsp = dt_pred_conc_slope %>%
  # use only the first (i.e., the actually seen) sequence ordering:
  filter(sequence == 1) %>%
  setDT(.) %>%
  # Lomb-Scargle periodogram of the slope time course, per participant and
  # condition, scanning frequencies from 0 to 0.3:
  .[, by = .(classification, id, tITI), {
    frequency_spectrum = lsp(x = slope, times = time_concat, from = 0, to = 0.3, plot = FALSE)
    list(num_trs = .N, freq_bins = frequency_spectrum$scanned,
         # moving-average window covering ~0.01 frequency units:
         filter_width = round(0.01/mean(diff(frequency_spectrum$scanned))),
         power = frequency_spectrum$power
    )}] %>% verify(all(num_trs == 180)) %>% verify(length(unique(filter_width)) == 1) %>%
  # smooth the frequency spectra and frequency bins (moving average):
  .[, by = .(classification, id, tITI), ":=" (
    power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
    freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
  )]
  665. ```
  666. ```{r}
# calculate the mean frequency profile across participants:
dt_pred_rest_lsp_mean = dt_pred_rest_lsp %>%
  # filter out NA frequency spectrum (edge bins lost to smoothing):
  filter(!is.na(power_smooth)) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins:
  .[, by = .(classification, tITI, freq_bins_smooth), .(
    num_subs = .N,
    mean_spec = mean(power_smooth),
    sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
    sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
  )] %>%
  # verify that data was averaged across the expected number of participants:
  verify(all(num_subs == 32))
  680. ```
  681. ```{r}
#colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
# one color per plotted condition; grays() is defined in an earlier chunk.
# NOTE(review): six color values are supplied -- confirm this matches the
# number of tITI levels remaining after excluding "Rest Post S1".
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(1), "black", grays(2))
# Mean frequency spectra per condition with SEM ribbons; dashed vertical
# lines mark the expected frequencies from frequency_expectation:
fig_freq_lsp = ggplot(
  data = dt_pred_rest_lsp_mean %>% filter(tITI != "Rest Post S1"),
  aes(
    color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = colors) +
  geom_line() +
  geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  # annotate each expected frequency with its rounded value:
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.02, y = 0.1, label = paste(round(xintercept, 2))),
    color = colors) +
  xlab("Frequency") + ylab("Power") +
  scale_color_manual(name = "", values = colors) +
  scale_fill_manual(name = "", values = colors) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    ylim = c(0, 3), xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_lsp
  714. ```
  715. #### All possible event orders
  716. We calculate the frequency spectrum for all permuted resting state sequences and
  717. sequence trials:
  718. ```{r}
# create a time vector with random gaps (same construction as in the
# fixed-order chunk above).
# NOTE(review): again no fixed seed, so gaps differ between knits.
time_concat = as.vector(sapply(0:14, FUN = function(x) seq(0, 1*11, by = 1) + x*100 + round(runif(1)*50)))
# get the frequency spectrum of fast and slow trials:
dt_pred_rest_lsp_seen = dt_pred_conc_slope %>%
  # calculate the frequency spectrum for each permuted sequence and participant:
  .[, by = .(classification, id, tITI, sequence, seen), {
    frequency_spectrum = lomb::lsp(
      x = slope, times = time_concat, from = 0, to = 0.3, plot = FALSE)
    list(num_trs = .N, freq_bins = frequency_spectrum$scanned,
         # moving-average window covering ~0.01 frequency units:
         filter_width = round(0.01/mean(diff(frequency_spectrum$scanned))),
         power = frequency_spectrum$power
    )}] %>%
  # the number of TRs for each stretch of activation patterns should be 180:
  verify(all(num_trs == 180)) %>%
  # there should only be one filtering width per sequence:
  verify(length(unique(filter_width)) == 1) %>%
  # there should be 120 sequence permutations in the resting condition
  # (and exactly 1 on sequence trials):
  verify(.[, by = .(classification, id, tITI), .(
    num_seq = length(unique(sequence)))]$num_seq %in% c(1, 120)) %>%
  # smooth the frequency spectra and frequency bins (moving average):
  .[, by = .(classification, id, tITI, sequence, seen), ":=" (
    power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
    freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
  )]
  743. ```
  744. We calculate the mean frequency spectra for each participant, classification approach,
  745. speed condition, smoothed frequency bin and if the (permuted) rest sequence
  746. was actually seen during the task or not:
  747. ```{r}
# calculate the mean frequency profile across participants:
dt_pred_rest_lsp_mean_seen = dt_pred_rest_lsp_seen %>%
  # filter out NA frequency spectrum (edge bins lost to smoothing):
  filter(!is.na(power_smooth)) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
  .[, by = .(classification, id, tITI, freq_bins_smooth), .(
    num_seqs = .N,
    power_smooth = mean(power_smooth)
  )] %>%
  # the number of sequences should be 1 for sequence trials and 120 for the
  # permuted resting-state sequences (seen and unseen are pooled here, since
  # `seen` is not part of the grouping above):
  verify(num_seqs %in% c(1, 120)) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins:
  .[, by = .(classification, tITI, freq_bins_smooth), .(
    num_subs = .N,
    mean_spec = mean(power_smooth),
    sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
    sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
  )] %>%
  # verify if data was averaged across the expected number of participants:
  verify(all(num_subs == 32))
  770. ```
  771. ```{r, echo=TRUE}
  772. calc_delta <- function(speed_s) {
  773. # input speed_s: sequence speed (ISI), in seconds
  774. fitfreq = 1 / 5.26
  775. stim_dur = 0.1
  776. num_seq_stim = 5
  777. delta = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * speed_s))
  778. return(delta)
  779. }
# create a data table with the expected frequencies and matching condition
# labels. NOTE(review): this 3-row table overwrites the 6-row version
# defined earlier and is the one used by the pre- vs. post-rest figures:
frequency_expectation = data.table(xintercept = c(
  calc_delta(0.032), calc_delta(2.048), 0.01),
  tITI = c("32 ms", "2048 ms", "Rest Pre S1"))
  784. ```
  785. #### Pre vs. post-task resting-state data
  786. ```{r, echo=FALSE}
# Compare pre- vs. post-task resting-state frequency spectra (session 1):
colors_all = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")  # one per expected frequency
colors_rest = c("black", "darkred")  # pre- vs. post-task rest
plot_data <- dt_pred_rest_lsp_mean_seen %>%
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1"))
fig_freq_post <- ggplot(data = plot_data, aes(
  color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  # dashed lines at the three expected frequencies:
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = colors_all) +
  geom_line() +
  geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  # annotate each expected frequency with its rounded value:
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.03, y = 0.1, label = paste(round(xintercept, 2))),
    color = colors_all) +
  xlab("Frequency") + ylab("Power") +
  scale_color_manual(name = "", values = colors_rest) +
  scale_fill_manual(name = "", values = colors_rest) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    ylim = c(0, 3), xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_post
  819. ```
  820. We calculate the relative power (difference between pre- and post task resting state data) across the entire frequency spectrum:
  821. ```{r}
# calculate the mean frequency profile across participants:
dt_pred_rest_lsp_mean_rel_seen = dt_pred_rest_lsp_seen %>%
  # restrict to pre- and post-task session-1 resting-state data:
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
  # recode labels into valid column names for the wide format below:
  transform(tITI = ifelse(tITI == "Rest Pre S1", "rest_pre_s1", "rest_post_s1")) %>%
  # filter out NA frequency spectrum (edge bins lost to smoothing):
  filter(!is.na(power_smooth)) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
  .[, by = .(classification, id, tITI, freq_bins_smooth), .(
    num_seqs = .N,
    power_smooth = mean(power_smooth)
  )] %>%
  # each resting-state run contributes all 120 permuted sequences (seen and
  # unseen pooled); 1 could only occur for sequence trials, excluded above:
  verify(num_seqs %in% c(1, 120)) %>%
  # transform resting state data from long to wide to calculate relative power:
  spread(data = ., key = tITI, value = power_smooth) %>%
  mutate(rel_power = rest_post_s1 - rest_pre_s1) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins:
  .[, by = .(classification, freq_bins_smooth), .(
    num_subs = .N,
    mean_spec = mean(rel_power),
    sem_upper = mean(rel_power) + (sd(rel_power)/sqrt(.N)),
    sem_lower = mean(rel_power) - (sd(rel_power)/sqrt(.N))
  )] %>%
  # verify if data was averaged across the expected number of participants:
  verify(all(num_subs == 32))
  850. ```
  851. We determine in which frequency range the peak in difference between pre- and post-task rest is:
  852. ```{r}
  853. increase_slow = round(dt_pred_rest_lsp_mean_rel$freq_bins_smooth[which.max(dt_pred_rest_lsp_mean_rel$mean_spec)], 2)
  854. increase_fast = round(dt_pred_rest_lsp_mean_rel$freq_bins_smooth[dt_pred_rest_lsp_mean_rel$freq_bins_smooth > 0.07][which.max(dt_pred_rest_lsp_mean_rel[dt_pred_rest_lsp_mean_rel$freq_bins_smooth > 0.07]$mean_spec)], 2)
  855. increase_slow; increase_fast;
  856. ```
  857. ```{r, echo=FALSE}
# Plot the relative power spectrum (post- minus pre-task rest):
colors_all = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")  # one per expected frequency
colors_rest = c("black", "darkred")
fig_freq_post_rel <- ggplot(data = dt_pred_rest_lsp_mean_rel_seen, aes(
  y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = colors_all) +
  # zero line: no difference between pre- and post-task rest:
  geom_hline(aes(yintercept = 0)) +
  geom_line() +
  geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.03, y = -0.25, label = paste(round(xintercept, 2))),
    color = colors_all) +
  xlab("Frequency") + ylab("Relative power (Pre vs. Post)") +
  scale_color_manual(name = "", values = colors_rest) +
  scale_fill_manual(name = "", values = colors_rest) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_post_rel
  889. ```
  890. #### Frequency spectra of frequent vs. less frequent sequences
We calculate the mean frequency spectra depending on whether the assumed sequences occurred more frequently or less frequently during the task:
  892. ```{r}
# calculate the mean frequency profile across participants, now split by
# whether the permuted sequence was actually seen during the task.
# NOTE(review): this overwrites the earlier dt_pred_rest_lsp_mean_seen table
# (which pooled seen/unseen); the figures using the earlier version have
# already been rendered at this point, but a distinct name would be clearer.
dt_pred_rest_lsp_mean_seen = dt_pred_rest_lsp_seen %>%
  # filter out NA frequency spectrum (edge bins lost to smoothing):
  filter(!is.na(power_smooth)) %>%
  setDT(.) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
  .[, by = .(classification, id, tITI, freq_bins_smooth, seen), .(
    num_seqs = .N,
    power_smooth = mean(power_smooth)
  )] %>%
  # the number of sequences should be 1 for sequence trials, 15 for seen
  # permuted rest repetitions and 105 for unseen sequence repetitions:
  verify(num_seqs %in% c(1, 15, 105)) %>%
  # calculate the mean smoothed frequency spectrum across all frequency bins:
  .[, by = .(classification, tITI, freq_bins_smooth, seen), .(
    num_subs = .N,
    mean_spec = mean(power_smooth),
    sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
    sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
  )] %>%
  # verify if data was averaged across the expected number of participants:
  verify(all(num_subs == 32))
  915. ```
  916. ```{r, echo=FALSE}
# one color per condition.
# NOTE(review): four color values are supplied for the tITI scales --
# confirm this matches the number of conditions present in the data.
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(1), "black")
fig_freq_lsp_seen = ggplot(data = dt_pred_rest_lsp_mean_seen, aes(
  color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
  facet_wrap(~ seen) +
  # expected frequencies, intended once per facet panel.
  # NOTE(review): rep(colors[1:3], 2) supplies 6 values for a 3-row layer;
  # recent ggplot2 versions require parameter length to match the layer data
  # -- TODO confirm this renders on the project's ggplot2 version.
  geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
             linetype = "dashed", color = rep(colors[1:3], 2)) +
  geom_line() +
  geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
              alpha = 0.3, color = NA) +
  geom_text(data = frequency_expectation, aes(
    x = xintercept + 0.02, y = 0.1, label = paste(round(xintercept, 2))),
    color = rep(colors[1:3], 2)) +
  xlab("Frequency") + ylab("Power") +
  scale_color_manual(name = "", values = colors) +
  scale_fill_manual(name = "", values = colors) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
                    ylim = c(0, 3), xlim = c(0, 0.3)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
                     breaks = seq(0, 0.3, by = 0.05)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_lsp_seen
  947. ```
  948. #### Relative frequency spectra (fixed order)
We compare the power at the frequencies expected for resting-state, fast, and slow
sequence trials within the data from each of these three conditions.
  951. ```{r}
  952. fitfreq = 1 / 5.26
  953. stim_dur = 0.1
  954. num_seq_stim = 5
  955. # calculate the delta
  956. delta_freq_fast = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 0.032))
  957. delta_freq_slow = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 2.048))
  958. delta_freq_rest = 0.01
  959. # create a data table with the expected frequencies:
  960. frequency_expectation = data.table(xintercept = c(
  961. delta_freq_fast, delta_freq_slow, rep(delta_freq_rest, 4)))
  962. ```
  963. ```{r}
# Compare, per participant, the frequency of maximal relative power
# (power normalized by pre-task rest) between fast and slow conditions:
dt_pred_rest_lsp_rel = dt_pred_rest_lsp %>%
  # remove columns that are not needed:
  mutate(num_trs = NULL, filter_width = NULL, power = NULL) %>%
  # spread the values of the smoothed power for all speed conditions:
  spread(key = "tITI", value = "power_smooth") %>%
  # calculate relative power (relative to pre-task resting state data);
  # get() is used because the spread column names contain spaces:
  mutate(power_fast_rel = get("32 ms") / get("Rest Pre S1")) %>%
  mutate(power_slow_rel = get("2048 ms") / get("Rest Pre S1")) %>%
  setDT(.) %>%
  # find the frequency with the highest power for every participant:
  .[, by = .(classification, id), .(
    num_bins = .N,
    max_freq_fast = freq_bins_smooth[which.max(power_fast_rel)],
    max_freq_slow = freq_bins_smooth[which.max(power_slow_rel)]
  )] %>%
  # compare the maximum frequencies of relative fast vs. relative slow data:
  .[, by = .(classification), {
    ttest_results = t.test(max_freq_fast, max_freq_slow, paired = TRUE)
    list(
      num_subs = .N,
      pvalue = ttest_results$p.value,
      tvalue = ttest_results$statistic,
      df = ttest_results$parameter
    )}] %>%
  # verify that the test included the expected number of participants:
  verify(all(num_subs == 32))
# print the table:
rmarkdown::paged_table(dt_pred_rest_lsp_rel)
  991. ```
  992. ```{r}
# Extract, per participant and condition, the smoothed power at the
# frequency bin closest to each expected frequency:
dt_pred_rest_lsp_exp = dt_pred_rest_lsp %>%
  .[, by = .(classification, id, tITI), .(
    power_index_fast = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_fast))],
    power_index_slow = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_slow))],
    power_index_rest = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_rest))]
  )] %>%
  # melt all index variables into one column:
  gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>% setDT(.) %>%
  # human-readable labels including the rounded expected frequency:
  .[grepl("index_fast", index), label := paste0("Fast (", round(delta_freq_fast, 2), ")")] %>%
  .[grepl("index_slow", index), label := paste0("Slow (", round(delta_freq_slow, 2), ")")] %>%
  .[grepl("index_rest", index), label := paste0("Rest (", round(delta_freq_rest, 2), ")")] %>%
  setorder(., classification, id, tITI) %>%
  # keep only the one-versus-rest classification approach:
  filter(classification == "ovr") %>%
  setDT(.)
  1007. ```
  1008. ```{r, echo=TRUE}
# Bar plot of power at each predicted frequency, per condition:
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray", "black")
fig_freq_exp = ggplot(
  data = dt_pred_rest_lsp_exp %>% filter(tITI != "Rest Post S1"),
  aes(y = power, x = fct_rev(label), fill = tITI)) +
  geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
           width = 0.8) +
  # individual participant points, jittered within each dodged bar:
  geom_point(position = position_jitterdodge(
    jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
    alpha = 1, inherit.aes = TRUE, pch = 21,
    color = "white") +
  stat_summary(fun.data = "mean_se", geom = "errorbar",
               position = position_dodge(0.9), width = 0, color = "black") +
  xlab("Predicted frequency") + ylab("Power") +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 5)) +
  scale_fill_manual(values = colors, name = "") +
  # hide the x-axis ticks and line by drawing them in white:
  theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
        legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
fig_freq_exp
  1035. ```
  1036. #### Relative frequency spectra (all orders)
  1037. ```{r}
# Power at the expected frequencies for every permuted sequence, then
# averaged separately for seen and unseen sequences:
dt_pred_rest_lsp_exp_seen = dt_pred_rest_lsp_seen %>%
  .[, by = .(classification, id, tITI, sequence, seen), .(
    power_index_fast = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_fast))],
    power_index_slow = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_slow))],
    power_index_rest = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_rest))]
  )] %>%
  # melt all index variables into one column:
  gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>%
  setDT(.) %>%
  # human-readable labels including the rounded expected frequency:
  .[grepl("index_fast", index), label := paste0("Fast (", round(delta_freq_fast, 2), ")")] %>%
  .[grepl("index_slow", index), label := paste0("Slow (", round(delta_freq_slow, 2), ")")] %>%
  .[grepl("index_rest", index), label := paste0("Rest (", round(delta_freq_rest, 2), ")")] %>%
  # average across sequences for seen and unseen sequences:
  .[, by = .(classification, id, tITI, seen, index, label), .(
    num_seqs = .N,
    power = mean(power)
  )] %>%
  # check if the number of sequences matches for sequence (1), seen (15) and
  # unseen (105) sequences in permuted resting state sequences:
  verify(num_seqs %in% c(1, 15, 105)) %>%
  setorder(., classification, id, tITI) %>%
  # keep only the one-versus-rest classification approach:
  filter(classification == "ovr") %>%
  setDT(.)
  1061. ```
  1062. ```{r}
# Bar plot of power at the predicted frequencies per condition (fill),
# faceted by whether the sequence was seen during the task:
colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray", "black")
fig_freq_exp_seen = ggplot(data = dt_pred_rest_lsp_exp_seen, aes(
  y = power, x = fct_rev(label), fill = tITI)) +
  # bars show the mean power per condition:
  geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
  width = 0.8) +
  # individual participants as jittered points (fixed seed for reproducibility):
  geom_point(position = position_jitterdodge(
  jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
  alpha = 1, inherit.aes = TRUE, pch = 21,
  color = "white") +
  facet_wrap(~ seen) +
  # standard error of the mean as error bars on top of the bars:
  stat_summary(fun.data = "mean_se", geom = "errorbar",
  position = position_dodge(0.9), width = 0, color = "black") +
  xlab("Predicted frequency") + ylab("Power") +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 5)) +
  scale_fill_manual(values = colors, name = "") +
  # hide the x-axis line and ticks by painting them white; note that theme()
  # calls merge, so this explicit axis.line.x setting survives the generic
  # axis.line settings applied below:
  theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  theme(legend.position = "top", legend.direction = "horizontal",
  legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
  legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
  panel.grid.major = element_blank(),
  panel.grid.minor = element_blank(),
  panel.border = element_blank(),
  panel.background = element_blank())
fig_freq_exp_seen
  1089. ```
  1090. We examine the effect of resting-state session (pre- vs. post-task rest) and sequence frequency (less frequent vs. more frequent) on the power in the high frequency range (predicted by our model) indicative of fast sequential reactivation:
```{r, results="hold"}
  1092. lme_power <- lmerTest::lmer(
  1093. power ~ tITI * seen + (tITI + seen | id),
  1094. data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
  1095. stringr::str_detect(tITI, "Rest")),
  1096. control = lcctrl)
  1097. summary(lme_power)
  1098. anova(lme_power)
  1099. emmeans_results = emmeans(lme_power, list(pairwise ~ tITI | seen))
  1100. emmeans_results
  1101. ```
```{r, echo=TRUE, results="hold"}
  1103. lme_power <- lmerTest::lmer(
  1104. power ~ tITI + (seen | id),
  1105. data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
  1106. stringr::str_detect(tITI, "Rest")),
  1107. control = lcctrl)
  1108. summary(lme_power)
  1109. anova(lme_power)
  1110. emmeans_results = emmeans(lme_power, list(pairwise ~ tITI))
  1111. emmeans_results
  1112. ```
```{r, echo=TRUE, results="hold"}
  1114. lme_power <- lmerTest::lmer(
  1115. power ~ seen + (tITI | id),
  1116. data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
  1117. stringr::str_detect(tITI, "Rest")),
  1118. control = lcctrl)
  1119. summary(lme_power)
  1120. anova(lme_power)
  1121. emmeans_results = emmeans(lme_power, list(pairwise ~ seen))
  1122. emmeans_results
  1123. ```
  1124. We compare the power in the fast frequency spectrum between pre- and post-task resting-state data:
  1125. ```{r}
# Paired t-tests comparing power at the predicted fast frequency between
# pre- and post-task rest, separately for seen and unseen sequences:
dt_pred_rest_lsp_exp_seen %>%
  # select resting state data and power in the fast frequency range:
  filter(stringr::str_detect(tITI, "Rest") & index == "power_index_fast") %>%
  group_by(index, seen) %>%
  # paired t-test (pre vs. post) within each group; pairing relies on rows
  # being consistently ordered by participant within both sessions:
  do(broom::tidy(t.test(formula = power ~ tITI, data = ., paired = TRUE))) %>%
  setDT(.) %>%
  # FDR-correct across the tests and round p-values to APA format:
  .[, "p.value_adjust" := p.adjust(p.value, method = "fdr")] %>%
  .[, "p.value_round" := round_pvalues(p.value)] %>%
  .[, "p.value_adjust_round" := round_pvalues(p.value_adjust)] %>%
  # check if the degrees-of-freedom are n - 1 (n = 32 participants):
  verify(parameter == 32 - 1) %>%
  rmarkdown::paged_table(.)
  1138. ```
  1139. ```{r}
# Bar plot of power at the predicted fast frequency, comparing pre- vs.
# post-task rest, with seen vs. unseen sequences as fill color:
plot_data <- dt_pred_rest_lsp_exp_seen %>%
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
  filter(index == "power_index_fast")
colors = c(brewer.pal(n = 8, name = "RdBu")[6], hcl.colors(5, "Cividis")[c(1)])
fig_freq_exp_post = ggplot(data = plot_data, aes(
  y = power, x = tITI, fill = seen)) +
  # bars show the mean power per condition:
  geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
  width = 0.8) +
  # individual participants as jittered points (fixed seed for reproducibility):
  geom_point(position = position_jitterdodge(
  jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
  alpha = 1, inherit.aes = TRUE, pch = 21,
  color = "white") +
  # standard error of the mean as error bars:
  stat_summary(fun.data = "mean_se", geom = "errorbar",
  position = position_dodge(0.9), width = 0, color = "black") +
  xlab("Resting-state data") + ylab("Power at fast frequency (0.17)") +
  #ggtitle("Predicted fast frequency (0.17 Hz)") +
  theme(plot.title = element_text(hjust = 1)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
  scale_fill_manual(values = colors, name = "Sequences") +
  # hide the x-axis line and ticks by painting them white:
  theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  theme(legend.position = "top", legend.direction = "horizontal",
  legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
  legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  guides(fill = guide_legend(title.position = "top", title.hjust = 0.5)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
  panel.grid.major = element_blank(),
  panel.grid.minor = element_blank(),
  panel.border = element_blank(),
  panel.background = element_blank())
#theme(legend.title = element_text(size = 10),
# legend.text = element_text(size = 10))
fig_freq_exp_post
  1173. ```
  1174. ### Figure 6
  1175. ```{r}
  1176. plot_grid(fig_freq_post, fig_freq_post_rel, fig_freq_exp_post, nrow = 1, ncol = 3,
  1177. labels = c("a", "b", "c"), rel_widths = c(3, 3, 3))
  1178. ```
  1179. ```{r, echo=TRUE, eval=FALSE}
  1180. ggsave(filename = "highspeed_plot_decoding_resting_post.pdf",
  1181. plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  1182. dpi = "retina", width = 9, height = 3)
  1183. ggsave(filename = "wittkuhn_schuck_figure_6.pdf",
  1184. plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  1185. dpi = "retina", width = 9, height = 3)
  1186. ```
  1187. ### Inserting sequence trials into rest
  1188. We create a function to sample sequence trials:
  1189. ```{r}
  1190. subseqsample = function(sequence, seq_size = 12, seq_num = 15) {
  1191. # initialize an empty array for the resulting indices:
  1192. index = rep(NA, length(sequence))
  1193. # calculate how much space would be allocated to gaps:
  1194. total_gaps = length(sequence) - seq_size * seq_num
  1195. # calculate random gaps
  1196. random_gaps = diff(sort(c(0, sample(1:(total_gaps + seq_num), seq_num), total_gaps + seq_num + 1)))
  1197. # we subtract 1 for each element of seq_num we added above:
  1198. random_gaps = random_gaps - 1
  1199. # make sure that the sum of random gaps matches the total gaps:
  1200. if (sum(random_gaps) != total_gaps) {warning("sum of gaps does not match!")}
  1201. # generate a random order of subsequences:
  1202. seq_order = sample(1:seq_num, seq_num)
  1203. # initialize the first starting point of potential sequences:
  1204. cstart = 0
  1205. # loop through all subsequences:
  1206. for (cseq in seq(1, seq_num)) {
  1207. # select the first random gap:
  1208. cgap = random_gaps[cseq]
  1209. # take the starting point and add the current random gap:
  1210. cstart = cstart + cgap + 1 # +1 because gap can be 0
  1211. # calculate the current subsequence index:
  1212. cidx = seq(cstart, (cstart + seq_size - 1))
  1213. # insert the random sequence index into the index array:
  1214. index[cidx] = seq_order[cseq]
  1215. # define a new starting position for the next subsequence:
  1216. cstart = max(cidx)
  1217. }
  1218. # return the indices:
  1219. return(index)
  1220. }
  1221. subseqsample(sequence = seq(1, 233))
  1222. ```
  1223. We randomly sample 15 sequences of 12 TRs each from resting state data and
  1224. compare the mean frequency spectrum across those trials between resting state,
  1225. fast and slow sequence trials.
  1226. ```{r}
# Compare the mean slope frequency spectrum of randomly sampled rest chunks
# with that of fast and slow sequence trials:
dt_pred_rest_freq = dt_pred_conc_slope %>%
  # we deselect post-task resting state data and assume a fixed ordering:
  filter(!(tITI == "Rest Post S1") & sequence == 1) %>%
  # filter for TRs in the forward and backward period only in sequence trials:
  filter(!(tITI %in% c("32 ms", "2048 ms") & !(seq_tr %in% seq(2, 13)))) %>%
  setDT(.) %>%
  # select random chunks of TRs in the resting state data (15 chunks of 12
  # TRs each, via subseqsample's defaults):
  .[tITI == "Rest Pre S1", by = .(classification, id), ":=" (trial_tITI = subseqsample(seq_tr))] %>%
  # filter out all NA trials on resting state data
  filter(!(is.na(trial_tITI) & tITI == "Rest Pre S1")) %>%
  setDT(.) %>%
  # assert that all the three conditions (two sequence trials and rest data) are present:
  assert(in_set("32 ms", "2048 ms", "Rest Pre S1"), tITI) %>%
  # calculate the frequency spectrum for every trial (slope time course is
  # z-scored and detrended before the spectral estimate, smoothed with
  # spans = 5):
  .[, by = .(classification, id, tITI, trial_tITI), {
    frequency_spectrum = stats::spectrum(
      x = scale(slope), plot = FALSE, detrend = TRUE, demean = FALSE, spans = 5)
    list(
      num_trs = .N,
      freq_bins = frequency_spectrum$freq,
      spectrum = frequency_spectrum$spec
    # NOTE(review): 12 TRs per trial is expected here; the allowed value of 6
    # presumably covers a shorter trial type — confirm against the task design.
  )}] %>% verify(all(num_trs %in% c(12, 6))) %>%
  # normalize the frequencies per participant and trial to remove mean differences:
  .[, by = .(classification, id, tITI, trial_tITI), ":=" (spectrum = spectrum/sum(spectrum))] %>%
  # multiply the frequency levels by 1/TR (TR = 1.25 s) to convert to Hz:
  transform(freq_bins = freq_bins * (1/1.25)) %>% setDT(.) %>%
  # calculate the mean frequency spectrum per participant and condition:
  .[, by = .(classification, id, tITI, freq_bins), .(
    num_trials = .N,
    mean_spec = mean(spectrum)
  )] %>% verify(all(num_trials == 15)) %>%
  # calculate the average frequency spectrum across all participants
  # (sem_upper / sem_lower give mean +/- standard error of the mean):
  .[, by = .(classification, tITI, freq_bins), .(
    num_subs = .N,
    mean_spec = mean(mean_spec),
    sem_upper = mean(mean_spec) + (sd(mean_spec)/sqrt(.N)),
    sem_lower = mean(mean_spec) - (sd(mean_spec)/sqrt(.N))
  )] %>% verify(all(num_subs == 32))
  1265. ```
  1266. We insert a number of events with probabilities scaled at different SNR levels:
  1267. ```{r}
# Build an "augmented rest" dataset: for every participant, speed, SNR level
# and number of inserts, cut sequence-trial probabilities out of real task
# data and insert them (SNR-scaled and blended with rest data) into random
# non-overlapping positions of the pre-task resting-state time course.
dt_pred_conc = dt_pred_all %>%
  # we add a consecutive counter that will be used to concatenate data:
  .[, by = .(classification, id, tITI, class), ":=" (t = seq(1, .N))] %>%
  # we will focus all further analyses on the one-versus-rest classifier:
  filter(classification == "ovr") %>% setDT(.)
# create separate dataframes for resting state data and sequence trial data:
dt_pred_rest_s1 = subset(dt_pred_conc, tITI == "rest_run-pre_ses-01")
dt_pred_seq_cut = subset(dt_pred_conc, tITI %in% c("32 ms", "2048 ms"))
speed_levels = unique(dt_pred_seq_cut$tITI)
num_speeds = length(speed_levels)
# define the number of SNR levels and sequence inserts:
snr_levels = c(4/5, 1/2, 1/4, 1/8, 0)
snr_levels_char = c("4/5", "1/2", "1/4", "1/8", "0")
num_snrs = length(snr_levels)
num_inserts = 6
insert_length = 12
# get the number of TRs during resting state and warn if incorrect:
num_rest_trs = max(dt_pred_rest_s1$t)
if (num_rest_trs != 233) {warning("number of resting state TRs incorrect")}
num_seq_trials = max(dt_pred_seq_cut$trial_tITI)
if (num_seq_trials != 15) {warning("number of sequence trials incorrect")}
num_subs = length(unique(dt_pred_rest_s1$id))
if (num_subs != 32) {warning("number of participants incorrect")}
# create a vector of seeds to make computations reproducible:
#seeds = seq(1, num_snrs * num_inserts * num_subs * num_speeds)
# collect one data.table per (participant x inserts x snr x speed) cell:
dt_pred_rest_insert <- list()
count = 1
for (sub in unique(dt_pred_rest_s1$id)) {
  # sample random sequence trials to be cut out depending on the number of inserts:
  cutout_trials = sort(sample(1:num_seq_trials, num_inserts, replace = FALSE))
  # sample random insert sequences depending on the number of inserts (no overlap);
  # twice as many slots are drawn so half can serve as "blend" segments:
  rest_seq = subseqsample(sequence = seq(1, num_rest_trs), seq_size = insert_length, seq_num = num_inserts * 2)
  subsequence_starts = sort(sapply(X = seq(1, num_inserts * 2), FUN = function(x){which(rest_seq == x)[1]}))
  insert_starts = sort(sample(subsequence_starts, num_inserts, replace = FALSE))
  insert_stops = sort(insert_starts + insert_length - 1)
  # the remaining slots provide the rest data used for SNR blending:
  blend_starts = sort(setdiff(subsequence_starts, insert_starts))
  blend_stops = sort(blend_starts + insert_length - 1)
  for (ninserts in seq(1, num_inserts)) {
    snr_index = 0
    for (snr in snr_levels) {
      snr_index = snr_index + 1
      for (speed in speed_levels) {
        # create a resting state data subset for the current settings:
        dt_pred_rest_tmp = dt_pred_rest_s1 %>%
          filter(id == sub) %>% setorder(classification, id, t, class) %>%
          mutate(snr = snr) %>% mutate(insert = ninserts) %>% mutate(position_insert = position) %>%
          mutate(speed = speed) %>% mutate(snr_char = snr_levels_char[snr_index]) %>%
          mutate(prob_insert = probability) %>% mutate(prob_insert_blend = probability) %>%
          mutate(insert_index = NA) %>% mutate(insert_start = NA) %>% setDT(.) %>%
          verify(.[, by = .(classification, id, position), .(num_trs = .N)]$num_trs == num_rest_trs)
        # loop through all events to cutout and insert fast sequences:
        for (cevent in 1:ninserts) {
          # create the sequence for sequence inserting:
          insert_seq = seq(insert_starts[cevent], insert_stops[cevent])
          # create the sequence for the blending rest trials:
          blend_seq = seq(blend_starts[cevent], blend_stops[cevent])
          # select the sequence from fast trials:
          dt_pred_seq_cutout = dt_pred_seq_cut %>%
            # filter for specific participant and specific trial:
            filter(id == sub & trial_tITI == cutout_trials[cevent]) %>%
            # filter for specific speed and specific trs:
            filter(tITI == speed & seq_tr %in% seq(2, insert_length + 1)) %>%
            # sort by classification, id, timepoint and class (as rest data, see above):
            setorder(classification, id, t, class) %>%
            # multiply the probability with the current SNR level:
            mutate(prob_snr = probability * snr)
          # insert the snr-adjusted sequence trial probabilities:
          dt_pred_rest_tmp$prob_insert[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr
          # keep the position indices of the inserted sequence trials:
          dt_pred_rest_tmp$position_insert[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$position
          # blend the snr-adjusted sequence trials with data from snr-adjusted rest trials:
          dt_pred_rest_tmp$prob_insert_blend[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr +
            dt_pred_rest_tmp$probability[dt_pred_rest_tmp$t %in% blend_seq] * (1 - snr)
          # create a new column with the (SNR-adjusted) sequence probabilities:
          dt_pred_rest_tmp$prob_seq[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$probability
          dt_pred_rest_tmp$prob_seq_snr[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr
          # include indices that indicate where data was inserted:
          dt_pred_rest_tmp$insert_index[dt_pred_rest_tmp$t %in% insert_seq] = "inserted"
          dt_pred_rest_tmp$insert_num[dt_pred_rest_tmp$t %in% insert_seq] = cevent
          dt_pred_rest_tmp$insert_start[dt_pred_rest_tmp$t %in% min(insert_seq)] = 1
        }
        dt_pred_rest_insert[[count]] = dt_pred_rest_tmp
        count = count + 1
      }
    }
  }
}
# combine data in one datatable:
dt_pred_rest_insert <- do.call("rbind", dt_pred_rest_insert)
# calculate the differences in probability between the original rest data
# and the sequence-inserted (blended) version:
dt_pred_rest_insert$prob_diff = dt_pred_rest_insert$probability - dt_pred_rest_insert$prob_insert_blend
  1359. ```
  1360. Plot differences between inserted sequence speeds at different SNR levels:
  1361. ```{r}
# Probability time courses of one inserted sequence trial (fast speed,
# example participant) at all SNR levels, faceted by stimulus class:
cols_order = colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5)
plot_data = subset(dt_pred_rest_insert, id == example_id) %>%
  filter(insert_index == "inserted") %>% filter(speed == "32 ms") %>%
  filter(insert == 1) %>%
  # fix the display order of the SNR levels (highest to lowest):
  transform(snr_char = factor(as.factor(snr_char), levels = c("4/5", "1/2", "1/4", "1/8", "0"))) %>% setDT(.)
fig_prob_diff_snr = ggplot(data = plot_data, aes(
  x = as.factor(t), y = prob_seq_snr, color = snr_char, group = snr_char)) +
  geom_line() + geom_point(alpha = 0.3) +
  facet_wrap(~ class) + ylim(c(0,1)) +
  scale_color_manual(values = cols_order, name = "SNR level") +
  ylab("Probability (%)") + xlab("Time (TRs)") +
  theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
  # place the legend inside the bottom-right corner of the plot area:
  theme(legend.position = c(1, 0), legend.justification = c(1, 0)) +
  # relabel the 12 inserted TRs as 1..12 but only print first and last label:
  scale_x_discrete(labels = label_fill(seq(1, 12, 1), mod = 11),
  breaks = seq(min(plot_data$t), max(plot_data$t), 1)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
  panel.grid.major = element_blank(),
  panel.grid.minor = element_blank(),
  panel.border = element_blank(),
  panel.background = element_blank())
fig_prob_diff_snr
  1385. ```
  1386. Plot differences in probability between true and augmented rest data.
  1387. ```{r}
  1388. plot_data = subset(dt_pred_rest_insert, id == example_id & speed == "32 ms") %>%
  1389. filter(snr_char %in% c("4/5", "1/4") & insert == c(2, 6)) %>%
  1390. mutate(snr_char = paste("SNR level:", snr_char)) %>%
  1391. mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts")))
  1392. # create data frame to plot rectangles where trials were inserted:
  1393. plot_rectangles = plot_data %>%
  1394. filter(insert_start == 1) %>%
  1395. mutate(xmin = t) %>%
  1396. mutate(xmax = ifelse(speed == "32 ms", xmin + 11, xmin + 11)) %>%
  1397. mutate(ymin = -100, ymax = 100)
  1398. fig_inserts = ggplot(data = plot_data, mapping = aes(
  1399. x = as.numeric(t), y = as.numeric(prob_diff * 100), group = as.factor(position))) +
  1400. geom_hline(aes(yintercept = 0), color = "gray") +
  1401. geom_line(mapping = aes(color = as.factor(position))) +
  1402. facet_grid(vars(as.factor(insert)), vars(fct_rev(as.factor(snr_char)))) +
  1403. geom_rect(data = plot_rectangles, aes(
  1404. xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax),
  1405. alpha = 0.2, inherit.aes = FALSE, show.legend = FALSE, fill = "gray") +
  1406. theme(legend.position = "top", legend.direction = "horizontal",
  1407. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  1408. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  1409. xlab("Time (TRs)") + ylab("Difference in probability (%)\nof sequence-free vs. -inserted rest") +
  1410. scale_colour_manual(name = "Serial position", values = color_events) +
  1411. coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
  1412. xlim = c(0, 250), ylim = c(-100, 100)) +
  1413. guides(color = guide_legend(nrow = 1)) +
  1414. scale_x_continuous(labels = label_fill(seq(0, 250, 25), mod = 5),
  1415. breaks = seq(0, 250, 25)) +
  1416. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  1417. theme(panel.border = element_blank(), axis.line = element_line()) +
  1418. theme(axis.line = element_line(colour = "black"),
  1419. panel.grid.major = element_blank(),
  1420. panel.grid.minor = element_blank(),
  1421. panel.border = element_blank(),
  1422. panel.background = element_blank())
  1423. fig_inserts
  1424. ```
  1425. We calculate the slopes for each speed, snr and insert level.
Attention: this takes a while to run (roughly 13 minutes), because a large number of linear models are estimated.
  1428. ```{r}
# Sanity check: wherever the serial position differs from the inserted
# position, the row must belong to an inserted sequence segment:
dt_pred_rest_insert %>%
  verify(.[position != position_insert,]$insert_index == "inserted")
  1431. ```
  1432. ```{r}
start_time = Sys.time()
# calculate the slope for each inserted and snr-adjusted rest data segment:
dt_pred_rest_insert_slope = dt_pred_rest_insert %>% setDT(.) %>%
  # per TR (t), regress probability on serial position across the five event
  # classes; the sign-flipped slope is the sequentiality index, computed once
  # for the raw rest data and once for the sequence-inserted (blended) data:
  .[, by = .(id, classification, speed, trial_tITI, insert, snr, snr_char, t, insert_num), .(
    num_events = .N,
    mean_position = mean(position),
    mean_position_insert = mean(position_insert),
    slope_rest = coef(lm(probability ~ position))[2] * (-1),
    slope_insert = coef(lm(prob_insert_blend ~ position_insert))[2] * (-1),
    sd_prob_rest = sd(probability),
    sd_prob_insert = sd(prob_insert_blend)
  # verify that the slope was calculated across five events with mean position of 3:
  )] %>% verify(all(num_events == 5)) %>% verify(all(mean_position == 3)) %>%
  verify(all(mean_position_insert == 3)) %>%
  # sort the data table by speed, trial, number of inserts, snr level and time:
  setorder(id, speed, trial_tITI, insert, snr, snr_char, t) %>%
  # filter for one-versus-rest classification only:
  filter(classification == "ovr") %>% setDT(.)
end_time = Sys.time()
# report how long the model fitting took (one lm() per TR and condition):
print(end_time - start_time)
  1454. ```
  1455. Plot slopes with inserts:
  1456. ```{r}
  1457. colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
  1458. plot_rest = subset(dt_pred_rest_insert_slope, id == example_id) %>%
  1459. filter(snr_char %in% c("0", "4/5") & insert == c(6)) %>%
  1460. mutate(snr_char = paste("SNR level:", snr_char)) %>%
  1461. mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts")))
  1462. # create data frame to plot rectangles where trials were inserted:
  1463. plot_rectangles = plot_rest %>%
  1464. filter(!is.na(insert_num)) %>% setDT(.) %>%
  1465. .[, by = .(id, classification, speed, insert, snr, snr_char, insert_num), .(
  1466. xmin = min(t), xmax = max(t), ymin = -0.25, ymax = 0.25
  1467. )]
  1468. plot_inserts = plot_rest %>%
  1469. filter(!is.na(insert_num)) %>% setDT(.)
  1470. # plot the
  1471. fig_inserts_slopes = ggplot(data = plot_rest, mapping = aes(
  1472. x = as.numeric(t), y = as.numeric(slope_rest), color = fct_rev(speed))) +
  1473. geom_hline(aes(yintercept = 0), color = "black") +
  1474. geom_line(aes(color = "rest")) +
  1475. facet_grid(vars(as.factor(insert)), vars(as.factor(snr_char))) +
  1476. #facet_wrap(~ speed, nrow = 2, ncol = 1) +
  1477. facet_grid(vars(fct_rev(as.factor(speed))), vars(fct_rev(as.factor(snr_char)))) +
  1478. geom_rect(data = plot_rectangles, aes(
  1479. xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, fill = speed),
  1480. alpha = 0.2, inherit.aes = FALSE, show.legend = FALSE) +
  1481. geom_line(data = plot_inserts, aes(
  1482. y = slope_insert, color = speed, group = interaction(insert_num, speed))) +
  1483. theme(legend.position = "top", legend.direction = "horizontal",
  1484. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  1485. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  1486. xlab("Time (TRs)") + ylab("Regression slope") +
  1487. scale_colour_manual(name = "Speed", values = colors) +
  1488. scale_fill_manual(name = "Speed", values = colors) +
  1489. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(0, 250)) +
  1490. guides(color = guide_legend(nrow = 1)) +
  1491. scale_x_continuous(labels = label_fill(seq(0, 250, 25), mod = 5), breaks = seq(0, 250, 25)) +
  1492. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  1493. theme(axis.line = element_line(colour = "black"),
  1494. panel.grid.major = element_blank(),
  1495. panel.grid.minor = element_blank(),
  1496. panel.border = element_blank(),
  1497. panel.background = element_blank())
  1498. fig_inserts_slopes
  1499. ```
  1500. ```{r}
  1501. # select variable: either slopes or standard deviation:
# Compare augmented (sequence-inserted) vs. sequence-free rest per speed,
# insert count and SNR level via one-sample t-tests against zero.
#
# Args:
#   dt: data.table of per-TR slopes/SDs (dt_pred_rest_insert_slope).
#   variable: name (string) of the difference score to test, either
#     "mean_slope_diff" or "mean_sd_diff".
# Returns: data.table of t-test results with FDR-adjusted, rounded and
#   log-transformed p-values per classification/speed/insert/snr cell.
insert_stat_func = function(dt, variable) {
  dt_pred_rest_insert_slope_stat = dt %>%
    # calculate mean slope coefficients across all augmented rest TRs:
    .[, by = .(id, classification, speed, insert, snr, snr_char), .(
      num_trs = .N,
      mean_slope_rest = mean(abs(slope_rest)),
      mean_slope_insert = mean(abs(slope_insert)),
      mean_sd_prob_rest = mean(sd_prob_rest),
      mean_sd_prob_insert = mean(sd_prob_insert)
    )] %>% verify(all(num_trs == num_rest_trs)) %>%
    # calculate the difference between the inserted and sequence-free rest data:
    mutate(mean_slope_diff = mean_slope_insert - mean_slope_rest) %>%
    mutate(mean_sd_diff = mean_sd_prob_insert - mean_sd_prob_rest) %>% setDT(.) %>%
    # conduct a t-test comparing sequence-free vs. sequence-including rests:
    .[, by = .(classification, speed, insert, snr, snr_char), {
      ttest_results = t.test(get(variable), mu = 0, alternative = "two.sided")
      list(
        num_subs = .N,
        mean_variable_diff = mean(get(variable)),
        pvalue = ttest_results$p.value,
        tvalue = round(ttest_results$statistic, 2),
        df = ttest_results$parameter,
        sem_upper = mean(get(variable)) + (sd(get(variable))/sqrt(.N)),
        sem_lower = mean(get(variable)) - (sd(get(variable))/sqrt(.N))
    )}] %>% verify(all(num_subs == 32)) %>% verify(all((num_subs - df) == 1)) %>%
    # adjust p-values for multiple comparisons (FDR across the snr x insert
    # cells within each classification and speed) and
    # check if the number of comparisons matches expectations:
    .[, by = .(classification, speed), ":=" (
      num_comp = .N,
      pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
    )] %>% verify(all(num_comp == 30, na.rm = TRUE)) %>%
    # round the original p-values according to the apa standard:
    mutate(pvalue_round = round_pvalues(pvalue)) %>%
    # round the adjusted p-value:
    mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
    # create new variable indicating significance below 0.05
    mutate(significance = ifelse(pvalue_adjust < 0.05, "*", "")) %>%
    # log-transform the p-values (base 20, used for later plotting):
    mutate(pvalue_log = log(pvalue_adjust, base = 20)) %>%
    # adjust the ordering of the snr levels:
    transform(snr_char = factor(snr_char, levels = c("0", "1/8", "1/4", "1/2", "4/5"))) %>%
    # set order:
    setorder(classification, speed, insert, snr) %>% setDT(.)
  return(dt_pred_rest_insert_slope_stat)
}
# run the analysis once for the slope difference and once for the SD difference:
snr_insert_stat_slope = insert_stat_func(dt = dt_pred_rest_insert_slope, variable = "mean_slope_diff")
snr_insert_stat_sd = insert_stat_func(dt = dt_pred_rest_insert_slope, variable = "mean_sd_diff")
# show only the comparisons that survive FDR correction:
snr_insert_stat_slope %>% filter(pvalue_adjust < 0.05)
snr_insert_stat_sd %>% filter(pvalue_adjust < 0.05)
  1551. ```
  1552. Plot average slope regression coefficients and p-values:
  1553. ```{r}
cols_order = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
# Plot one statistic of the SNR/insert analysis as lines over insert count.
#
# Args:
#   dt: output of insert_stat_func().
#   variable: which quantity to plot — "pvalue_log", "mean_slope_diff" or
#     "mean_sd_diff" (the latter two are both stored in mean_variable_diff).
# Returns: a ggplot object (one panel per speed, one line per SNR level).
plot_snr_insert = function(dt, variable) {
  if (variable == "pvalue_log") {
    yrange = seq(-10, 0, 1)
    ylabel = "p-value\n(base-20 log-transformed)"
    yintercept = -1
    mod = 2
    # NOTE(review): setting the SEM bounds to 10 presumably pushes the ribbon
    # outside the y-range so no SEM band is drawn for p-values — confirm:
    dt$sem_upper = 10; dt$sem_lower = 10;
  }
  else if (variable == "mean_slope_diff") {
    # the t-test output stores the tested difference in mean_variable_diff:
    variable = "mean_variable_diff"
    yrange = seq(-0.007, 0.003, 0.001)
    mod = 2
    ylabel = "Regression slope\n(relative to pre-task rest)"
    yintercept = 0
  }
  else if (variable == "mean_sd_diff") {
    variable = "mean_variable_diff"
    yrange = seq(-0.03, 0.01, 0.01)
    mod = 1
    ylabel = "SD of probability\n(relative to pre-task rest)"
    yintercept = 0
  }
  ggplot(data = dt, mapping = aes(
    x = as.factor(insert), y = as.numeric(get(variable)), group = as.factor(snr_char),
    color = as.factor(snr_char), fill = as.factor(snr_char))) +
    facet_wrap(~ fct_rev(as.factor(speed))) +
    # dashed reference line (0 for differences, log20(0.05)-ish for p-values):
    geom_hline(aes(yintercept = yintercept), color = "black", linetype = "dashed") +
    geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
    geom_line() + geom_point(pch = 15) +
    theme(legend.position = "top", legend.direction = "horizontal",
    legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
    legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
    xlab("Number of inserted sequence trials") + ylab(ylabel) +
    coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(min(yrange), max(yrange))) +
    guides(color = guide_legend(nrow = 1)) +
    scale_color_manual(values = cols_order, name = "SNR level") +
    scale_fill_manual(values = cols_order, name = "SNR level") +
    scale_y_continuous(labels = label_fill(yrange, mod = mod), breaks = yrange) +
    theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
    theme(axis.line = element_line(colour = "black"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank())
}
# generate p-value and effect-size panels for both statistics:
fig_snr_pmat_slope = plot_snr_insert(dt = snr_insert_stat_slope, variable = "pvalue_log")
fig_snr_data_slope = plot_snr_insert(dt = snr_insert_stat_slope, variable = "mean_slope_diff")
fig_snr_pmat_sd = plot_snr_insert(dt = snr_insert_stat_sd, variable = "pvalue_log")
fig_snr_data_sd = plot_snr_insert(dt = snr_insert_stat_sd, variable = "mean_sd_diff")
fig_snr_pmat_slope; fig_snr_data_slope; fig_snr_pmat_sd; fig_snr_data_sd;
  1605. ```
  1606. # Frequency analysis
  1607. We calculate the frequency spectra for sequence-free and sequence-inserted
  1608. (fast and slow sequences) rest:
  1609. ```{r}
# baseline power ratio (ratio of 1 = equal power to the SNR-0 spectrum):
baseline = 1
# get frequency spectrum of fast and slow sequence data:
dt_pred_rest_insert_freq = dt_pred_rest_insert_slope %>%
  # calculate the lomb scargle frequency spectra (0 to 0.3 Hz, 2x
  # oversampling) of the slope time course for every snr and insert level:
  .[, by = .(classification, id, speed, insert, snr, snr_char), {
    frequency_spectrum = lsp(x = slope_insert, times = 1:.N, from = 0, to = 0.3, plot = FALSE, ofac = 2)
    # filter_width: number of bins covering ~0.05 Hz, used for smoothing below:
    list(num_trs = .N, freq_bins = frequency_spectrum$scanned, power = frequency_spectrum$power,
    filter_width = round(0.05/mean(diff(frequency_spectrum$scanned)))
  )}] %>% verify(all(num_trs == 233)) %>% verify(length(unique(filter_width)) == 1) %>%
  # smooth the frequency spectra with a moving-average filter:
  .[, by = .(classification, id, speed, insert, snr, snr_char), ":=" (
    power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
    freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
  )] %>%
  # add a counter for the frequency spectrum
  .[, by = .(classification, id, speed, snr, snr_char, insert), ":=" (bin = 1:.N)] %>%
  # verify that the number of frequency bins is the same for all participants, snr, and insert levels:
  verify(length(unique(.[, by = .(classification, id, speed, insert, snr), .(
    num_bins = length(unique(bin)))]$num_bins)) == 1) %>%
  # calculate power of every snr level relative to the snr 0 level
  # (subtracting the baseline, so 0 = no change relative to SNR 0):
  .[, by = .(classification, id, speed, insert), ":=" (
    power_smooth_rel = as.vector(sapply(X = sort(unique(snr)), FUN = function(x){
      power_smooth[snr == x]/power_smooth[snr == 0]}) - baseline)
  )]
# calculate the mean smoothed frequency for every snr and insert level across frequency bins:
dt_pred_rest_insert_freq_mean = dt_pred_rest_insert_freq %>%
  .[, by = .(classification, speed, snr, snr_char, insert, freq_bins_smooth, bin), .(
    num_subs = .N,
    mean_power_smooth_rel = mean(power_smooth_rel),
    sem_upper = mean(power_smooth_rel) + (sd(power_smooth_rel)/sqrt(.N)),
    sem_lower = mean(power_smooth_rel) - (sd(power_smooth_rel)/sqrt(.N))
  )] %>% verify(all(num_subs == 32))
  1642. ```
  1643. ```{r}
  1644. colors_snr = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
  1645. plot_data = dt_pred_rest_insert_freq_mean %>%
  1646. filter(!is.na(mean_power_smooth_rel)) %>%
  1647. filter(insert %in% c(2, 6)) %>%
  1648. mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts"))) %>%
  1649. # adjust the ordering of the snr levels:
  1650. transform(snr_char = factor(snr_char, levels = c("0", "1/8", "1/4", "1/2", "4/5"))) %>%
  1651. setDT(.)
  1652. freq_range = 0.01
  1653. num_plot_inserts = length(unique(plot_data$insert))
  1654. num_speed_inserts = length(unique(plot_data$speed))
  1655. frequency_expectation <- frequency_expectation[1:3,]
  1656. frequency_expectation_snr = frequency_expectation[1:3,] %>%
  1657. mutate(colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")) %>%
  1658. .[rep(seq_len(nrow(.)), each = num_plot_inserts * num_speed_inserts), ] %>%
  1659. mutate(xmax = xintercept + freq_range, xmin = xintercept - freq_range) %>%
  1660. mutate(speed = rep(unique(plot_data$speed), num_plot_inserts * nrow(frequency_expectation))) %>%
  1661. mutate(insert = rep(rep(unique(plot_data$insert), each = 2), nrow(frequency_expectation))) %>%
  1662. transform(xintercept = ifelse(colors == "gray", 0.03, xintercept)) %>%
  1663. filter(colors != "gray")
  1664. fig_freq_lsp_insert = ggplot(data = plot_data, aes(
  1665. color = as.factor(snr_char), y = as.numeric(mean_power_smooth_rel), x = as.numeric(freq_bins_smooth))) +
  1666. geom_rect(data = frequency_expectation_snr, aes(
  1667. xmin = xmin, xmax = xmax, ymin = -0.2, ymax = 0.3),
  1668. alpha = 0.1, inherit.aes = FALSE, show.legend = FALSE, fill = frequency_expectation_snr$colors) +
  1669. #geom_vline(data = frequency_expectation_snr, aes(xintercept = xintercept),
  1670. # linetype = "dashed", color = frequency_expectation_snr$colors) +
  1671. geom_line() +
  1672. facet_grid(vars(insert), vars(fct_rev(as.factor(speed)))) +
  1673. geom_ribbon(aes(fill = snr_char, ymin = sem_lower, ymax = sem_upper),
  1674. alpha = 0.3, color = NA) +
  1675. #geom_text(data = frequency_expectation_snr, aes(
  1676. # x = xintercept + 0.03, y = -0.15, label = paste(round(xintercept, 2), "Hz")),
  1677. # color = frequency_expectation_snr$colors, size = 3) +
  1678. xlab("Frequency") + ylab("Power\n(relative to pre-task rest)") +
  1679. scale_color_manual(name = "SNR level", values = colors_snr, guide = FALSE) +
  1680. scale_fill_manual(name = "SNR level", values = colors_snr, guide = FALSE) +
  1681. theme(legend.position = "top", legend.direction = "horizontal",
  1682. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  1683. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  1684. coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
  1685. xlim = c(0, 0.3), ylim = c(-0.2, 0.3)) +
  1686. #guides(color = guide_legend(nrow = 1)) +
  1687. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  1688. theme(axis.line = element_line(colour = "black"),
  1689. panel.grid.major = element_blank(),
  1690. panel.grid.minor = element_blank(),
  1691. panel.border = element_blank(),
  1692. panel.background = element_blank())
  1693. ```
```{r}
# extract mean relative power inside the expected-frequency windows
# (fast, slow, and a control "rest" window around 0.03 Hz) per participant:
dt_pred_rest_lsp_insert_exp = dt_pred_rest_insert_freq %>%
  .[, by = .(classification, id, speed, snr, snr_char, insert), {
    # indices of the smoothed frequency bins closest to each window edge
    # (window = expected frequency +/- freq_range):
    fast_min = which.min(abs(freq_bins_smooth - delta_freq_fast + freq_range))
    fast_max = which.min(abs(freq_bins_smooth - delta_freq_fast - freq_range))
    slow_min = which.min(abs(freq_bins_smooth - delta_freq_slow + freq_range))
    slow_max = which.min(abs(freq_bins_smooth - delta_freq_slow - freq_range))
    rest_min = which.min(abs(freq_bins_smooth - 0.03 + freq_range))
    rest_max = which.min(abs(freq_bins_smooth - 0.03 - freq_range))
    list(
      # window edges in Hz (for reference / plotting):
      fast_min = freq_bins_smooth[fast_min], fast_max = freq_bins_smooth[fast_max],
      slow_min = freq_bins_smooth[slow_min], slow_max = freq_bins_smooth[slow_max],
      # mean relative power across all bins inside each window:
      power_index_fast = mean(power_smooth_rel[fast_min:fast_max]),
      power_index_slow = mean(power_smooth_rel[slow_min:slow_max]),
      power_index_rest = mean(power_smooth_rel[rest_min:rest_max])
    )}] %>%
  # melt all index variables into one column:
  gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>% setDT(.) %>%
  # label each index with its (rounded) expected frequency:
  .[grepl("index_fast", index), label := paste0(round(delta_freq_fast, 2), "")] %>%
  .[grepl("index_slow", index), label := paste0(round(delta_freq_slow, 2), "")] %>%
  .[grepl("index_rest", index), label := paste0(round(delta_freq_rest, 2), "")] %>%
  setorder(., classification, id, speed, snr, insert) %>%
  # filter for specific inserts levels here:
  filter(insert %in% c(2, 6)) %>% setDT(.)
```
```{r}
# calculate one-sample t-tests of relative power at the expected
# frequencies against zero, per SNR and insert level:
dt_pred_rest_lsp_insert_exp_mean = dt_pred_rest_lsp_insert_exp %>%
  # filter out the irrelevant rest power index:
  filter(index != "power_index_rest") %>% setDT(.) %>%
  # compare the mean power in the expected frequency for every SNR and insert level:
  .[, by = .(classification, speed, insert, snr, snr_char, index), {
    ttest_results = t.test(power, mu = 0, alternative = "two.sided", paired = FALSE)
    list(
      num_subs = .N,
      pvalue = ttest_results$p.value,
      tvalue = ttest_results$statistic,
      df = ttest_results$parameter,
      # Cohen's d for a one-sample test: mean difference from 0 over sd
      cohens_d = (mean(power) - 0)/sd(power)
    )}] %>% verify(all(num_subs == 32)) %>% verify((num_subs - df) == 1) %>%
  # adjust p-values for multiple comparisons (FDR within each
  # classification x speed combination):
  .[, by = .(classification, speed), ":=" (
    num_comp = .N,
    pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
  )] %>%
  # round the original p-values according to the apa standard:
  mutate(pvalue_round = round_pvalues(pvalue)) %>%
  # round the adjusted p-value:
  mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
  # create new variable indicating significance below 0.05
  mutate(significance = ifelse(pvalue_adjust < 0.05, "*", ""))
# show all tests with p-values below the significance level
# NOTE(review): this filters on the raw, not the FDR-adjusted, p-value —
# confirm whether uncorrected values are intended here
dt_pred_rest_lsp_insert_exp_mean %>% filter(pvalue < 0.05)
```
  1749. ```{r}
  1750. cols_order = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
  1751. plot_data = dt_pred_rest_lsp_insert_exp %>%
  1752. # add to inserts label:
  1753. mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts"))) %>%
  1754. # adjust the ordering of the snr levels:
  1755. transform(snr_char = factor(snr_char, levels = c("0", "1/4", "1/8", "1/2", "4/5"))) %>%
  1756. # filter out the irrelvant rest power index:
  1757. filter(index != "power_index_rest") %>% setDT(.)
  1758. fig_freq_exp_insert = ggplot(data = plot_data, aes(
  1759. y = power, x = fct_rev(label), fill = snr_char, color = snr_char, group = snr)) +
  1760. geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
  1761. width = 0.8) +
  1762. geom_point(position = position_jitterdodge(
  1763. jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
  1764. alpha = 0.1, inherit.aes = TRUE, pch = 16) +
  1765. facet_grid(vars(insert), vars(fct_rev(as.factor(speed)))) +
  1766. stat_summary(fun.data = "mean_se", geom = "errorbar",
  1767. position = position_dodge(0.9), width = 0, color = "black") +
  1768. xlab("Predicted frequency") + ylab("Power\n(relative to pre-task rest)") +
  1769. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-0.1, 0.3)) +
  1770. scale_fill_manual(values = cols_order, name = "SNR level", guide = FALSE) +
  1771. scale_color_manual(values = cols_order, name = "SNR level", guide = FALSE) +
  1772. theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  1773. theme(legend.position = "top", legend.direction = "horizontal",
  1774. legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
  1775. legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  1776. #guides(color = guide_legend(nrow = 1)) +
  1777. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  1778. theme(axis.line = element_line(colour = "black"),
  1779. panel.grid.major = element_blank(),
  1780. panel.grid.minor = element_blank(),
  1781. panel.border = element_blank(),
  1782. panel.background = element_blank())
  1783. fig_freq_exp_insert
  1784. ```
# Plotting
Main figure:
```{r}
# assemble the main figure from the individual panels (cowplot::plot_grid):
panel_1 = plot_grid(fig_sd_prob, fig_mean_beta, ncol = 2, labels = c("a", "b"))
panel_2 = plot_grid(fig_prob_slope, ncol = 1, labels = c("c"))
panel_3 = plot_grid(fig_freq_lsp, fig_freq_exp, ncol = 2, labels = c("d", "e"))
panel_4 = plot_grid(fig_snr_data_sd, fig_snr_pmat_sd, labels = c("f", "g"))
panel_5 = plot_grid(fig_freq_lsp_insert, fig_freq_exp_insert, labels = c("h", "i"))
# stack the panel rows into the final figure:
plot_grid(plot_grid(panel_1, panel_2, ncol = 2), panel_3, panel_4, panel_5, ncol = 1)
```
  1795. ```{r}
  1796. ggsave(filename = "highspeed_plot_decoding_rest.pdf",
  1797. plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  1798. dpi = "retina", width = 10, height = 12)
  1799. ggsave(filename = "wittkuhn_schuck_figure_5.pdf",
  1800. plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  1801. dpi = "retina", width = 10, height = 12)
  1802. ```
Supplementary figure:
```{r}
# assemble the supplementary figure (2 columns x 3 rows, auto labels):
plot_grid(fig_prob_time_seq, fig_prob_diff_snr, fig_inserts, fig_inserts_slopes,
          fig_snr_pmat_slope, fig_snr_data_slope, labels = "auto", ncol = 2, nrow = 3)
```
```{r}
# save the supplementary decoding figure to the figures directory:
ggsave(filename = "highspeed_plot_decoding_rest_supplement.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 10, height = 10)
```
We average the data across participants:
```{r}
# average the spectral densities across participants (sequence-free rest):
dt_pred_rest_freq_mean = dt_pred_rest_freq %>%
  .[, by = .(classification, freq_bins, t), .(
    num_subs = .N,
    mean_spec = mean(spec),
    # standard error of the mean across participants:
    sem_upper = mean(spec) + (sd(spec)/sqrt(.N)),
    sem_lower = mean(spec) - (sd(spec)/sqrt(.N))
  )] %>% verify(all(num_subs == 32))
# average across participants for sequence-inserted rest.
# NOTE(review): this overwrites dt_pred_rest_insert_freq_mean from the earlier
# Lomb-Scargle chunk with a different schema (spec / freq_bins / t instead of
# the smoothed relative-power columns) — presumably an alternative spectral
# analysis; confirm the two chunks are not meant to coexist.
dt_pred_rest_insert_freq_mean = dt_pred_rest_insert_freq %>%
  .[, by = .(classification, speed, insert, snr, snr_char, freq_bins, t), .(
    num_subs = .N,
    mean_spec = mean(spec),
    sem_upper = mean(spec) + (sd(spec)/sqrt(.N)),
    sem_lower = mean(spec) - (sd(spec)/sqrt(.N))
  )] %>% verify(all(num_subs == 32))
```
We calculate the mean expected frequency based on sine response functions:
  1831. ```{r}
  1832. fast_frequency = 1 / (5.24 + 2 * (0.1 + 0.032))
  1833. slow_frequency = 1 / (5.24 + 2 * (0.1 + 2.048))
  1834. # create a data table that is used for plotting later:
  1835. frequency_expectation = data.table(speed = c("32 ms", "2048 ms"),
  1836. xintercept = c(fast_frequency, slow_frequency))
  1837. ```
  1838. ```{r}
  1839. color_adjust = 80
  1840. color_all = as.vector(sapply(X = seq(num_inserts), FUN = function(x){
  1841. colorRampPalette(c(cols_order[x], '#333333'), space = 'Lab')(color_adjust)[1:num_inserts]}))
  1842. color_sort = c()
  1843. for (i in seq(1:num_inserts)) {
  1844. color_sort = c(color_sort, color_all[seq(i, length(color_all), by = num_inserts)])
  1845. }
  1846. # create a data table with the mean frequencies during sequence-free rest:
  1847. dt1 = dt_pred_rest_freq_mean; dt1$speed = "32 ms"
  1848. dt2 = dt_pred_rest_freq_mean; dt2$speed = "2048 ms"
  1849. dt_rest_freq_plot = rbind(dt1, dt2)
  1850. # create a figure of the frequency densities:
  1851. fig_rest_freq_density = ggplot(data = subset(dt_pred_rest_insert_freq_mean), aes(
  1852. color = interaction(as.factor(snr_char), as.factor(insert)),
  1853. y = as.numeric(mean_spec), x = as.numeric(freq_bins))) +
  1854. facet_wrap(~ fct_rev(as.factor(speed))) +
  1855. #geom_ribbon(aes(fill = interaction(as.factor(snr_char), as.factor(insert)),
  1856. # ymin = sem_lower, ymax = sem_upper), alpha = 0.1, color = NA) +
  1857. geom_line() +
  1858. geom_line(data = dt_rest_freq_plot, aes(y = as.numeric(mean_spec), x = as.numeric(freq_bins)),
  1859. inherit.aes = FALSE, color = "black") +
  1860. #geom_vline(data = frequency_expectation, aes(xintercept = xintercept), linetype = "dashed") +
  1861. theme(legend.position = "top", legend.direction = "horizontal",
  1862. legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
  1863. legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  1864. xlab("Frequency (Hz)") + ylab("Spectral density") +
  1865. coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 4)) +
  1866. scale_color_manual(values = color_sort, name = "SNR level", guide = FALSE) +
  1867. scale_fill_manual(values = color_sort, name = "SNR level", guide = FALSE) +
  1868. theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2)))
  1869. fig_rest_freq_density
  1870. ```
We calculate the mean frequencies for each SNR and insert level:
```{r, results = "hold"}
# calculate the mean frequency for sequence-inserted rest as the
# spectral-density-weighted average of the frequency bins:
dt_pred_rest_insert_freq_stat = dt_pred_rest_insert_freq %>%
  .[, by = .(classification, id, speed, snr, snr_char, insert), .(
    num_freq_bins = .N,
    mean_freq = sum((spec * freq_bins)/sum(spec))
  )] %>% verify(all(num_freq_bins == 120))
# average the mean frequencies across participants per snr and insert level:
dt_pred_rest_insert_freq_stat_mean = dt_pred_rest_insert_freq_stat %>%
  .[, by = .(classification, speed, insert, snr, snr_char), .(
    num_subs = .N,
    mean_freq = mean(mean_freq),
    sem_upper = mean(mean_freq) + (sd(mean_freq)/sqrt(.N)),
    sem_lower = mean(mean_freq) - (sd(mean_freq)/sqrt(.N))
  )] %>% verify(all(num_subs == 32))
# calculate the mean frequencies for sequence-free resting state:
dt_pred_rest_freq_stat = dt_pred_rest_freq %>% setDT(.) %>%
  .[, by = .(classification, id), .(
    num_freq_bins = .N,
    mean_freq = sum((spec * freq_bins)/sum(spec))
  )] %>% verify(all(num_freq_bins == 120)) %>%
  # add variables to match number of columns in sequence-inserted data:
  mutate(snr = 0, insert = 0, snr_char = "0", speed = "rest") %>% setDT(.)
# calculate mean frequency during sequence-free rest used for plotting
# (duplicated once per speed facet):
dt1 = dt_pred_rest_freq_stat %>% .[, by = .(classification), .(mean_freq = mean(mean_freq))]; dt1$speed = "32 ms"
dt2 = dt_pred_rest_freq_stat %>% .[, by = .(classification), .(mean_freq = mean(mean_freq))]; dt2$speed = "2048 ms"
dt_rest_freq_plot = rbind(dt1, dt2)
# combine the sequence-inserted and sequence-free rest in one data table for linear models:
dt_pred_rest_freq_all_stat = rbind(dt_pred_rest_insert_freq_stat, dt_pred_rest_freq_stat) %>%
  # standardize the explanatory variables:
  mutate(snr_z = scale(snr), insert_z = scale(insert)) %>%
  transform(speed = as.factor(speed))
# linear mixed effects models comparing mean frequency between conditions,
# with by-participant random intercepts and slopes (lcctrl = shared
# optimizer control settings defined elsewhere in this file):
lme_rest_fast_rest = lmer(mean_freq ~ speed + (1 + speed | id),
  data = subset(dt_pred_rest_freq_all_stat, speed %in% c("32 ms", "rest")), control = lcctrl)
lme_rest_slow_rest = lmer(mean_freq ~ speed + (1 + speed | id),
  data = subset(dt_pred_rest_freq_all_stat, speed %in% c("2048 ms", "rest")), control = lcctrl)
lme_rest_slow_fast = lmer(mean_freq ~ speed * snr_z * insert_z + (1 + speed + snr_z + insert_z | id),
  data = subset(dt_pred_rest_freq_all_stat, speed %in% c("2048 ms", "32 ms")), control = lcctrl)
anova(lme_rest_fast_rest)
anova(lme_rest_slow_rest)
anova(lme_rest_slow_fast)
```
```{r}
# line plot of mean frequency as a function of inserted sequence trials,
# one line per SNR level; the dashed horizontal line marks the mean
# frequency during sequence-free rest:
fig_rest_freq_mean = ggplot(data = dt_pred_rest_insert_freq_stat_mean, mapping = aes(
  x = as.factor(insert), y = as.numeric(mean_freq), group = as.factor(snr_char),
  color = as.factor(snr_char), fill = as.factor(snr_char))) +
  facet_wrap(~ fct_rev(as.factor(speed))) +
  geom_hline(data = dt_rest_freq_plot, aes(yintercept = as.numeric(mean_freq)),
             color = "black", linetype = "dashed") +
  # SEM ribbon around each snr line:
  geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.1, color = NA) +
  geom_line() + geom_point(pch = 15) +
  theme(legend.position = "top", legend.direction = "horizontal",
        legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  xlab("Number of inserted sequence trials") + ylab("Mean frequency (Hz)") +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0.15, 0.30)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_color_manual(values = cols_order, name = "SNR level") +
  scale_fill_manual(values = cols_order, name = "SNR level") +
  theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2)))
fig_rest_freq_mean
```
```{r}
# assemble the SNR / inserts figure from three panel rows and save it:
panel_upper = plot_grid(fig_prob_slope, fig_mean_beta, fig_inserts,
                        labels = c("a", "b", "c"), ncol = 3, rel_widths = c(1.5, 1, 2))
panel_middle = plot_grid(fig_snr_pmat, fig_snr_slope, labels = c("d", "e"), ncol = 2)
panel_lower = plot_grid(fig_rest_freq_density, fig_rest_freq_mean, labels = c("f", "g"), ncol = 2)
plot_grid(panel_upper, panel_middle, panel_lower, nrow = 3, ncol = 1)
ggsave(filename = "highspeed_plot_decoding_resting_snr_inserts.pdf",
       plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
       dpi = "retina", width = 10, height = 10)
```