<!-- NOTE(review): removed garbled extraction residue (concatenated line numbers 1–2117) that preceded the document body. -->
- ## Decoding: Resting-state
- ### Preparation
- #### Initialization
- We load the data and relevant functions:
- ```{r, warning=FALSE, message=FALSE, echo=TRUE}
- # find the path to the root of this project:
- # use quietly = TRUE so requireNamespace() does not print a load message;
- # install "here" only if it is truly missing:
- if (!requireNamespace("here", quietly = TRUE)) install.packages("here")
- if (basename(here::here()) == "highspeed") {
-   path_root <- here::here("highspeed-analysis")
- } else {
-   path_root <- here::here()
- }
- # list of participants with chance performance on sequence and repetition trials:
- chance_performer <- c("sub-24", "sub-31", "sub-37", "sub-40")
- ```
- The next step is only evaluated during manual execution:
- ```{r, eval=FALSE}
- # source all relevant functions from the setup R script:
- # (presumably this loads the data tables dt_pred and dt_events and helper
- # functions like get_pos() used below — verify in the setup script)
- source(file.path(path_root, "code", "highspeed-analysis-setup.R"))
- ```
- #### Prepare sequence events data
- We select all events files for sequence trials when the stimuli were presented:
- ```{r, echo=TRUE}
- # create a subset of the events data table only including sequence task events:
- dt_events_seq = subset(dt_events, condition == "sequence" & trial_type == "stimulus")
- # render the subset as a paged table in the knitted document:
- rmarkdown::paged_table(dt_events_seq)
- ```
- #### Prepare resting decoding data
- We prepare the resting state data for analysis:
- ```{r, results="hold"}
- # create a subset that only includes the resting state data:
- dt_pred_rest = dt_pred %>%
- # filter for rest only data that was created by the union mask:
- filter(condition == "rest" & class != "other" & mask == "union") %>%
- # sort the data frame by classification, session, etc.:
- setorder(id, classification, session, run_study, seq_tr, class) %>%
- # add random positions for the events detected during resting state:
- # group by classification and participants to generate class specific indices:
- group_by(classification, id) %>%
- # create class specific position indices randomly for every participant:
- # (sample(n) draws a random permutation of 1:n; mapvalues is presumably
- # plyr::mapvalues, loaded via the setup script — verify)
- # NOTE(review): no RNG seed is set before sample(), so the random position
- # assignment differs between runs — consider set.seed() for reproducibility
- group_modify( ~ { .x %>% mutate(position = mapvalues(
- .x$class, from = unique(.x$class), to = sample(length(unique(.x$class)))))}) %>%
- ungroup %>%
- transform(position = as.numeric(position)) %>%
- setDT(.) %>%
- # there should be a unique position for every class in every participant:
- verify(all(.[, by = .(classification, id, class), .(
- unique_positions = length(unique(position))
- )]$unique_positions == 1)) %>%
- # verify that every class is represented by all indices across participants:
- verify(all(.[, by = .(classification, class), .(
- unique_positions = length(unique(position))
- )]$unique_positions == 5)) %>%
- # check that the number of TRs for each resting state run is correct
- # (233 = presumably the TRs acquired per resting-state run — confirm):
- verify(all(.[, by = .(classification, id, run_study, session, class), .(
- num_trs = .N)]$num_trs == 233))
- ```
- For some participants, we did not acquire pre-task resting-state data in Session 1.
- We identify these participants below:
- ```{r, results="hold"}
- # create an array with all participant ids who do not have all four resting state runs:
- # (each session is expected to contain exactly 2 runs — pre- and post-task rest;
- # any (id, session) pair with a different count flags an incomplete participant)
- ids_no_pre_s1 = unique(
- (dt_pred_rest %>% setDT(.) %>%
- .[, by = .(id, session), .(num_runs = length(unique(run_study)))] %>%
- filter(!(num_runs == 2)))$id)
- # print the ids of the four participants without pre-task session 1 rest:
- print(ids_no_pre_s1)
- ```
- We select data from all remaining participants with pre-task session 1 rest:
- ```{r, results = "hold"}
- dt_pred_rest_s1 = dt_pred_rest %>%
- # exclude participants without session 1 pre-task rest:
- filter(!(id %in% c(ids_no_pre_s1))) %>%
- # exclude participants who performed below chance in seq and rep trials:
- filter(!(id %in% c(chance_performer))) %>%
- # filter for pre-task resting state data in session 1:
- #filter(run_study == "run-pre" & session == "ses-01") %>%
- # filter for the one-versus-rest classification approach:
- filter(classification == "ovr") %>%
- # filter resting state data to the same length as concatenated sequence data:
- #filter(seq_tr %in% seq(1, 180)) %>%
- # check if the number of participants is correct:
- # NOTE(review): 32 = expected sample size after exclusions — confirm
- verify(length(unique(id)) == 32) %>%
- setDT(.) %>%
- # create unique run indices through the combination of run and session labels:
- .[grepl("run-pre", run_study, fixed = TRUE) & session == "ses-01",
- run_study := "run-pre_ses-01"] %>%
- .[grepl("run-pre", run_study, fixed = TRUE) & session == "ses-02",
- run_study := "run-pre_ses-02"] %>%
- .[grepl("run-post", run_study, fixed = TRUE) & session == "ses-01",
- run_study := "run-post_ses-01"] %>%
- .[grepl("run-post", run_study, fixed = TRUE) & session == "ses-02",
- run_study := "run-post_ses-02"] %>%
- setDT(.)
- ```
- #### Prepare sequence decoding data
- We prepare the sequence trial data that will be combined with the resting state data:
- ```{r}
- # create a list of all participants that are excluded from the resting state analysis:
- sub_exclude_rest = unique(dt_events_seq$subject[
- !(dt_events_seq$subject %in% dt_pred_rest_s1$id)])
- # create a subset of the decoding data only including the sequence task data:
- dt_pred_seq = dt_pred %>%
- # create a subset of the decoding data only including the sequence task data:
- filter(condition == "sequence" & class != "other" & mask == "cv" & stim != "cue") %>%
- setDT(.) %>%
- # add the serial position, change trial and target cue to the sequence data table:
- # (get_pos() is presumably defined in the sourced setup script — verify there)
- .[, c("position", "change", "trial_cue", "trial_cue_position") := get_pos(
- .SD, dt_events_seq), by = .(id, trial, class),
- .SDcols = c("id", "trial", "class")] %>%
- # remove columns not needed to align data table with resting state data:
- mutate(change = NULL, trial_cue = NULL, trial_cue_position = NULL) %>%
- # rename the speed condition to include the "ms" label and express in milliseconds:
- transform(tITI = paste(as.character(as.numeric(tITI) * 1000), "ms")) %>%
- # remove participants that should be excluded from resting state analysis:
- filter(!(id %in% sub_exclude_rest)) %>%
- setDT(.) %>%
- # only select TRs of the forward and backward period
- filter(seq_tr %in% seq(2, 13)) %>%
- setDT(.) %>%
- # check if the number of TRs for each participant is correct:
- # (12 TRs = seq_tr 2 through 13 selected above)
- verify(.[, by = .(classification, id, tITI, trial_tITI, class), .(
- num_trs = .N)]$num_trs == 12)
- ```
- #### Combine sequence and resting data
- We combine the sequence and resting state decoding data in one data table:
- ```{r, echo=TRUE}
- #c("run-pre_ses-01", "run-post_ses-01", "run-pre_ses-02", "run-post_ses-02")
- # resting state runs to drop from the combined table:
- deselect_rest_run <- c("run-pre_ses-02", "run-post_ses-02")
- dt_pred_all = rbind(dt_pred_seq, dt_pred_rest_s1) %>%
- # filter for pre-task resting state of session 1 only:
- filter(!(run_study %in% deselect_rest_run)) %>%
- # reorder the factor levels of the combined data table to sort in ascending order:
- # NOTE(review): c(3,5,1,4,2,6) hard-codes the alphabetical level order of tITI;
- # this is fragile if the set of conditions ever changes — confirm the mapping
- transform(tITI = factor(tITI, levels(as.factor(tITI))[c(3,5,1,4,2,6)])) %>%
- # concatenate all session 1 pre-task resting state scans and add counter:
- filter(tITI %in% c("32 ms", "2048 ms", "rest")) %>%
- setDT(.) %>%
- # adjust resting state grouping variable to accommodate multiple runs:
- .[tITI == "rest", tITI := paste0(tITI, "_", run_study)] %>%
- # filter out one-versus-rest classification trials only:
- filter(classification == "ovr") %>%
- setDT(.) %>%
- # sort the data table:
- setorder(id, condition, tITI, trial_tITI, seq_tr, position) %>%
- setDT(.)
- ```
- We use a short function to run some checks on the combined data table:
- ```{r, echo=TRUE}
- # Run consistency checks on the combined sequence + resting state data table.
- # Returns the input data table unchanged if all checks pass; assertr::verify()
- # raises an error on the first failing check otherwise.
- checkdata = function(dt){
- library(data.table); library(tidyverse);
- dt %>%
- # check the number of trs in all resting state data:
- verify(.[stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, class), .(
- num_trs = .N
- )]$num_trs == 233) %>%
- # check the number of trs in all sequence trial data:
- verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI, trial_tITI, class), .(
- num_trs = .N
- )]$num_trs == 12) %>%
- # check the number of unique trials for each sequence speed condition:
- verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI), .(
- num_trials = length(unique(trial_tITI))
- )]$num_trials == 15) %>%
- # check the number of speed conditions in sequence trials
- verify(.[!stringr::str_detect(tITI, "rest"), by = .(classification, id), .(
- num_speeds = length(unique(tITI))
- )]$num_speeds == 2) %>%
- # check the number of speed conditions in resting state data:
- verify(.[stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI), .(
- num_speeds = length(unique(tITI))
- )]$num_speeds == 1) %>%
- # check the mask name in resting state data:
- verify(all(.[stringr::str_detect(tITI, "rest"), .(name_mask = unique(mask))]$name_mask == "union")) %>%
- # check the mask name in sequence trial data:
- # (the original repeated the sequence-trial TR check here under a misleading
- # "items per run study" comment; the duplicate check was removed)
- verify(all(.[!stringr::str_detect(tITI, "rest"), .(name_mask = unique(mask))]$name_mask == "cv"))
- }
- # check the current data table:
- rmarkdown::paged_table(checkdata(dt_pred_all))
- ```
- ### Decoding probabilities
- We draw a random example participant used for plotting:
- ```{r}
- # fix the RNG seed so the same example participant is drawn on every render:
- set.seed(3)
- example_id = sample(unique(dt_pred_all$id), 1)
- ```
- We concatenate all fast and slow sequence trials and resting state:
- ```{r}
- dt_pred_conc = dt_pred_all %>%
- # we add a consecutive counter that will be used to concatenate data:
- .[, by = .(classification, id, tITI, class), ":=" (t = seq(1, .N))] %>%
- # omit the last TRs in the resting state data:
- filter(!(stringr::str_detect(tITI, "rest") & seq_tr > 180)) %>%
- # we will focus all further analyses on the one-versus-rest classifier:
- filter(classification == "ovr") %>%
- setDT(.) %>%
- # verify that both resting and sequence trial data are restricted to 180 TRs:
- # (fix: "tITI" was listed twice in the grouping set; deduplicated here)
- verify(.[, by = .(classification, id, tITI, class), .(
- num_trs = .N)]$num_trs == 180) %>%
- setDT(.)
- ```
- We plot the probabilities of all concatenated sequence trials and the session 1
- resting state data (separately for each speed condition):
- ```{r, echo=FALSE}
- # plot the decoded probability time courses of the example participant;
- # NOTE(review): x is scaled by 1.25 (presumably the TR duration in seconds)
- # yet the axis label still says "Time (TRs)" — confirm which is intended
- fig_prob_time_seq = ggplot(data = subset(dt_pred_conc, id == example_id), mapping = aes(
- x = as.numeric(t) * 1.25, y = as.numeric(probability * 100), group = as.factor(position))) +
- geom_line(mapping = aes(color = as.factor(position))) +
- facet_wrap(~ as.factor(tITI)) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
- legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
- xlab("Time (TRs)") + ylab("Probability (%)") +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
- ylim = c(0, 100), xlim = c(0, 200)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank()) +
- # NOTE(review): guide = FALSE is deprecated in ggplot2 >= 3.3.4; use guide = "none"
- scale_colour_manual(name = "Serial event", values = color_events, guide = FALSE) +
- theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
- guides(color = guide_legend(nrow = 1))
- fig_prob_time_seq
- ```
- ### Calculating sequentiality
- We create a function to determine whether a certain sequential combination of the task stimuli has been presented to the participants on sequence trials or not:
- ```{r}
- # Determine whether a given stimulus sequence (character vector of length 5 in
- # serial order) was presented on sequence trials. Returns 1 (seen) or 0 (not).
- # NOTE(review): assumes the passed events table contains a single participant —
- # callers pass subset(dt_events_seq, subject == ...); confirm before reuse
- did_you_see_that <- function(seq, dt_events_seq) {
- dt_events_seq_sub <- dt_events_seq %>%
- #filter(subject == sub) %>%
- setDT(.) %>%
- # calculate if the input sequence was seen for every trial:
- .[, by = .(subject, interval_time, trial), .(
- num_events = .N,
- seen = as.numeric(all(stim_label[serial_position] == seq))
- )] %>%
- # check if the number of events per sequence is 5:
- verify(num_events == 5) %>%
- # the number of seen sequences should be either 0 (not seen) or 5 (seen five
- # times, once for each of the five different speed conditions):
- verify(.[, by = .(subject), .(sum_seen = sum(seen))]$sum_seen %in% c(0, 5)) %>%
- # for each speed condition, the number should be 1 (seen) or 0 (not seen):
- verify(.[, by = .(subject, interval_time), .(sum_seen = sum(seen))]$sum_seen %in% c(0, 1))
- # return if sequence was seen (1) or not (0):
- seen <- as.numeric(sum(dt_events_seq_sub$seen) == 5)
- return(seen)
- }
- ```
- We verify that the function works properly:
- ```{r, message=FALSE}
- # all 120 permutations of the five serial positions
- # (permn() is presumably combinat::permn or defined in the setup — verify):
- sequences = permn(1:5, sort = TRUE)
- # list of all possible stimuli:
- # (renamed from "seq" to avoid shadowing base::seq)
- stim_labels <- c("cat", "chair", "face", "house", "shoe")
- # select one example participant:
- sub = "sub-14"
- # determine if participant saw 15 combinations of the five stimuli:
- sum_seq = sum(unlist(lapply(X = sequences, function(x)
- did_you_see_that(seq = stim_labels[x], subset(dt_events_seq, subject == sub)))))
- verify(data.table(sum_seq), sum_seq == 15)
- ```
- We calculate the regression slopes for every TR and assumed sequence in resting-state data:
- ```{r, echo=TRUE, eval=FALSE}
- # create an array with all the stimulus labels:
- stimuli <- c("cat", "chair", "face", "house", "shoe")
- # register the computation start time:
- start_time <- Sys.time()
- # calculate sd probability, slope for every permutation of a sequence:
- # (this chunk is eval=FALSE: the per-permutation fits are expensive and are
- # only run manually; results are saved and reloaded further below)
- dt_pred_conc_slope_rest = dt_pred_conc %>%
- # filter for resting state analyses only:
- filter(stringr::str_detect(tITI, "rest")) %>%
- setDT(.) %>%
- # calculate sd probability, slope and if sequence was seen or not:
- .[, by = .(id, classification, tITI, trial_tITI, seq_tr), .(
- # calculate the number of events per TR and the mean position:
- num_events = .N,
- mean_position = mean(position),
- # calculate the standard deviation of the probabilities:
- sd_prob = sd(probability),
- # calculate the slope for each permuted sequence:
- # NOTE(review): the slope sign is flipped (* -1) — presumably so that forward
- # sequential order yields positive values; confirm the convention
- slope = unlist(lapply(sequences, function(x) coef(lm(probability ~ x))[2] * (-1))),
- # determine if the permuted sequence was actually seen by the subject:
- seen = unlist(lapply(sequences, function(x)
- did_you_see_that(seq = stimuli[x], subset(dt_events_seq, subject == unique(id)))))
- )] %>%
- # add a counter for all sequential combinations:
- .[, by = .(id, classification, tITI, trial_tITI, seq_tr), "sequence" := seq(1,.N)]
- # stop the counter and print the calculation time:
- end_time = Sys.time()
- print(end_time - start_time)
- ```
- We calculate the regression slopes for sequence data:
- ```{r, echo=TRUE, eval=FALSE}
- dt_pred_conc_slope_seq = dt_pred_conc %>%
- # filter for sequence data only:
- filter(!stringr::str_detect(tITI, "rest")) %>%
- base::droplevels() %>%
- setDT(.) %>%
- # calculate the slope for every participant, speed and TR:
- .[, by = .(id, classification, tITI, trial_tITI, seq_tr), .(
- # calculate the number of events per TR and the mean position:
- num_events = .N,
- mean_position = mean(position),
- # calculate the standard deviation of the probabilities:
- sd_prob = sd(probability),
- # calculate the slope of probability against the true serial position
- # (sign flipped, matching the resting-state slope convention above):
- slope = coef(lm(probability ~ position))[2] * (-1)
- )] %>%
- # add variables if sequence was seen or not and sequence counter:
- # (sequence trials always present the true order, hence seen = 1, sequence = 1)
- .[, "seen" := 1] %>%
- .[, "sequence" := 1]
- ```
- We combine the resting state and sequence trial slope data:
- ```{r, echo=TRUE, eval=FALSE}
- dt_pred_conc_slope = rbind(dt_pred_conc_slope_seq, dt_pred_conc_slope_rest) %>%
- # verify that there are 5 events per TR and their mean position is 3:
- verify(all(num_events == 5)) %>%
- verify(all(mean_position == 3)) %>%
- # sort the columns by classification approach, id, speed, trial and TR:
- setorder(classification, id, tITI, trial_tITI, seq_tr) %>%
- # add trial counter for every speed condition (and sequence):
- .[stringr::str_detect(tITI, "rest"),
- by = .(classification, id, tITI, sequence), ":=" (t = seq(1, .N))] %>%
- .[!stringr::str_detect(tITI, "rest"),
- by = .(classification, id, tITI), ":=" (t = seq(1, .N))] %>%
- # group by classification, id, speed and trial and calculate step sizes:
- .[!stringr::str_detect(tITI, "rest"), by = .(classification, id, tITI),
- trial_start := trial_tITI - shift(trial_tITI)] %>%
- # create a variable that signifies the trial starts for sequence trials:
- transform(trial_start = ifelse(
- is.na(trial_start) & !stringr::str_detect(tITI, "rest"), 1, trial_start)) %>%
- # create new variable names for resting state data:
- # (note: this renaming must stay after the str_detect("rest") steps above,
- # which rely on the lowercase "rest" prefix)
- .[tITI == "rest_run-pre_ses-01", tITI := "Rest Pre S1"] %>%
- .[tITI == "rest_run-post_ses-01", tITI := "Rest Post S1"] %>%
- .[tITI == "rest_run-pre_ses-02", tITI := "Rest Pre S2"] %>%
- .[tITI == "rest_run-post_ses-02", tITI := "Rest Post S2"] %>%
- # sort the factor levels of the speed conditions:
- transform(tITI = factor(tITI, levels = c(
- "32 ms", "2048 ms", "Rest Pre S1", "Rest Post S1",
- "Rest Pre S2", "Rest Post S2"))) %>%
- # rename the factor for seen (1) and unseen (0) sequences:
- .[, seen := ifelse(seen == 1, "More frequent", "Less frequent")] %>%
- setDT(.)
- ```
- During manual execution, we save the regression slope data to the repository; it is then reloaded for all following analyses:
- ```{r, echo=TRUE, eval=FALSE}
- # persist the (expensive) slope computation so later chunks can simply reload it:
- save(dt_pred_conc_slope, file = file.path(path_root, "data", "tmp", "dt_pred_conc_slope.Rdata"))
- ```
- ```{r, echo=TRUE}
- # reload the precomputed regression-slope table saved in the previous chunk:
- load(file.path(path_root, "data", "tmp", "dt_pred_conc_slope.Rdata"))
- ```
- ### Mean slope coefficients
- #### Fixed event order
- We calculate the mean slope coefficients assuming a fixed ordering of sequence events for sequence trials:
- ```{r}
- # calculate the mean slope across participants for each time point:
- # (sequence == 1 selects the first permutation — presumably the true event
- # order; confirm the ordering produced by permn() above)
- dt_pred_conc_slope_mean <- dt_pred_conc_slope %>%
- filter(sequence == 1) %>%
- setDT(.) %>%
- .[, by = .(classification, tITI, t, trial_start), .(
- num_subs = .N,
- mean_slope = mean(slope),
- sem_upper = mean(slope) + (sd(slope)/sqrt(.N)),
- sem_lower = mean(slope) - (sd(slope)/sqrt(.N))
- )] %>%
- # every cell should be averaged across the full sample of 32 participants:
- verify(all(num_subs == 32)) %>%
- filter(classification == "ovr") %>%
- # sort the factor levels of the speed conditions:
- transform(tITI = factor(tITI, levels = c(
- "2048 ms", "32 ms", "Rest Pre S1", "Rest Post S1",
- "Rest Pre S2", "Rest Post S2"))) %>%
- setDT(.)
- ```
- #### Figure 5c
- We plot the mean regression coefficient separately for
- all sequence speed conditions and the pre-task resting state data.
- Trial boundaries (for sequence trials) are shown as gray vertical lines.
- ```{r, echo=FALSE}
- # gray color ramp used for the resting-state conditions:
- grays = colorRampPalette(c("lightgray", "black"))
- #colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
- colors = c(hcl.colors(5, "Cividis")[c(5,1)], grays(4))
- # NOTE(review): tITI labels were renamed to "Rest ..." above, so the lowercase
- # "rest" pattern below never matches; rest rows are dropped anyway because
- # their trial_start is NA — consider str_detect(tITI, "Rest") to make the
- # intent explicit
- plot_trial_starts = dt_pred_conc_slope_mean %>%
- filter(trial_start == 1, !stringr::str_detect(tITI, "rest"))
- fig_prob_slope = ggplot(
- data = dt_pred_conc_slope_mean %>% filter(tITI != "Rest Post S1"),
- mapping = aes(
- x = as.numeric(t), y = as.numeric(mean_slope), color = as.factor(tITI),
- fill = as.factor(tITI))) +
- facet_wrap(~ as.factor(tITI), ncol = 1) +
- geom_vline(data = plot_trial_starts, aes(xintercept = t), linetype = "dashed", alpha = 0.1) +
- geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
- geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
- geom_line() +
- xlab("Time (TRs)") + ylab("Regression slope") +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(1, 200)) +
- theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
- # NOTE(review): guide = FALSE is deprecated in ggplot2 >= 3.3.4; use guide = "none"
- scale_color_manual(values = colors, guide = FALSE) +
- scale_fill_manual(values = colors, guide = FALSE) +
- scale_y_continuous(labels = label_fill(seq(-0.1, 0.1, 0.05), mod = 2),
- breaks = seq(-0.1, 0.1, 0.05)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_prob_slope
- ```
- #### Source Data File Fig. 5c
- ```{r}
- # export the plotted data (Fig. 5c) as a source data CSV;
- # (path_sourcedata is presumably defined in the sourced setup script — verify)
- dt_pred_conc_slope_mean %>% filter(tITI != "Rest Post S1") %>%
- select(-num_subs, -classification) %>%
- write.csv(., file = file.path(path_sourcedata, "source_data_figure_5c.csv"),
- row.names = FALSE)
- ```
- We calculate the mean regression slopes and compare sequence and rest:
- ```{r}
- # calculate mean regression slopes for fast and slow sequence trials and rest:
- dt_pred_conc_slope_stat = dt_pred_conc_slope %>%
- filter(sequence == 1) %>%
- setDT(.) %>%
- .[, by = .(classification, id, tITI), .(
- num_trs = .N,
- # mean of absolute slopes vs. absolute value of the mean slope — two
- # different summaries of sequentiality strength — plus mean probability SD:
- mean_abs_slope = mean(abs(slope)),
- abs_mean_slope = abs(mean(slope)),
- mean_sd_prob = mean(sd_prob)
- )] %>% verify(all(num_trs == 180)) %>%
- filter(classification == "ovr") %>%
- transform(tITI = factor(tITI, levels = c(
- "32 ms", "2048 ms", "Rest Pre S1", "Rest Post S1",
- "Rest Pre S2", "Rest Post S2"))) %>%
- setDT(.)
- ```
- ```{r, results="hold"}
- # compare mean probabilities between resting state and fast sequence data:
- mean_sd_rest_pre = subset(dt_pred_conc_slope_stat, tITI == "Rest Pre S1")$mean_sd_prob
- mean_sd_rest_post = subset(dt_pred_conc_slope_stat, tITI == "Rest Post S1")$mean_sd_prob
- mean_sd_fast = subset(dt_pred_conc_slope_stat, tITI == "32 ms")$mean_sd_prob
- mean_sd_slow = subset(dt_pred_conc_slope_stat, tITI == "2048 ms")$mean_sd_prob
- # paired two-sided t-tests for all pairwise comparisons of interest:
- fast_vs_rest_pre = t.test(x = mean_sd_rest_pre, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
- fast_vs_rest_post = t.test(x = mean_sd_rest_post, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
- fast_vs_slow = t.test(x = mean_sd_slow, y = mean_sd_fast, alternative = "two.sided", paired = TRUE)
- slow_vs_rest_pre = t.test(x = mean_sd_rest_pre, y = mean_sd_slow, alternative = "two.sided", paired = TRUE)
- rest_pre_vs_rest_post = t.test(x = mean_sd_rest_pre, y = mean_sd_rest_post, alternative = "two.sided", paired = TRUE)
- print(round(c(mean(mean_sd_rest_pre), mean(mean_sd_fast), mean(mean_sd_slow)), 2))
- # FDR-correct the five p-values; the output order matches the vector below:
- round_pvalues(p.adjust(
- c(fast_vs_rest_pre$p.value, fast_vs_slow$p.value, slow_vs_rest_pre$p.value,
- rest_pre_vs_rest_post$p.value, fast_vs_rest_post$p.value), method = "fdr"))
- # Cohen's d for paired samples (mean difference / SD of the differences):
- cohensd_rest = round((mean(mean_sd_rest_pre) - mean(mean_sd_fast)) / sd(mean_sd_rest_pre - mean_sd_fast), 2)
- cohensd_slow = round((mean(mean_sd_slow) - mean(mean_sd_fast)) / sd(mean_sd_slow - mean_sd_fast), 2)
- print(cohensd_rest); print(cohensd_slow)
- fast_vs_rest_pre; fast_vs_slow
- ```
- #### Figure 5a / 5b
- We plot the mean absolute regression slope for all speed conditions
- and pre-task resting state data:
- ```{r}
- # Plot per-condition means (bars), subject-level points (jittered) and SEM
- # error bars for one summary variable of the slope statistics table.
- # Arguments:
- #   data     - data table with columns tITI and <variable>
- #   variable - name of the column to plot (string)
- #   ylabel   - y-axis label
- #   ylim     - numeric length-2 vector with y-axis limits
- # Returns the ggplot object explicitly (the original returned it implicitly
- # via the value of an internal assignment).
- # NOTE(review): relies on grays() defined earlier in the document and on
- # coord_capped_cart() — presumably from the lemon package; verify availability.
- plot_means = function(data, variable, ylabel, ylim){
- #colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
- colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(4))
- ggplot(
- data = data %>% filter(tITI != "Rest Post S1"),
- aes(
- y = get(variable), x = tITI, fill = tITI)) +
- geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9)) +
- geom_point(position = position_jitter(width = 0.1, height = 0, seed = 2),
- alpha = 1, inherit.aes = TRUE, pch = 21,
- color = "white") +
- stat_summary(fun.data = mean_se, geom = "errorbar",
- position = position_dodge(0.9), width = 0, color = "black") +
- xlab("Data") + ylab(ylabel) +
- coord_capped_cart(left = "both", expand = TRUE, ylim = ylim) +
- scale_fill_manual(values = colors, guide = FALSE) +
- theme(axis.ticks.x = element_blank(), axis.line.x = element_blank(),
- axis.title.x = element_blank()) +
- theme(axis.text.x = element_text(angle = 45, hjust = 0.9)) +
- theme(panel.border = element_blank(), axis.line.y = element_line()) +
- theme(axis.line.y = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.background = element_blank())
- }
- fig_mean_beta = plot_means(data = dt_pred_conc_slope_stat, variable = "abs_mean_slope",
- ylabel = "Absolute mean regression slope", ylim = c(0, 0.02))
- fig_mean_beta_abs = plot_means(data = dt_pred_conc_slope_stat, variable = "mean_abs_slope",
- ylabel = "Regression slope (abs)", ylim = c(0, 0.1))
- fig_sd_prob = plot_means(data = dt_pred_conc_slope_stat, variable = "mean_sd_prob",
- ylabel = "SD of probability", ylim = c(0, 0.4))
- fig_mean_beta; fig_mean_beta_abs; fig_sd_prob;
- ```
- #### Source Data File Fig. 5a / 5b
- ```{r}
- # NOTE(review): the identical selection is written to both the 5a and 5b source
- # data files — presumably intentional (both panels share the data); confirm
- dt_pred_conc_slope_stat %>%
- select(-classification, -num_trs) %>%
- write.csv(., file = file.path(path_sourcedata, "source_data_figure_5a.csv"),
- row.names = FALSE)
- dt_pred_conc_slope_stat %>%
- select(-classification, -num_trs) %>%
- write.csv(., file = file.path(path_sourcedata, "source_data_figure_5b.csv"),
- row.names = FALSE)
- ```
- #### All possible event orders
- We calculate the mean slope for every time point (for every TR) depending on
- if a sequence was actually seen or not (relevant for resting state data).
- Note, that by this definition, all sequences on sequence trials were "seen".
- ```{r}
- # calculate the mean slope across participants for each time point:
- dt_pred_conc_slope_mean_seen <- dt_pred_conc_slope %>%
- # average slopes for each participant and each time point:
- .[, by = .(id, classification, tITI, t, trial_start, seen), .(
- num_seq = .N,
- mean_slope_sub = mean(slope)
- )] %>%
- # number of sequences will be 1 (sequence conditions), 15 (seen permuted
- # sequence condition), or 105 (not seen permuted sequence conditions):
- verify(num_seq %in% c(1, 15, 105)) %>%
- # calculate the mean slope across participants (including SEM):
- .[, by = .(classification, tITI, t, trial_start, seen), .(
- num_subs = .N,
- mean_slope = mean(mean_slope_sub),
- sem_upper = mean(mean_slope_sub) + (sd(mean_slope_sub)/sqrt(.N)),
- sem_lower = mean(mean_slope_sub) - (sd(mean_slope_sub)/sqrt(.N))
- )] %>%
- # verify that data was averaged across the correct number of participants:
- verify(all(num_subs == 32)) %>%
- filter(classification == "ovr") %>%
- # sort factor levels of the speed condition:
- transform(tITI = factor(tITI, levels = c(
- "2048 ms", "32 ms", "Rest Pre S1", "Rest Post S1",
- "Rest Pre S2", "Rest Post S2"))) %>%
- setDT(.)
- ```
- ```{r, echo = FALSE}
- grays = colorRampPalette(c("lightgray", "black"))
- #colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
- colors = c(hcl.colors(5, "Cividis")[c(5,1)], grays(4))
- # mark trial onsets with dashed lines in the task (non-rest) conditions only;
- # the tITI factor levels are capitalized ("Rest Pre S1", ...), so the pattern
- # must be "Rest" (the previous lowercase "rest" never matched and therefore
- # silently kept the rest conditions in the vline data):
- plot_trial_starts = dt_pred_conc_slope_mean_seen %>%
- filter(trial_start == 1, !stringr::str_detect(tITI, "Rest"))
- # plot the mean regression slope time courses per speed condition (rows),
- # split by whether the sequence was seen (columns); use the "seen" data table
- # computed in the previous chunk (dt_pred_conc_slope_mean_seen), not the
- # earlier dt_pred_conc_slope_mean, which lacks the "seen" facet variable:
- fig_prob_slope_seen = ggplot(data = dt_pred_conc_slope_mean_seen, mapping = aes(
- x = as.numeric(t), y = as.numeric(mean_slope), color = as.factor(tITI),
- fill = as.factor(tITI))) +
- facet_grid(vars(as.factor(tITI)), vars(as.factor(seen))) +
- geom_vline(data = plot_trial_starts, aes(xintercept = t), linetype = "dashed", alpha = 0.1) +
- geom_hline(aes(yintercept = 0), linetype = "solid", color = "gray") +
- geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
- geom_line() +
- xlab("Time (TRs)") + ylab("Regression slope") +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(1, 200)) +
- theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
- scale_color_manual(values = colors, guide = FALSE) +
- scale_fill_manual(values = colors, guide = FALSE) +
- scale_y_continuous(labels = label_fill(seq(-0.1, 0.1, 0.05), mod = 2),
- breaks = seq(-0.1, 0.1, 0.05)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_prob_slope_seen
- ```
- We calculate the mean absolute and non-absolute slopes for sequence and rest trials
- depending on whether the permuted sequence was actually seen or not:
- ```{r}
- # calculate mean regression slopes for fast and slow sequence trials and rest:
- dt_pred_conc_slope_stat_seen = dt_pred_conc_slope %>%
- .[, by = .(classification, id, tITI, seen), .(
- num_trs = .N,
- mean_slope = mean(slope),
- mean_abs_slope = mean(abs(slope)),
- abs_mean_slope = abs(mean(slope)),
- abs_pos_slope = mean(abs(slope[slope > 0])),
- abs_neg_slope = mean(abs(slope[slope < 0])),
- mean_sd_prob = mean(sd_prob)
- )] %>%
- # check if the average slope was calculated across the correct number of TRs
- # which should be 180 for sequence trials and 180 times number of sequences
- # not seen (105) and 180 times sequences seen (15) in permuted rest data:
- verify(all(num_trs %in% c(180, 180 * (120 - 15), 180 * 15))) %>%
- filter(classification == "ovr") %>%
- setDT(.)
- ```
- ```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
- lme_sd_prob <- lmerTest::lmer(
- sd_prob ~ tITI + (tITI | id),
- data = dt_pred_conc_slope, control = lcctrl
- )
- rmarkdown::paged_table(anova(lme_sd_prob))
- emmeans_results = emmeans(lme_sd_prob, list(pairwise ~ tITI))
- emmeans_results
- ```
- ```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
- lme_rest_sd = lmerTest::lmer(
- mean_sd_prob ~ tITI + (1 | id),
- data = dt_pred_conc_slope_stat, control = lcctrl)
- rmarkdown::paged_table(anova(lme_rest_sd))
- emmeans_results = emmeans(lme_rest_sd, list(pairwise ~ tITI))
- emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
- emmeans_results
- ```
- ```{r, echo=FALSE, include=FALSE, eval=FALSE, results="hold"}
- lme_rest_slope = lmerTest::lmer(
- mean_abs_slope ~ tITI + (1 | id),
- data = dt_pred_conc_slope_stat, control = lcctrl)
- rmarkdown::paged_table(anova(lme_rest_slope))
- emmeans_results = emmeans(lme_rest_slope, list(pairwise ~ tITI))
- emmeans_pvalues = round_pvalues(summary(emmeans_results[[2]])$p.value)
- emmeans_results
- ```
- ```{r}
- # bar plot (with individual data points and SEM error bars) of one summary
- # statistic per speed condition, faceted by whether the (permuted) sequence
- # was actually seen during the task.
- # data: data.table with columns tITI, seen, and the column named by `variable`
- # variable: character name of the column to plot on the y-axis
- # ylabel: y-axis label
- # ylim: numeric vector of length 2 giving the y-axis limits
- # returns: a ggplot object (value of the last assignment)
- plot_means_seen = function(data, variable, ylabel, ylim){
- #colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
- grays = colorRampPalette(c("lightgray", "black"))
- colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(4))
- fig_mean_beta = ggplot(data = data, aes(
- y = get(variable), x = tITI, fill = tITI)) +
- facet_wrap(~ seen) +
- geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9)) +
- geom_point(position = position_jitter(width = 0.1, height = 0, seed = 2),
- alpha = 1, inherit.aes = TRUE, pch = 21,
- color = "white") +
- stat_summary(fun.data = mean_se, geom = "errorbar",
- position = position_dodge(0.9), width = 0, color = "black") +
- xlab("Data") + ylab(ylabel) +
- coord_capped_cart(left = "both", expand = TRUE, ylim = ylim) +
- # NOTE(review): guide = FALSE is deprecated in ggplot2 >= 3.3.4; consider
- # guide = "none" when updating dependencies:
- scale_fill_manual(values = colors, guide = FALSE) +
- theme(axis.ticks.x = element_blank(), axis.line.x = element_blank(),
- axis.title.x = element_blank()) +
- theme(axis.text.x = element_text(angle = 45, hjust = 0.9)) +
- theme(panel.border = element_blank(), axis.line.y = element_line()) +
- theme(axis.line.y = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.background = element_blank())
- }
- # create the plots for all summary statistics of interest:
- fig_mean_beta_seen = plot_means_seen(
- data = dt_pred_conc_slope_stat_seen, variable = "abs_mean_slope",
- ylabel = "Absolute mean regression slope", ylim = c(-0.01, 0.02))
- fig_mean_beta_abs_seen = plot_means_seen(
- data = dt_pred_conc_slope_stat_seen, variable = "mean_abs_slope",
- ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
- fig_sd_prob_seen = plot_means_seen(
- data = dt_pred_conc_slope_stat_seen, variable = "mean_sd_prob",
- ylabel = "SD of probability", ylim = c(0, 0.4))
- fig_mean_pos_abs_seen = plot_means_seen(
- data = dt_pred_conc_slope_stat_seen, variable = "abs_pos_slope",
- ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
- fig_mean_neg_abs_seen = plot_means_seen(
- data = dt_pred_conc_slope_stat_seen, variable = "abs_neg_slope",
- ylabel = "Mean absolute regression slope", ylim = c(0, 0.1))
- fig_mean_slope_seen = plot_means_seen(
- data = dt_pred_conc_slope_stat_seen, variable = "mean_slope",
- ylabel = "Absolute mean regression slope", ylim = c(-0.01, 0.02))
- fig_mean_beta_seen; fig_mean_beta_abs_seen; fig_sd_prob_seen;
- fig_mean_pos_abs_seen; fig_mean_neg_abs_seen; fig_mean_slope_seen
- ```
- ### Frequency spectrum analyses
- #### Fixed event order
- We create a data table with frequency expectations:
- ```{r}
- fitfreq = 1 / 5.26
- stim_dur = 0.1
- num_seq_stim = 5
- # calculate the delta
- delta_freq_fast = fitfreq / (1 + fitfreq * (
- num_seq_stim * stim_dur + (num_seq_stim - 1) * 0.032))
- delta_freq_slow = fitfreq / (1 + fitfreq * (
- num_seq_stim * stim_dur + (num_seq_stim - 1) * 2.048))
- delta_freq_rest = 0.01
- # create a data table with the expected frequencies:
- frequency_expectation = data.table(xintercept = c(
- delta_freq_fast, delta_freq_slow, rep(delta_freq_rest, 4)))
- ```
- We calculate the frequency spectra based on the time courses of regression slope coefficients:
- ```{r}
- library(lomb)
- # create a time vector with random gaps: 15 segments of 12 time points each
- # (= 180 points, matching the 180 slopes per condition), each segment offset
- # by 100 plus a random jitter of up to 50.
- # NOTE(review): runif() is not seeded here, so the gap structure (and thus
- # the Lomb-Scargle spectra) differs between knits — consider set.seed() for
- # reproducibility; TODO confirm whether this was intentional:
- time_concat = as.vector(sapply(0:14, FUN = function(x) seq(0, 1*11, by = 1) + x*100 + round(runif(1)*50)))
- # get the frequency spectrum of fast and slow trials:
- dt_pred_rest_lsp = dt_pred_conc_slope %>%
- filter(sequence == 1) %>%
- setDT(.) %>%
- # Lomb-Scargle periodogram of the slope time course per classifier,
- # participant, and speed condition, scanning frequencies from 0 to 0.3:
- .[, by = .(classification, id, tITI), {
- frequency_spectrum = lsp(x = slope, times = time_concat, from = 0, to = 0.3, plot = FALSE)
- list(num_trs = .N, freq_bins = frequency_spectrum$scanned,
- filter_width = round(0.01/mean(diff(frequency_spectrum$scanned))),
- power = frequency_spectrum$power
- )}] %>% verify(all(num_trs == 180)) %>% verify(length(unique(filter_width)) == 1) %>%
- # smooth spectra and frequency bins with a moving average spanning roughly
- # 0.01 frequency units (stats::filter yields NA at the window edges):
- .[, by = .(classification, id, tITI), ":=" (
- power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
- freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
- )]
- ```
- ```{r}
- # calculate the mean frequency profile across participants:
- dt_pred_rest_lsp_mean = dt_pred_rest_lsp %>%
- # filter out NA frequency spectrum:
- filter(!is.na(power_smooth)) %>%
- setDT(.) %>%
- # calculate the mean smoothed frequency spectrum across all frequency bins:
- .[, by = .(classification, tITI, freq_bins_smooth), .(
- num_subs = .N,
- mean_spec = mean(power_smooth),
- sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
- sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
- )] %>%
- verify(all(num_subs == 32))
- ```
- #### Figure 5d
- ```{r}
- #colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
- colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(1), "black", grays(2))
- fig_freq_lsp = ggplot(
- data = dt_pred_rest_lsp_mean %>% filter(tITI != "Rest Post S1"),
- aes(
- color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
- geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
- linetype = "dashed", color = colors) +
- geom_line() +
- geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
- alpha = 0.3, color = NA) +
- geom_text(data = frequency_expectation, aes(
- x = xintercept + 0.02, y = 0.1, label = paste(round(xintercept, 2))),
- color = colors) +
- xlab("Frequency") + ylab("Power") +
- scale_color_manual(name = "", values = colors) +
- scale_fill_manual(name = "", values = colors) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
- legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
- ylim = c(0, 3), xlim = c(0, 0.3)) +
- guides(color = guide_legend(nrow = 1)) +
- scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
- breaks = seq(0, 0.3, by = 0.05)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_freq_lsp
- ```
- #### Source Data File Fig. 5d
- ```{r}
- dt_pred_rest_lsp_mean %>% filter(tITI != "Rest Post S1") %>%
- select(- classification, -num_subs) %>%
- write.csv(., file = file.path(path_sourcedata, "source_data_figure_5d.csv"),
- row.names = FALSE)
- ```
- #### All possible event orders
- We calculate the frequency spectrum for all permuted resting state sequences and
- sequence trials:
- ```{r}
- # create a time vector with random gaps:
- time_concat = as.vector(sapply(0:14, FUN = function(x) seq(0, 1*11, by = 1) + x*100 + round(runif(1)*50)))
- # get the frequency spectrum of fast and slow trials:
- dt_pred_rest_lsp_seen = dt_pred_conc_slope %>%
- # calculate the frequency spectrum for each permuted sequence and participant:
- .[, by = .(classification, id, tITI, sequence, seen), {
- frequency_spectrum = lomb::lsp(
- x = slope, times = time_concat, from = 0, to = 0.3, plot = FALSE)
- list(num_trs = .N, freq_bins = frequency_spectrum$scanned,
- filter_width = round(0.01/mean(diff(frequency_spectrum$scanned))),
- power = frequency_spectrum$power
- )}] %>%
- # the number of TRs for each stretch of activation patterns should be 180:
- verify(all(num_trs == 180)) %>%
- # there should only be one filtering width per sequence:
- verify(length(unique(filter_width)) == 1) %>%
- # there should be 120 sequence permutations in the resting condition:
- verify(.[, by = .(classification, id, tITI), .(
- num_seq = length(unique(sequence)))]$num_seq %in% c(1, 120)) %>%
- # smooth the frequency spectra and frequency bins:
- .[, by = .(classification, id, tITI, sequence, seen), ":=" (
- power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
- freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
- )]
- ```
- We calculate the mean frequency spectra for each participant, classification approach,
- speed condition, smoothed frequency bin and if the (permuted) rest sequence
- was actually seen during the task or not:
- ```{r}
- # calculate the mean frequency profile across participants:
- dt_pred_rest_lsp_mean_seen = dt_pred_rest_lsp_seen %>%
- # filter out NA frequency spectrum:
- filter(!is.na(power_smooth)) %>%
- setDT(.) %>%
- # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
- .[, by = .(classification, id, tITI, freq_bins_smooth), .(
- num_seqs = .N,
- power_smooth = mean(power_smooth)
- )] %>%
- # since "seen" is not part of the grouping here, all 120 permuted sequences
- # are pooled per rest condition; sequence trials contribute 1 sequence each:
- verify(num_seqs %in% c(1, 120)) %>%
- # calculate the mean smoothed frequency spectrum across all frequency bins:
- .[, by = .(classification, tITI, freq_bins_smooth), .(
- num_subs = .N,
- mean_spec = mean(power_smooth),
- sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
- sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
- )] %>%
- # verify if data was averaged across the expected number of participants:
- verify(all(num_subs == 32))
- ```
- ```{r, echo=TRUE}
- # calculate the expected frequency of sequential reactivation for a given
- # inter-stimulus interval: one full cycle comprises five stimuli plus the
- # response period (fitfreq = 1 / 5.26; presumably the fitted response
- # duration in seconds — TODO confirm against the model-fitting section).
- calc_delta <- function(speed_s) {
- # input speed_s: sequence speed (ISI), in seconds
- fitfreq = 1 / 5.26
- stim_dur = 0.1
- num_seq_stim = 5
- delta = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * speed_s))
- return(delta)
- }
- # create a data table with the expected frequencies for the fast (32 ms) and
- # slow (2048 ms) conditions and a nominal 0.01 baseline for rest:
- frequency_expectation = data.table(xintercept = c(
- calc_delta(0.032), calc_delta(2.048), 0.01),
- tITI = c("32 ms", "2048 ms", "Rest Pre S1"))
- ```
- #### Figure 6a
- ```{r, echo=FALSE}
- colors_all = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
- colors_rest = c("black", "darkred")
- plot_data <- dt_pred_rest_lsp_mean_seen %>%
- filter(tITI %in% c("Rest Pre S1", "Rest Post S1"))
- fig_freq_post <- ggplot(data = plot_data, aes(
- color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
- geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
- linetype = "dashed", color = colors_all) +
- geom_line() +
- geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
- alpha = 0.3, color = NA) +
- geom_text(data = frequency_expectation, aes(
- x = xintercept + 0.03, y = 0.1, label = paste(round(xintercept, 2))),
- color = colors_all) +
- xlab("Frequency") + ylab("Power") +
- scale_color_manual(name = "", values = colors_rest) +
- scale_fill_manual(name = "", values = colors_rest) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
- legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
- ylim = c(0, 3), xlim = c(0, 0.3)) +
- guides(color = guide_legend(nrow = 1)) +
- scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
- breaks = seq(0, 0.3, by = 0.05)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_freq_post
- ```
- #### Source Data File Fig. 6a
- ```{r}
- dt_pred_rest_lsp_mean_seen %>%
- filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
- select(-classification, -num_subs) %>%
- write.csv(., file = file.path(path_sourcedata, "source_data_figure_6a.csv"),
- row.names = FALSE)
- ```
- We calculate the relative power (difference between pre- and post task resting state data) across the entire frequency spectrum:
- ```{r}
- # calculate the mean frequency profile across participants:
- dt_pred_rest_lsp_mean_rel_seen = dt_pred_rest_lsp_seen %>%
- filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
- transform(tITI = ifelse(tITI == "Rest Pre S1", "rest_pre_s1", "rest_post_s1")) %>%
- # filter out NA frequency spectrum:
- filter(!is.na(power_smooth)) %>%
- setDT(.) %>%
- # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
- .[, by = .(classification, id, tITI, freq_bins_smooth), .(
- num_seqs = .N,
- power_smooth = mean(power_smooth)
- )] %>%
- # only resting-state data remains here and "seen" is not in the grouping,
- # so all 120 permuted sequences are pooled per rest session (the verify
- # also tolerates 1 for non-rest data, which was filtered out above):
- verify(num_seqs %in% c(1, 120)) %>%
- # transform resting state data from long to wide to calculate relative power:
- spread(data = ., key = tITI, value = power_smooth) %>%
- mutate(rel_power = rest_post_s1 - rest_pre_s1) %>%
- setDT(.) %>%
- # calculate the mean smoothed frequency spectrum across all frequency bins:
- .[, by = .(classification, freq_bins_smooth), .(
- num_subs = .N,
- mean_spec = mean(rel_power),
- sem_upper = mean(rel_power) + (sd(rel_power)/sqrt(.N)),
- sem_lower = mean(rel_power) - (sd(rel_power)/sqrt(.N))
- )] %>%
- # verify if data was averaged across the expected number of participants:
- verify(all(num_subs == 32))
- ```
- We determine in which frequency range the peak in difference between pre- and post-task rest is:
- ```{r}
- increase_slow = round(dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth[which.max(dt_pred_rest_lsp_mean_rel_seen$mean_spec)], 2)
- increase_fast = round(dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth[dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth > 0.07][which.max(dt_pred_rest_lsp_mean_rel_seen[dt_pred_rest_lsp_mean_rel_seen$freq_bins_smooth > 0.07]$mean_spec)], 2)
- increase_slow; increase_fast;
- ```
- #### Figure 6b
- ```{r, echo=FALSE}
- colors_all = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")
- colors_rest = c("black", "darkred")
- fig_freq_post_rel <- ggplot(data = dt_pred_rest_lsp_mean_rel_seen, aes(
- y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
- geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
- linetype = "dashed", color = colors_all) +
- geom_hline(aes(yintercept = 0)) +
- geom_line() +
- geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper),
- alpha = 0.3, color = NA) +
- geom_text(data = frequency_expectation, aes(
- x = xintercept + 0.03, y = -0.25, label = paste(round(xintercept, 2))),
- color = colors_all) +
- xlab("Frequency") + ylab("Relative power (Pre vs. Post)") +
- scale_color_manual(name = "", values = colors_rest) +
- scale_fill_manual(name = "", values = colors_rest) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
- legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
- xlim = c(0, 0.3)) +
- guides(color = guide_legend(nrow = 1)) +
- scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
- breaks = seq(0, 0.3, by = 0.05)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_freq_post_rel
- ```
- #### Source Data File Fig. 6b
- ```{r}
- dt_pred_rest_lsp_mean_rel_seen %>%
- select(-classification, -num_subs) %>%
- write.csv(., file = file.path(path_sourcedata, "source_data_figure_6b.csv"),
- row.names = FALSE)
- ```
- #### Frequency spectra of frequent vs. less frequent sequences
- We calculate the mean frequency spectra depending on whether the assumed sequences occurred more frequently or less frequently during the task:
- ```{r}
- # calculate the mean frequency profile across participants:
- dt_pred_rest_lsp_mean_seen = dt_pred_rest_lsp_seen %>%
- # filter out NA frequency spectrum:
- filter(!is.na(power_smooth)) %>%
- setDT(.) %>%
- # calculate the mean smoothed frequency spectrum across all frequency bins and sequences:
- .[, by = .(classification, id, tITI, freq_bins_smooth, seen), .(
- num_seqs = .N,
- power_smooth = mean(power_smooth)
- )] %>%
- # the number of sequences should be 1 for sequence trials, 15 for seen
- # permuted rest repetitions and 105 for unseen sequence repetitions:
- verify(num_seqs %in% c(1, 15, 105)) %>%
- # calculate the mean smoothed frequency spectrum across all frequency bins:
- .[, by = .(classification, tITI, freq_bins_smooth, seen), .(
- num_subs = .N,
- mean_spec = mean(power_smooth),
- sem_upper = mean(power_smooth) + (sd(power_smooth)/sqrt(.N)),
- sem_lower = mean(power_smooth) - (sd(power_smooth)/sqrt(.N))
- )] %>%
- # verify if data was averaged across the expected number of participants:
- verify(all(num_subs == 32))
- ```
- ```{r, echo=FALSE}
- colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", grays(1), "black")
- fig_freq_lsp_seen = ggplot(data = dt_pred_rest_lsp_mean_seen, aes(
- color = tITI, y = as.numeric(mean_spec), x = as.numeric(freq_bins_smooth))) +
- facet_wrap(~ seen) +
- geom_vline(data = frequency_expectation, aes(xintercept = xintercept),
- linetype = "dashed", color = rep(colors[1:3], 2)) +
- geom_line() +
- geom_ribbon(aes(fill = tITI, ymin = sem_lower, ymax = sem_upper),
- alpha = 0.3, color = NA) +
- geom_text(data = frequency_expectation, aes(
- x = xintercept + 0.02, y = 0.1, label = paste(round(xintercept, 2))),
- color = rep(colors[1:3], 2)) +
- xlab("Frequency") + ylab("Power") +
- scale_color_manual(name = "", values = colors) +
- scale_fill_manual(name = "", values = colors) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
- legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
- ylim = c(0, 3), xlim = c(0, 0.3)) +
- guides(color = guide_legend(nrow = 1)) +
- scale_x_continuous(labels = label_fill(seq(0, 0.3, by = 0.05), mod = 1),
- breaks = seq(0, 0.3, by = 0.05)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_freq_lsp_seen
- ```
- ### Relative frequency spectra (fixed order)
- We compare the power of the expected frequencies of resting state, fast, and slow
- sequence trial data in data from resting state, fast, and slow sequence trial data.
- ```{r}
- fitfreq = 1 / 5.26
- stim_dur = 0.1
- num_seq_stim = 5
- # calculate the delta
- delta_freq_fast = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 0.032))
- delta_freq_slow = fitfreq / (1 + fitfreq * (num_seq_stim * stim_dur + (num_seq_stim - 1) * 2.048))
- delta_freq_rest = 0.01
- # create a data table with the expected frequencies:
- frequency_expectation = data.table(xintercept = c(
- delta_freq_fast, delta_freq_slow, rep(delta_freq_rest, 4)))
- ```
- ```{r}
- dt_pred_rest_lsp_exp = dt_pred_rest_lsp %>%
- .[, by = .(classification, id, tITI), .(
- power_index_fast = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_fast))],
- power_index_slow = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_slow))],
- power_index_rest = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_rest))]
- )] %>%
- # melt all index variables into one column:
- gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>% setDT(.) %>%
- .[grepl("index_fast", index), label := paste0("Fast (", round(delta_freq_fast, 2), ")")] %>%
- .[grepl("index_slow", index), label := paste0("Slow (", round(delta_freq_slow, 2), ")")] %>%
- .[grepl("index_rest", index), label := paste0("Rest (", round(delta_freq_rest, 2), ")")] %>%
- setorder(., classification, id, tITI) %>%
- filter(classification == "ovr") %>%
- setDT(.)
- ```
- #### Figure 5e
- ```{r, echo=TRUE}
- colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray", "black")
- fig_freq_exp = ggplot(
- data = dt_pred_rest_lsp_exp %>% filter(tITI != "Rest Post S1"),
- aes(y = power, x = fct_rev(label), fill = tITI)) +
- geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
- width = 0.8) +
- geom_point(position = position_jitterdodge(
- jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
- alpha = 1, inherit.aes = TRUE, pch = 21,
- color = "white") +
- stat_summary(fun.data = "mean_se", geom = "errorbar",
- position = position_dodge(0.9), width = 0, color = "black") +
- xlab("Predicted frequency") + ylab("Power") +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 5)) +
- scale_fill_manual(values = colors, name = "") +
- theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
- legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_freq_exp
- ```
- ```{r, results="hold"}
- lme_power <- lmerTest::lmer(
- power ~ tITI + (1 | id),
- data = dt_pred_rest_lsp_exp %>%
- filter(tITI != "Rest Post S1" & index == "power_index_fast"),
- control = lcctrl)
- summary(lme_power)
- anova(lme_power)
- emmeans_results = emmeans(lme_power, list(pairwise ~ tITI))
- emmeans_results
- ```
- ```{r}
- # get the power spectrum of the expected fast frequency in fast data:
- power_fast = subset(dt_pred_rest_lsp_exp, index == "power_index_fast" & tITI == "32 ms")$power
- # get the power spectrum of the expected fast frequency in slow data:
- power_slow = subset(dt_pred_rest_lsp_exp, index == "power_index_fast" & tITI == "2048 ms")$power
- # get the power spectrum of the expected fast frequency in rest data:
- power_rest_pre = subset(dt_pred_rest_lsp_exp, index == "power_index_fast" & tITI == "Rest Pre S1")$power
- # compare fast vs. slow and fast vs. rest:
- ttest_power_fastvsslow = t.test(power_fast, power_slow, paired = TRUE)
- ttest_power_fastvsrestpre = t.test(power_fast, power_rest_pre, paired = TRUE)
- ttest_power_slowvsrestpre = t.test(power_slow, power_rest_pre, paired = TRUE)
- round_pvalues(p.adjust(c(ttest_power_fastvsslow$p.value,
- ttest_power_fastvsrestpre$p.value,
- ttest_power_slowvsrestpre$p.value), method = "fdr"))
- ttest_power_fastvsslow; ttest_power_fastvsrestpre; ttest_power_slowvsrestpre
- ```
- #### Source Data File Fig. 5e
- ```{r}
- dt_pred_rest_lsp_exp %>% filter(tITI != "Rest Post S1") %>%
- select(-classification, -index) %>%
- write.csv(., file = file.path(path_sourcedata, "source_data_figure_5e.csv"),
- row.names = FALSE)
- ```
- ### Relative frequency spectra (all orders)
- ```{r}
- dt_pred_rest_lsp_exp_seen = dt_pred_rest_lsp_seen %>%
- .[, by = .(classification, id, tITI, sequence, seen), .(
- power_index_fast = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_fast))],
- power_index_slow = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_slow))],
- power_index_rest = power_smooth[which.min(abs(freq_bins_smooth - delta_freq_rest))]
- )] %>%
- # melt all index variables into one column:
- gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>%
- setDT(.) %>%
- .[grepl("index_fast", index), label := paste0("Fast (", round(delta_freq_fast, 2), ")")] %>%
- .[grepl("index_slow", index), label := paste0("Slow (", round(delta_freq_slow, 2), ")")] %>%
- .[grepl("index_rest", index), label := paste0("Rest (", round(delta_freq_rest, 2), ")")] %>%
- # average across sequences for seen and unseen sequences:
- .[, by = .(classification, id, tITI, seen, index, label), .(
- num_seqs = .N,
- power = mean(power)
- )] %>%
- # check if the number of sequences matches for sequence (1), seen (15) and
- # unseen (105) sequences in permuted resting state sequences:
- verify(num_seqs %in% c(1, 15, 105)) %>%
- setorder(., classification, id, tITI) %>%
- filter(classification == "ovr") %>%
- setDT(.)
- ```
- ```{r}
- colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray", "black")
- fig_freq_exp_seen = ggplot(data = dt_pred_rest_lsp_exp_seen, aes(
- y = power, x = fct_rev(label), fill = tITI)) +
- geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
- width = 0.8) +
- geom_point(position = position_jitterdodge(
- jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
- alpha = 1, inherit.aes = TRUE, pch = 21,
- color = "white") +
- facet_wrap(~ seen) +
- stat_summary(fun.data = "mean_se", geom = "errorbar",
- position = position_dodge(0.9), width = 0, color = "black") +
- xlab("Predicted frequency") + ylab("Power") +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(0, 5)) +
- scale_fill_manual(values = colors, name = "") +
- theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
- legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
- theme(panel.border = element_blank(), axis.line = element_line()) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_freq_exp_seen
- ```
- We examine the effect of resting-state session (pre- vs. post-task rest) and sequence frequency (less frequent vs. more frequent) on the power in the high frequency range (predicted by our model) indicative of fast sequential reactivation:
- ```{r, results="hold"}
- lme_power <- lmerTest::lmer(
- power ~ tITI * seen + (tITI + seen | id),
- data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
- stringr::str_detect(tITI, "Rest")),
- control = lcctrl)
- summary(lme_power)
- anova(lme_power)
- emmeans_results = emmeans(lme_power, list(pairwise ~ tITI | seen))
- emmeans_results
- ```
- ```{r, echo=TRUE, results="hold"}
- lme_power <- lmerTest::lmer(
- power ~ tITI + (seen | id),
- data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
- stringr::str_detect(tITI, "Rest")),
- control = lcctrl)
- summary(lme_power)
- anova(lme_power)
- emmeans_results = emmeans(lme_power, list(pairwise ~ tITI))
- emmeans_results
- ```
- ```{r, echo=TRUE, results="hold"}
- lme_power <- lmerTest::lmer(
- power ~ seen + (tITI | id),
- data = subset(dt_pred_rest_lsp_exp_seen, index == "power_index_fast" &
- stringr::str_detect(tITI, "Rest")), control = lcctrl)
- summary(lme_power)
- anova(lme_power)
- emmeans_results = emmeans(lme_power, list(pairwise ~ seen))
- emmeans_results
- ```
- We compare the power in the fast frequency spectrum between pre- and post-task resting-state data:
```{r}
# Paired t-tests comparing fast-frequency power between pre- and post-task
# rest, separately for each sequence-frequency (seen) level:
dt_pred_rest_lsp_exp_seen %>%
  # select resting state data and power in the fast frequency range:
  filter(stringr::str_detect(tITI, "Rest") & index == "power_index_fast") %>%
  group_by(index, seen) %>%
  do(broom::tidy(t.test(formula = power ~ tITI, data = ., paired = TRUE))) %>%
  setDT(.) %>%
  # FDR-correct across the tests and round p-values for reporting:
  .[, "p.value_adjust" := p.adjust(p.value, method = "fdr")] %>%
  .[, "p.value_round" := round_pvalues(p.value)] %>%
  .[, "p.value_adjust_round" := round_pvalues(p.value_adjust)] %>%
  # check if the degrees-of-freedom are n - 1 (n = 32 participants):
  verify(parameter == 32 - 1) %>%
  rmarkdown::paged_table(.)
```
- #### Figure 6c
```{r}
# Figure 6c: power at the predicted fast frequency (0.17 Hz) in pre- vs.
# post-task rest, split by sequence frequency (less vs. more frequent):
plot_data <- dt_pred_rest_lsp_exp_seen %>%
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1")) %>%
  filter(index == "power_index_fast")
colors = c(brewer.pal(n = 8, name = "RdBu")[6], hcl.colors(5, "Cividis")[c(1)])
fig_freq_exp_post = ggplot(data = plot_data, aes(
  y = power, x = tITI, fill = seen)) +
  # bars show the group mean power per session and frequency level:
  geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
    width = 0.8) +
  # individual participant data points, jittered within dodge groups
  # (fixed jitter seed for reproducible point placement):
  geom_point(position = position_jitterdodge(
    jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
    alpha = 1, inherit.aes = TRUE, pch = 21,
    color = "white") +
  stat_summary(fun.data = "mean_se", geom = "errorbar",
    position = position_dodge(0.9), width = 0, color = "black") +
  xlab("Resting-state data") + ylab("Power at fast frequency (0.17)") +
  #ggtitle("Predicted fast frequency (0.17 Hz)") +
  theme(plot.title = element_text(hjust = 1)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
  scale_fill_manual(values = colors, name = "Sequences") +
  # hide the x-axis line and ticks by drawing them in white:
  theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
  theme(legend.position = "top", legend.direction = "horizontal",
    legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
    legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
  guides(fill = guide_legend(title.position = "top", title.hjust = 0.5)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank())
#theme(legend.title = element_text(size = 10),
#  legend.text = element_text(size = 10))
fig_freq_exp_post
```
- #### Source Data File Fig. 6c
```{r}
# Export the data underlying figure 6c as a source data file:
fig_6c_data <- dt_pred_rest_lsp_exp_seen %>%
  filter(tITI %in% c("Rest Pre S1", "Rest Post S1"),
         index == "power_index_fast") %>%
  select(-classification, -index, -label, -num_seqs)
write.csv(fig_6c_data,
          file = file.path(path_sourcedata, "source_data_figure_6c.csv"),
          row.names = FALSE)
```
- ### Figure 6
```{r}
# combine panels 6a-6c into the full figure 6:
plot_grid(fig_freq_post, fig_freq_post_rel, fig_freq_exp_post, nrow = 1, ncol = 3,
  labels = c("a", "b", "c"), rel_widths = c(3, 3, 3))
```
```{r, echo=TRUE, eval=FALSE}
# not evaluated; kept for reference under the previous output file name:
ggsave(filename = "highspeed_plot_decoding_resting_post.pdf",
  plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  dpi = "retina", width = 9, height = 3)
```
```{r}
# NOTE(review): relies on last_plot() still referring to the plot_grid output
# printed above — confirm no other plot is created in between:
ggsave(filename = "wittkuhn_schuck_figure_6.pdf",
  plot = last_plot(), device = cairo_pdf, path = path_figures, scale = 1,
  dpi = "retina", width = 9, height = 3)
```
- ### Inserting sequence trials into rest
- We create a function to sample sequence trials:
- ```{r}
# Randomly place `seq_num` non-overlapping subsequences of length `seq_size`
# within a sequence, separated by random gaps.
#
# Arguments:
#   sequence: vector whose length defines the available positions.
#   seq_size: number of consecutive positions per subsequence.
#   seq_num:  number of subsequences to place.
# Returns: a vector of length(sequence) holding, at each position, either NA
#   (gap) or the randomly assigned index (1..seq_num) of the subsequence
#   covering that position. The (seq_num + 1)-th random gap is implicitly
#   left at the end of the sequence.
subseqsample = function(sequence, seq_size = 12, seq_num = 15) {
  # validate input: all subsequences must fit into the sequence, otherwise
  # the gap computation below would silently produce invalid indices:
  if (seq_size * seq_num > length(sequence)) {
    stop("seq_size * seq_num exceeds the length of the input sequence!")
  }
  # initialize an empty array for the resulting indices:
  index = rep(NA, length(sequence))
  # calculate how much space would be allocated to gaps:
  total_gaps = length(sequence) - seq_size * seq_num
  # draw seq_num + 1 random gaps (via differences of sorted draws) that
  # together account for all gap positions:
  random_gaps = diff(sort(c(0, sample(1:(total_gaps + seq_num), seq_num), total_gaps + seq_num + 1)))
  # we subtract 1 for each element of seq_num we added above:
  random_gaps = random_gaps - 1
  # make sure that the sum of random gaps matches the total gaps:
  if (sum(random_gaps) != total_gaps) {warning("sum of gaps does not match!")}
  # generate a random order of subsequences:
  seq_order = sample(1:seq_num, seq_num)
  # initialize the first starting point of potential sequences:
  cstart = 0
  # loop through all subsequences:
  for (cseq in seq(1, seq_num)) {
    # select the first random gap:
    cgap = random_gaps[cseq]
    # take the starting point and add the current random gap:
    cstart = cstart + cgap + 1 # +1 because gap can be 0
    # calculate the current subsequence index:
    cidx = seq(cstart, (cstart + seq_size - 1))
    # insert the random sequence index into the index array:
    index[cidx] = seq_order[cseq]
    # define a new starting position for the next subsequence:
    cstart = max(cidx)
  }
  # return the indices:
  return(index)
}
subseqsample(sequence = seq(1, 233))
- ```
- We randomly sample 15 sequences of 12 TRs each from resting state data and
- compare the mean frequency spectrum across those trials between resting state,
- fast and slow sequence trials.
```{r}
# sample 15 pseudo-trials of 12 TRs from pre-task rest and compare the mean
# frequency spectrum between rest, fast and slow sequence trials:
dt_pred_rest_freq = dt_pred_conc_slope %>%
  # we deselect post-task resting-state data and assume a fixed ordering:
  filter(!(tITI == "Rest Post S1") & sequence == 1) %>%
  # filter for TRs in the forward and backward period only in sequence trials:
  filter(!(tITI %in% c("32 ms", "2048 ms") & !(seq_tr %in% seq(2, 13)))) %>%
  setDT(.) %>%
  # select random chunks of TRs in the resting state data
  .[tITI == "Rest Pre S1", by = .(classification, id), ":=" (trial_tITI = subseqsample(seq_tr))] %>%
  # filter out all NA trials on resting state data
  filter(!(is.na(trial_tITI) & tITI == "Rest Pre S1")) %>%
  setDT(.) %>%
  # assert that all the three conditions (two sequence trials and rest data) are present:
  assert(in_set("32 ms", "2048 ms", "Rest Pre S1"), tITI) %>%
  # calculate the frequency spectrum for every participant, condition and (pseudo-)trial:
  .[, by = .(classification, id, tITI, trial_tITI), {
    frequency_spectrum = stats::spectrum(
      x = scale(slope), plot = FALSE, detrend = TRUE, demean = FALSE, spans = 5)
    list(
      num_trs = .N,
      freq_bins = frequency_spectrum$freq,
      spectrum = frequency_spectrum$spec
    )}] %>% verify(all(num_trs %in% c(12, 6))) %>%
  # NOTE(review): 12 TRs is the expected trial length; confirm which trials
  # legitimately contribute only 6 TRs here.
  # normalize the frequencies per participant and trial to remove mean differences:
  .[, by = .(classification, id, tITI, trial_tITI), ":=" (spectrum = spectrum/sum(spectrum))] %>%
  # multiply the frequency levels by 1/TR (TR = 1.25 s) to convert to Hz:
  transform(freq_bins = freq_bins * (1/1.25)) %>% setDT(.) %>%
  # calculate the mean frequency spectrum per participant and condition:
  .[, by = .(classification, id, tITI, freq_bins), .(
    num_trials = .N,
    mean_spec = mean(spectrum)
  )] %>% verify(all(num_trials == 15)) %>%
  # calculate the average frequency spectrum across all participants:
  .[, by = .(classification, tITI, freq_bins), .(
    num_subs = .N,
    mean_spec = mean(mean_spec),
    sem_upper = mean(mean_spec) + (sd(mean_spec)/sqrt(.N)),
    sem_lower = mean(mean_spec) - (sd(mean_spec)/sqrt(.N))
  )] %>%
  verify(all(num_subs == 32))
```
- We insert a number of events with probabilities scaled at different SNR levels:
- ```{r}
# prepare the data for inserting SNR-scaled sequence trials into rest:
dt_pred_conc = dt_pred_all %>%
  # we add a consecutive counter that will be used to concatenate data:
  .[, by = .(classification, id, tITI, class), ":=" (t = seq(1, .N))] %>%
  # we will focus all further analyses on the one-versus-rest classifier:
  filter(classification == "ovr") %>% setDT(.)
# create separate dataframes for resting state data and sequence trial data:
dt_pred_rest_s1 = subset(dt_pred_conc, tITI == "rest_run-pre_ses-01")
dt_pred_seq_cut = subset(dt_pred_conc, tITI %in% c("32 ms", "2048 ms"))
speed_levels = unique(dt_pred_seq_cut$tITI)
num_speeds = length(speed_levels)
# define the number of SNR levels and sequence inserts:
snr_levels = c(4/5, 1/2, 1/4, 1/8, 0)
snr_levels_char = c("4/5", "1/2", "1/4", "1/8", "0")
num_snrs = length(snr_levels)
num_inserts = 6
insert_length = 12
# get the number of TRs during resting state and warn if incorrect:
num_rest_trs = max(dt_pred_rest_s1$t)
if (num_rest_trs != 233) {warning("number of resting state TRs incorrect")}
num_seq_trials = max(dt_pred_seq_cut$trial_tITI)
if (num_seq_trials != 15) {warning("number of sequence trials incorrect")}
num_subs = length(unique(dt_pred_rest_s1$id))
if (num_subs != 32) {warning("number of participants incorrect")}
# create a vector of seeds to make computations reproducible:
# NOTE(review): seeding is commented out, so the random insert placement
# below is not reproducible across runs — confirm this is intended:
#seeds = seq(1, num_snrs * num_inserts * num_subs * num_speeds)
- count = 1
- for (sub in unique(dt_pred_rest_s1$id)) {
- # sample random sequence trials to be cut out depending on the number of inserts:
- cutout_trials = sort(sample(1:num_seq_trials, num_inserts, replace = FALSE))
- # sample random insert sequences depending on the number of inserts (no overlap):
- rest_seq = subseqsample(sequence = seq(1, num_rest_trs), seq_size = insert_length, seq_num = num_inserts * 2)
- subsequence_starts = sort(sapply(X = seq(1, num_inserts * 2), FUN = function(x){which(rest_seq == x)[1]}))
- insert_starts = sort(sample(subsequence_starts, num_inserts, replace = FALSE))
- insert_stops = sort(insert_starts + insert_length - 1)
- blend_starts = sort(setdiff(subsequence_starts, insert_starts))
- blend_stops = sort(blend_starts + insert_length - 1)
- for (ninserts in seq(1, num_inserts)) {
- snr_index = 0
- for (snr in snr_levels) {
- snr_index = snr_index + 1
- for (speed in speed_levels) {
- # create a resting state data subset for the current settings:
- dt_pred_rest_tmp = dt_pred_rest_s1 %>%
- filter(id == sub) %>% setorder(classification, id, t, class) %>%
- mutate(snr = snr) %>% mutate(insert = ninserts) %>% mutate(position_insert = position) %>%
- mutate(speed = speed) %>% mutate(snr_char = snr_levels_char[snr_index]) %>%
- mutate(prob_insert = probability) %>% mutate(prob_insert_blend = probability) %>%
- mutate(insert_index = NA) %>% mutate(insert_start = NA) %>% setDT(.) %>%
- verify(.[, by = .(classification, id, position), .(num_trs = .N)]$num_trs == num_rest_trs)
- # loop through all events to cutout and insert fast sequences:
- for (cevent in 1:ninserts) {
- # create the sequence for sequence inserting:
- insert_seq = seq(insert_starts[cevent], insert_stops[cevent])
- # create the sequence for the blending rest trials:
- blend_seq = seq(blend_starts[cevent], blend_stops[cevent])
- # select the sequence from fast trials:
- dt_pred_seq_cutout = dt_pred_seq_cut %>%
- # filter for specififc participant and specific trial:
- filter(id == sub & trial_tITI == cutout_trials[cevent]) %>%
- # filter for specific speed and specific trs:
- filter(tITI == speed & seq_tr %in% seq(2, insert_length + 1)) %>%
- # sort by classification, id, timepoint and class (as rest data, see above):
- setorder(classification, id, t, class) %>%
- # multiply the probability with the current SNR level:
- mutate(prob_snr = probability * snr)
- # insert the snr-adjusted sequence trial probabilities:
- dt_pred_rest_tmp$prob_insert[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr
- # keep the position indices of the inserted sequence trials:
- dt_pred_rest_tmp$position_insert[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$position
- # blend the snr-adjusted sequence trials with data from snr-adjusted res trials:
- dt_pred_rest_tmp$prob_insert_blend[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr +
- dt_pred_rest_tmp$probability[dt_pred_rest_tmp$t %in% blend_seq] * (1 - snr)
- # create a new column with the (SNR-adjusted) sequence probabilities:
- dt_pred_rest_tmp$prob_seq[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$probability
- dt_pred_rest_tmp$prob_seq_snr[dt_pred_rest_tmp$t %in% insert_seq] = dt_pred_seq_cutout$prob_snr
- # include indices that indicate where data was inserted:
- dt_pred_rest_tmp$insert_index[dt_pred_rest_tmp$t %in% insert_seq] = "inserted"
- dt_pred_rest_tmp$insert_num[dt_pred_rest_tmp$t %in% insert_seq] = cevent
- dt_pred_rest_tmp$insert_start[dt_pred_rest_tmp$t %in% min(insert_seq)] = 1
- }
- dt_pred_rest_insert[[count]] = dt_pred_rest_tmp
- count = count + 1
- }
- }
- }
- }
- # combine data in one datatable:
- dt_pred_rest_insert <- do.call("rbind", dt_pred_rest_insert)
- # calculate the differences in probability:
- dt_pred_rest_insert$prob_diff = dt_pred_rest_insert$probability - dt_pred_rest_insert$prob_insert_blend
- ```
- Plot differences between inserted sequence speeds at different SNR levels:
```{r}
# visualize the SNR scaling: probabilities of one inserted fast sequence
# trial for an example participant, one line per SNR level, per event class:
cols_order = colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5)
plot_data = subset(dt_pred_rest_insert, id == example_id) %>%
  filter(insert_index == "inserted") %>% filter(speed == "32 ms") %>%
  filter(insert == 1) %>%
  transform(snr_char = factor(as.factor(snr_char), levels = c("4/5", "1/2", "1/4", "1/8", "0"))) %>% setDT(.)
fig_prob_diff_snr = ggplot(data = plot_data, aes(
  x = as.factor(t), y = prob_seq_snr, color = snr_char, group = snr_char)) +
  geom_line() + geom_point(alpha = 0.3) +
  facet_wrap(~ class) + ylim(c(0,1)) +
  scale_color_manual(values = cols_order, name = "SNR level") +
  ylab("Probability (%)") + xlab("Time (TRs)") +
  theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE) +
  theme(legend.position = c(1, 0), legend.justification = c(1, 0)) +
  # only label the first and last TR on the x-axis:
  scale_x_discrete(labels = label_fill(seq(1, 12, 1), mod = 11),
    breaks = seq(min(plot_data$t), max(plot_data$t), 1)) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank())
fig_prob_diff_snr
```
- Plot differences in probability between true and augmented rest data.
```{r}
# compare probability time courses of sequence-free vs. sequence-inserted
# rest for one example participant (fast speed, two SNR and insert levels):
plot_data = subset(dt_pred_rest_insert, id == example_id & speed == "32 ms") %>%
  # fixed: `insert == c(2, 6)` recycled the comparison vector and selected a
  # row-order-dependent subset; %in% correctly keeps all rows with insert
  # level 2 or 6 (consistent with the insert filters used elsewhere):
  filter(snr_char %in% c("4/5", "1/4") & insert %in% c(2, 6)) %>%
  mutate(snr_char = paste("SNR level:", snr_char)) %>%
  mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts")))
# create data frame to plot rectangles where trials were inserted:
plot_rectangles = plot_data %>%
  filter(insert_start == 1) %>%
  mutate(xmin = t) %>%
  mutate(xmax = ifelse(speed == "32 ms", xmin + 11, xmin + 11)) %>%
  mutate(ymin = -100, ymax = 100)
fig_inserts = ggplot(data = plot_data, mapping = aes(
  x = as.numeric(t), y = as.numeric(prob_diff * 100), group = as.factor(position))) +
  geom_hline(aes(yintercept = 0), color = "gray") +
  geom_line(mapping = aes(color = as.factor(position))) +
  facet_grid(vars(as.factor(insert)), vars(fct_rev(as.factor(snr_char)))) +
  # gray rectangles mark the TR windows where sequence data was inserted:
  geom_rect(data = plot_rectangles, aes(
    xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax),
    alpha = 0.2, inherit.aes = FALSE, show.legend = FALSE, fill = "gray") +
  theme(legend.position = "top", legend.direction = "horizontal",
    legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
    legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  xlab("Time (TRs)") + ylab("Difference in probability (%)\nof sequence-free vs. -inserted rest") +
  scale_colour_manual(name = "Serial position", values = color_events) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
    xlim = c(0, 250), ylim = c(-100, 100)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 250, 25), mod = 5),
    breaks = seq(0, 250, 25)) +
  theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  theme(panel.border = element_blank(), axis.line = element_line()) +
  theme(axis.line = element_line(colour = "black"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank())
fig_inserts
```
We calculate the slopes for each speed, SNR and insert level.
Attention: this chunk takes a while to run because a large number of linear
models are estimated (roughly 13 minutes on the original analysis machine).
```{r}
# sanity check: the serial position may only deviate from the inserted
# position at TRs where sequence data was actually inserted:
dt_pred_rest_insert %>%
  verify(.[position != position_insert,]$insert_index == "inserted")
```
```{r}
start_time = Sys.time()
# calculate the slope for each inserted and snr-adjusted rest data segment:
dt_pred_rest_insert_slope = dt_pred_rest_insert %>% setDT(.) %>%
  # calculate the regression slopes for each id, trial, speed, snr, insert and TR:
  .[, by = .(id, classification, speed, trial_tITI, insert, snr, snr_char, t, insert_num), .(
    num_events = .N,
    mean_position = mean(position),
    mean_position_insert = mean(position_insert),
    # slopes are multiplied by -1; NOTE(review): presumably to match the sign
    # convention of the earlier slope analyses — confirm:
    slope_rest = coef(lm(probability ~ position))[2] * (-1),
    slope_insert = coef(lm(prob_insert_blend ~ position_insert))[2] * (-1),
    sd_prob_rest = sd(probability),
    sd_prob_insert = sd(prob_insert_blend)
  # verify that the slope was calculated across five events with mean position of 3:
  )] %>% verify(all(num_events == 5)) %>% verify(all(mean_position == 3)) %>%
  verify(all(mean_position_insert == 3)) %>%
  # sort the data table by speed, trial, number of inserts, snr level and time:
  setorder(id, speed, trial_tITI, insert, snr, snr_char, t) %>%
  # filter for one-versus-rest classification only:
  filter(classification == "ovr") %>% setDT(.)
end_time = Sys.time()
print(end_time - start_time)
```
- Plot slopes with inserts:
```{r}
# plot regression-slope time courses of sequence-free rest together with the
# slopes of SNR-scaled inserted segments (example participant, 6 inserts):
colors = c(hcl.colors(5, "Cividis")[c(5,1)], "gray")
plot_rest = subset(dt_pred_rest_insert_slope, id == example_id) %>%
  filter(snr_char %in% c("0", "4/5") & insert == c(6)) %>%
  mutate(snr_char = paste("SNR level:", snr_char)) %>%
  mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts")))
# create data frame to plot rectangles where trials were inserted:
plot_rectangles = plot_rest %>%
  filter(!is.na(insert_num)) %>% setDT(.) %>%
  .[, by = .(id, classification, speed, insert, snr, snr_char, insert_num), .(
    xmin = min(t), xmax = max(t), ymin = -0.25, ymax = 0.25
  )]
plot_inserts = plot_rest %>%
  filter(!is.na(insert_num)) %>% setDT(.)
# plot the slope time courses:
fig_inserts_slopes = ggplot(data = plot_rest, mapping = aes(
  x = as.numeric(t), y = as.numeric(slope_rest), color = fct_rev(speed))) +
  geom_hline(aes(yintercept = 0), color = "black") +
  geom_line(aes(color = "rest")) +
  # fixed: removed an earlier facet_grid(insert x snr_char) call that was
  # silently replaced by this facet specification (adding a facet spec to a
  # ggplot overrides any existing one):
  facet_grid(vars(fct_rev(as.factor(speed))), vars(fct_rev(as.factor(snr_char)))) +
  geom_rect(data = plot_rectangles, aes(
    xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, fill = speed),
    alpha = 0.2, inherit.aes = FALSE, show.legend = FALSE) +
  geom_line(data = plot_inserts, aes(
    y = slope_insert, color = speed, group = interaction(insert_num, speed))) +
  theme(legend.position = "top", legend.direction = "horizontal",
    legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
    legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  xlab("Time (TRs)") + ylab("Regression slope") +
  scale_colour_manual(name = "Speed", values = colors) +
  scale_fill_manual(name = "Speed", values = colors) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE, xlim = c(0, 250)) +
  guides(color = guide_legend(nrow = 1)) +
  scale_x_continuous(labels = label_fill(seq(0, 250, 25), mod = 5), breaks = seq(0, 250, 25)) +
  theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  theme(axis.line = element_line(colour = "black"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank())
fig_inserts_slopes
```
```{r}
# Compute, for a chosen difference variable ("mean_slope_diff" or
# "mean_sd_diff"), one-sample t-tests of sequence-inserted minus
# sequence-free rest per speed, insert and SNR level, with FDR correction:
insert_stat_func = function(dt, variable) {
  dt_pred_rest_insert_slope_stat = dt %>%
    # calculate mean slope coefficients across all augmented rest TRs:
    .[, by = .(id, classification, speed, insert, snr, snr_char), .(
      num_trs = .N,
      mean_slope_rest = mean(abs(slope_rest)),
      mean_slope_insert = mean(abs(slope_insert)),
      mean_sd_prob_rest = mean(sd_prob_rest),
      mean_sd_prob_insert = mean(sd_prob_insert)
    )] %>% verify(all(num_trs == num_rest_trs)) %>%
    # calculate the difference between the inserted and sequence-free rest data:
    mutate(mean_slope_diff = mean_slope_insert - mean_slope_rest) %>%
    mutate(mean_sd_diff = mean_sd_prob_insert - mean_sd_prob_rest) %>% setDT(.) %>%
    # conduct a t-test comparing sequence-free vs. sequence-including rests:
    .[, by = .(classification, speed, insert, snr, snr_char), {
      ttest_results = t.test(get(variable), mu = 0, alternative = "two.sided")
      list(
        num_subs = .N,
        mean_variable_diff = mean(get(variable)),
        pvalue = ttest_results$p.value,
        tvalue = round(ttest_results$statistic, 2),
        df = ttest_results$parameter,
        sem_upper = mean(get(variable)) + (sd(get(variable))/sqrt(.N)),
        sem_lower = mean(get(variable)) - (sd(get(variable))/sqrt(.N))
      )}] %>%
    verify(all(num_subs == 32)) %>%
    verify(all((num_subs - df) == 1)) %>%
    # adjust p-values for multiple comparisons:
    # check if the number of comparisons matches expectations:
    .[, by = .(classification, speed), ":=" (
      num_comp = .N,
      pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
    )] %>%
    verify(all(num_comp == 30, na.rm = TRUE)) %>%
    # round the original p-values according to the apa standard:
    mutate(pvalue_round = round_pvalues(pvalue)) %>%
    # round the adjusted p-value:
    mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
    # create new variable indicating significance below 0.05
    mutate(significance = ifelse(pvalue_adjust < 0.05, "*", "")) %>%
    # log-transform the p-values:
    mutate(pvalue_log = log(pvalue_adjust, base = 20)) %>%
    # adjust the ordering of the snr levels:
    transform(snr_char = factor(snr_char, levels = c("0", "1/8", "1/4", "1/2", "4/5"))) %>%
    # set order:
    setorder(classification, speed, insert, snr) %>% setDT(.)
  return(dt_pred_rest_insert_slope_stat)
}
snr_insert_stat_slope = insert_stat_func(dt = dt_pred_rest_insert_slope, variable = "mean_slope_diff")
snr_insert_stat_sd = insert_stat_func(dt = dt_pred_rest_insert_slope, variable = "mean_sd_diff")
snr_insert_stat_slope %>% filter(pvalue_adjust < 0.05)
snr_insert_stat_sd %>% filter(pvalue_adjust < 0.05)
```
- #### Figure 5f / 5g
- Plot average slope regression coefficients and p-values:
```{r}
cols_order = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
# Plot the selected statistic (log p-values, slope difference or SD
# difference) per insert level, one line per SNR level, faceted by speed:
plot_snr_insert = function(dt, variable) {
  if (variable == "pvalue_log") {
    yrange = seq(-10, 0, 1)
    ylabel = "p-value\n(base-20 log-transformed)"
    yintercept = -1
    mod = 2
    # push the SEM ribbon outside the y-limits so it is effectively hidden
    # for the p-value panels:
    dt$sem_upper = 10; dt$sem_lower = 10;
  }
  else if (variable == "mean_slope_diff") {
    variable = "mean_variable_diff"
    yrange = seq(-0.007, 0.003, 0.001)
    mod = 2
    ylabel = "Regression slope\n(relative to pre-task rest)"
    yintercept = 0
  }
  else if (variable == "mean_sd_diff") {
    variable = "mean_variable_diff"
    yrange = seq(-0.03, 0.01, 0.01)
    mod = 1
    ylabel = "SD of probability\n(relative to pre-task rest)"
    yintercept = 0
  }
  # NOTE(review): an unrecognized `variable` falls through with ylabel etc.
  # undefined and errors below — consider an explicit stop() for clarity.
  ggplot(data = dt, mapping = aes(
    x = as.factor(insert), y = as.numeric(get(variable)), group = as.factor(snr_char),
    color = as.factor(snr_char), fill = as.factor(snr_char))) +
    facet_wrap(~ fct_rev(as.factor(speed))) +
    geom_hline(aes(yintercept = yintercept), color = "black", linetype = "dashed") +
    geom_ribbon(aes(ymin = sem_lower, ymax = sem_upper), alpha = 0.5, color = NA) +
    geom_line() + geom_point(pch = 15) +
    theme(legend.position = "top", legend.direction = "horizontal",
      legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
      legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
    xlab("Number of inserted sequence trials") + ylab(ylabel) +
    coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(min(yrange), max(yrange))) +
    guides(color = guide_legend(nrow = 1)) +
    scale_color_manual(values = cols_order, name = "SNR level") +
    scale_fill_manual(values = cols_order, name = "SNR level") +
    scale_y_continuous(labels = label_fill(yrange, mod = mod), breaks = yrange) +
    theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
    theme(axis.line = element_line(colour = "black"),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      panel.border = element_blank(),
      panel.background = element_blank())
}
fig_snr_pmat_slope = plot_snr_insert(dt = snr_insert_stat_slope, variable = "pvalue_log")
fig_snr_data_slope = plot_snr_insert(dt = snr_insert_stat_slope, variable = "mean_slope_diff")
fig_snr_pmat_sd = plot_snr_insert(dt = snr_insert_stat_sd, variable = "pvalue_log")
fig_snr_data_sd = plot_snr_insert(dt = snr_insert_stat_sd, variable = "mean_sd_diff")
fig_snr_pmat_slope; fig_snr_data_slope; fig_snr_pmat_sd; fig_snr_data_sd;
```
- #### Source Data File Fig. 5f
```{r}
# fixed: this chunk previously exported `snr_insert_stat_sd`, producing a
# byte-identical duplicate of the Fig. 5g source data; Fig. 5f shows the
# slope-based statistics, so export `snr_insert_stat_slope` here
# (TODO confirm the panel-to-table mapping against the published figure):
snr_insert_stat_slope %>%
  select(-num_subs, -classification, -pvalue, -tvalue, -df, -significance,
         -num_comp, -pvalue_adjust, -pvalue_round, -pvalue_adjust_round) %>%
  write.csv(., file = file.path(path_sourcedata, "source_data_figure_5f.csv"),
            row.names = FALSE)
```
- #### Source Data File Fig. 5g
```{r}
# export the SD-based statistics underlying figure 5g as a source data file:
snr_insert_stat_sd %>%
  select(-num_subs, -classification, -pvalue, -tvalue, -df, -significance,
         -num_comp, -pvalue_adjust, -pvalue_round, -pvalue_adjust_round) %>%
  write.csv(., file = file.path(path_sourcedata, "source_data_figure_5g.csv"),
            row.names = FALSE)
```
- ### Frequency spectra (inserted)
- #### Frequency spectra of augmented rest
- We calculate the frequency spectra for sequence-free and sequence-inserted
- (fast and slow sequences) rest:
```{r}
# power of each SNR level is later expressed relative to SNR 0 (ratio - 1):
baseline = 1
# get frequency spectrum of fast and slow sequence data:
dt_pred_rest_insert_freq = dt_pred_rest_insert_slope %>%
  # calculate the lomb scargle frequency spectra for every snr and insert level
  .[, by = .(classification, id, speed, insert, snr, snr_char), {
    frequency_spectrum = lsp(x = slope_insert, times = 1:.N, from = 0, to = 0.3, plot = FALSE, ofac = 2)
    list(num_trs = .N, freq_bins = frequency_spectrum$scanned, power = frequency_spectrum$power,
      # moving-average window covering ~0.05 Hz of the scanned range:
      filter_width = round(0.05/mean(diff(frequency_spectrum$scanned)))
    )}] %>% verify(all(num_trs == 233)) %>% verify(length(unique(filter_width)) == 1) %>%
  # smooth the frequency spectra:
  .[, by = .(classification, id, speed, insert, snr, snr_char), ":=" (
    power_smooth = stats::filter(power, rep(1/unique(filter_width), unique(filter_width))),
    freq_bins_smooth = stats::filter(freq_bins, rep(1/unique(filter_width), unique(filter_width)))
  )] %>%
  # add a counter for the frequency spectrum
  .[, by = .(classification, id, speed, snr, snr_char, insert), ":=" (bin = 1:.N)] %>%
  # verify that the number of frequency bins is the same for all participants, snr, and insert levels:
  verify(length(unique(.[, by = .(classification, id, speed, insert, snr), .(
    num_bins = length(unique(bin)))]$num_bins)) == 1) %>%
  # calculate power of every snr level relative to the snr 0 level:
  .[, by = .(classification, id, speed, insert), ":=" (
    power_smooth_rel = as.vector(sapply(X = sort(unique(snr)), FUN = function(x){
      power_smooth[snr == x]/power_smooth[snr == 0]}) - baseline)
  )]
# calculate the mean smoothed frequency for every snr and insert level across frequency bins:
dt_pred_rest_insert_freq_mean = dt_pred_rest_insert_freq %>%
  .[, by = .(classification, speed, snr, snr_char, insert, freq_bins_smooth, bin), .(
    num_subs = .N,
    mean_power_smooth_rel = mean(power_smooth_rel),
    sem_upper = mean(power_smooth_rel) + (sd(power_smooth_rel)/sqrt(.N)),
    sem_lower = mean(power_smooth_rel) - (sd(power_smooth_rel)/sqrt(.N))
  )] %>% verify(all(num_subs == 32))
```
- #### Figure 5h
```{r}
# Figure 5h: relative frequency spectra of augmented rest per SNR level,
# faceted by insert count and sequence speed, with expected bands shaded:
colors_snr = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
plot_data = dt_pred_rest_insert_freq_mean %>%
  filter(!is.na(mean_power_smooth_rel)) %>%
  filter(insert %in% c(2, 6)) %>%
  mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts"))) %>%
  # adjust the ordering of the snr levels:
  transform(snr_char = factor(snr_char, levels = c("0", "1/8", "1/4", "1/2", "4/5"))) %>%
  setDT(.)
# half-width (in Hz) of the shaded expected-frequency bands:
freq_range = 0.01
num_plot_inserts = length(unique(plot_data$insert))
num_speed_inserts = length(unique(plot_data$speed))
frequency_expectation <- frequency_expectation[1:3,]
# replicate the expected frequencies across facets (speed x insert):
frequency_expectation_snr = frequency_expectation[1:3,] %>%
  mutate(colors = c(hcl.colors(5, "Cividis")[c(1)], "gold", "gray")) %>%
  .[rep(seq_len(nrow(.)), each = num_plot_inserts * num_speed_inserts), ] %>%
  mutate(xmax = xintercept + freq_range, xmin = xintercept - freq_range) %>%
  mutate(speed = rep(unique(plot_data$speed), num_plot_inserts * nrow(frequency_expectation))) %>%
  mutate(insert = rep(rep(unique(plot_data$insert), each = 2), nrow(frequency_expectation))) %>%
  transform(xintercept = ifelse(colors == "gray", 0.03, xintercept)) %>%
  filter(colors != "gray")
fig_freq_lsp_insert = ggplot(data = plot_data, aes(
  color = as.factor(snr_char), y = as.numeric(mean_power_smooth_rel), x = as.numeric(freq_bins_smooth))) +
  geom_rect(data = frequency_expectation_snr, aes(
    xmin = xmin, xmax = xmax, ymin = -0.2, ymax = 0.3),
    alpha = 0.1, inherit.aes = FALSE, show.legend = FALSE, fill = frequency_expectation_snr$colors) +
  #geom_vline(data = frequency_expectation_snr, aes(xintercept = xintercept),
  #  linetype = "dashed", color = frequency_expectation_snr$colors) +
  geom_line() +
  facet_grid(vars(insert), vars(fct_rev(as.factor(speed)))) +
  geom_ribbon(aes(fill = snr_char, ymin = sem_lower, ymax = sem_upper),
    alpha = 0.3, color = NA) +
  #geom_text(data = frequency_expectation_snr, aes(
  #  x = xintercept + 0.03, y = -0.15, label = paste(round(xintercept, 2), "Hz")),
  #  color = frequency_expectation_snr$colors, size = 3) +
  xlab("Frequency") + ylab("Power\n(relative to pre-task rest)") +
  # NOTE(review): guide = FALSE is deprecated in ggplot2 >= 3.3.4; prefer
  # guide = "none" once the ggplot2 dependency allows it:
  scale_color_manual(name = "SNR level", values = colors_snr, guide = FALSE) +
  scale_fill_manual(name = "SNR level", values = colors_snr, guide = FALSE) +
  theme(legend.position = "top", legend.direction = "horizontal",
    legend.justification = "center", legend.margin = margin(0, 0, 0, 0),
    legend.box.margin = margin(t = 0, r = 0, b = -5, l = 0)) +
  coord_capped_cart(left = "both", bottom = "both", expand = TRUE,
    xlim = c(0, 0.3), ylim = c(-0.2, 0.3)) +
  #guides(color = guide_legend(nrow = 1)) +
  theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
  theme(axis.line = element_line(colour = "black"),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank())
fig_freq_lsp_insert
```
- #### Source Data Fig. 5h
```{r}
# Export the data underlying figure 5h as a source data file:
fig_5h_data <- select(plot_data, -classification, -bin, -num_subs)
write.csv(fig_5h_data,
          file = file.path(path_sourcedata, "source_data_figure_5h.csv"),
          row.names = FALSE)
```
- #### Expected frequency ranges
```{r}
# extract mean relative power in the expected fast / slow / rest frequency
# bands per participant, speed, SNR and insert level (freq_range and the
# delta_freq_* expectations are defined in earlier chunks):
dt_pred_rest_lsp_insert_exp = dt_pred_rest_insert_freq %>%
  .[, by = .(classification, id, speed, snr, snr_char, insert), {
    # indices of the smoothed frequency bins closest to each band edge:
    fast_min = which.min(abs(freq_bins_smooth - delta_freq_fast + freq_range))
    fast_max = which.min(abs(freq_bins_smooth - delta_freq_fast - freq_range))
    slow_min = which.min(abs(freq_bins_smooth - delta_freq_slow + freq_range))
    slow_max = which.min(abs(freq_bins_smooth - delta_freq_slow - freq_range))
    rest_min = which.min(abs(freq_bins_smooth - 0.03 + freq_range))
    rest_max = which.min(abs(freq_bins_smooth - 0.03 - freq_range))
    list(
      fast_min = freq_bins_smooth[fast_min], fast_max = freq_bins_smooth[fast_max],
      slow_min = freq_bins_smooth[slow_min], slow_max = freq_bins_smooth[slow_max],
      power_index_fast = mean(power_smooth_rel[fast_min:fast_max]),
      power_index_slow = mean(power_smooth_rel[slow_min:slow_max]),
      power_index_rest = mean(power_smooth_rel[rest_min:rest_max])
    )}] %>%
  # melt all index variables into one column:
  gather(grep("power", names(.), fixed = TRUE), key = "index", value = "power") %>% setDT(.) %>%
  # attach human-readable frequency labels per band:
  .[grepl("index_fast", index), label := paste0(round(delta_freq_fast, 2), "")] %>%
  .[grepl("index_slow", index), label := paste0(round(delta_freq_slow, 2), "")] %>%
  .[grepl("index_rest", index), label := paste0(round(delta_freq_rest, 2), "")] %>%
  setorder(., classification, id, speed, snr, insert) %>%
  # filter for specific inserts levels here:
  filter(insert %in% c(2, 6)) %>% setDT(.)
```
- ```{r}
- # One-sample t-tests of relative power against zero for each combination of
- # classification, speed, insert level, SNR and frequency index, followed by
- # FDR correction across comparisons within classification x speed:
- dt_pred_rest_lsp_insert_exp_mean = dt_pred_rest_lsp_insert_exp %>%
- # filter out the irrelevant rest power index:
- filter(index != "power_index_rest") %>% setDT(.) %>%
- # compare the mean power in the expected frequency for every SNR and insert level:
- .[, by = .(classification, speed, insert, snr, snr_char, index), {
- ttest_results = t.test(power, mu = 0, alternative = "two.sided", paired = FALSE)
- list(
- num_subs = .N,
- pvalue = ttest_results$p.value,
- tvalue = ttest_results$statistic,
- df = ttest_results$parameter,
- # Cohen's d for a one-sample test against mu = 0:
- cohens_d = (mean(power) - 0)/sd(power)
- )}] %>% verify(all(num_subs == 32)) %>% verify((num_subs - df) == 1) %>%
- # adjust p-values for multiple comparisons:
- # check if the number of comparisons matches expectations:
- .[, by = .(classification, speed), ":=" (
- num_comp = .N,
- pvalue_adjust = p.adjust(pvalue, method = "fdr", n = .N)
- )] %>%
- # round the original p-values according to the apa standard:
- mutate(pvalue_round = round_pvalues(pvalue)) %>%
- # round the adjusted p-value:
- mutate(pvalue_adjust_round = round_pvalues(pvalue_adjust)) %>%
- # create new variable indicating significance below 0.05
- mutate(significance = ifelse(pvalue_adjust < 0.05, "*", ""))
- # filter for all p-values below the significance level:
- dt_pred_rest_lsp_insert_exp_mean %>% filter(pvalue < 0.05)
- ```
- #### Figure 5i
- ```{r}
- # Figure 5i: bar plot of power at the expected (predicted) frequencies,
- # split by insert level (rows) and speed (columns), colored by SNR level.
- cols_order = rev(colorRampPalette(c('#4890F7', '#EB3C2D'), space = 'Lab')(5))
- plot_data = dt_pred_rest_lsp_insert_exp %>%
- # add to inserts label:
- mutate(insert = ifelse(insert == 1, paste(insert, "insert"), paste(insert, "inserts"))) %>%
- # adjust the ordering of the snr levels:
- transform(snr_char = factor(snr_char, levels = c("0", "1/4", "1/8", "1/2", "4/5"))) %>%
- # filter out the irrelevant rest power index:
- filter(index != "power_index_rest") %>% setDT(.)
- fig_freq_exp_insert = ggplot(data = plot_data, aes(
- y = power, x = fct_rev(label), fill = snr_char, color = snr_char, group = snr)) +
- # bars show the mean power per frequency label and SNR level:
- geom_bar(stat = "summary", fun = "mean", position = position_dodge(0.9),
- width = 0.8) +
- # individual participant data points, jittered within each dodge group:
- geom_point(position = position_jitterdodge(
- jitter.width = 0.1, jitter.height = 0, seed = 2, dodge.width = 0.9),
- alpha = 0.1, inherit.aes = TRUE, pch = 16) +
- facet_grid(vars(insert), vars(fct_rev(as.factor(speed)))) +
- # error bars show the standard error of the mean:
- stat_summary(fun.data = "mean_se", geom = "errorbar",
- position = position_dodge(0.9), width = 0, color = "black") +
- xlab("Predicted frequency") + ylab("Power\n(relative to pre-task rest)") +
- coord_capped_cart(left = "both", bottom = "both", expand = TRUE, ylim = c(-0.1, 0.3)) +
- # suppress the fill/color legends; guide = "none" replaces the
- # deprecated guide = FALSE (deprecated since ggplot2 3.3.4):
- scale_fill_manual(values = cols_order, name = "SNR level", guide = "none") +
- scale_color_manual(values = cols_order, name = "SNR level", guide = "none") +
- theme(axis.ticks.x = element_line(color = "white"), axis.line.x = element_line(color = "white")) +
- theme(legend.position = "top", legend.direction = "horizontal",
- legend.justification = "center", legend.margin = margin(t = 0, r = 0, b = 0, l = 0),
- legend.box.margin = margin(t = 0, r = 0, b = 0, l = 0)) +
- #guides(color = guide_legend(nrow = 1)) +
- theme(strip.text = element_text(margin = margin(b = 2, t = 2, r = 2, l = 2))) +
- theme(axis.line = element_line(colour = "black"),
- panel.grid.major = element_blank(),
- panel.grid.minor = element_blank(),
- panel.border = element_blank(),
- panel.background = element_blank())
- fig_freq_exp_insert
- ```
- #### Source Data Fig. 5i
- ```{r}
- # Export the data underlying Fig. 5i, dropping internal bookkeeping columns:
- write.csv(
- x = plot_data %>% select(-classification, -fast_min, -fast_max, -slow_min, -slow_max),
- file = file.path(path_sourcedata, "source_data_figure_5i.csv"),
- row.names = FALSE
- )
- ```
- ### Figure 5
- ```{r}
- # Compose the panel rows of Figure 5 from the previously created subfigures:
- panel_1 = plot_grid(fig_sd_prob, fig_mean_beta_abs, ncol = 2, labels = c("a", "b"))
- panel_2 = plot_grid(fig_prob_slope, ncol = 1, labels = c("c"))
- panel_3 = plot_grid(fig_freq_lsp, fig_freq_exp, ncol = 2, labels = c("d", "e"))
- panel_4 = plot_grid(fig_snr_data_sd, fig_snr_pmat_sd, labels = c("f", "g"))
- panel_5 = plot_grid(fig_freq_lsp_insert, fig_freq_exp_insert, labels = c("h", "i"))
- # stack all rows vertically; panels a-c share the first row:
- plot_grid(plot_grid(panel_1, panel_2, ncol = 2), panel_3, panel_4, panel_5, ncol = 1)
- ```
- ```{r, eval=FALSE}
- # Save the composed figure as a PDF via the Cairo device:
- ggsave(
- plot = last_plot(),
- filename = "highspeed_plot_decoding_rest.pdf",
- path = path_figures,
- device = cairo_pdf,
- width = 10, height = 12, scale = 1, dpi = "retina"
- )
- ```
- ```{r}
- # Save the manuscript version of Figure 5 as a PDF via the Cairo device:
- ggsave(
- plot = last_plot(),
- filename = "wittkuhn_schuck_figure_5.pdf",
- path = path_figures,
- device = cairo_pdf,
- width = 10, height = 12, scale = 1, dpi = "retina"
- )
- ```
- ## Supplementary Figure
- ```{r}
- # Arrange the six supplementary panels in a 2 x 3 grid with automatic labels:
- plot_grid(
- fig_prob_time_seq, fig_prob_diff_snr,
- fig_inserts, fig_inserts_slopes,
- fig_snr_pmat_slope, fig_snr_data_slope,
- labels = "auto", ncol = 2, nrow = 3
- )
- ```
- ```{r, eval=FALSE}
- # Save the supplementary figure as a PDF via the Cairo device:
- ggsave(
- plot = last_plot(),
- filename = "highspeed_plot_decoding_rest_supplement.pdf",
- path = path_figures,
- device = cairo_pdf,
- width = 10, height = 10, scale = 1, dpi = "retina"
- )
- ```
|