
initial commit of all bids-relevant code

Lennart Wittkuhn 3 years ago
parent
commit
716fac2399

+ 45 - 0
code/bids_conversion/highspeed-bids-description.py

@@ -0,0 +1,45 @@
+# ======================================================================
+# SCRIPT INFORMATION:
+# ======================================================================
+# SCRIPT: UPDATE OF BIDS DIRECTORY
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2019
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ======================================================================
+# IMPORT RELEVANT PACKAGES
+# ======================================================================
+import json
+import os
+# ======================================================================
+# DEFINE PATHS
+# ======================================================================
+# path to the root directory:
+path_root = os.environ['HOME']
+# path to the data input directory (in bids format):
+path_bids = os.path.join(path_root, 'highspeed', 'bids')
+path_desc = os.path.join(path_bids, 'dataset_description.json')
+# ======================================================================
+# UPDATE DATA-SET DESCRIPTION FILE
+# ======================================================================
+# open the dataset_description.json file:
+with open(path_desc) as json_file:
+    json_desc = json.load(json_file)
+# update fields of the json file:
+json_desc["Acknowledgements"] = "This work was funded by a research group grant awarded to NWS by the Max Planck Society (M.TN.A.BILD0004). We thank Eran Eldar, Sam Hall-McMaster and Ondrej Zika for helpful comments on a previous version of this manuscript, Gregor Caregnato for help with participant recruitment and data collection, Anika Loewe, Sonali Beckmann and Nadine Taube for assistance with MRI data acquisition, Lion Schulz for help with behavioral data analysis, Michael Krause for support with cluster computing and all participants for their participation. Lennart Wittkuhn is a pre-doctoral fellow of the International Max Planck Research School on Computational Methods in Psychiatry and Ageing Research (IMPRS COMP2PSYCH). The participating institutions are the Max Planck Institute for Human Development, Berlin, Germany, and University College London, London, UK. For more information, see https://www.mps-ucl-centre.mpg.de/en/comp2psych."
+json_desc["Authors"] = ["Lennart Wittkuhn", "Nicolas W. Schuck"]
+json_desc["Funding"] = ["M.TN.A.BILD0004"]
+json_desc["DatasetDOI"] = "openneuro.org"
+json_desc["License"] = "Creative Commons CC0"
+json_desc["Name"] = "Faster than thought: Detecting sub-second activation sequences with sequential fMRI pattern analysis"
+json_desc["ReferencesAndLinks"] = ["Wittkuhn, L. and Schuck, N. W. (2020). Faster than thought: Detecting sub-second activation sequences with sequential fMRI pattern analysis. bioRxiv. doi: 10.1101/2020.02.15.950667"]
+json_desc["HowToAcknowledge"] = "Please cite: Wittkuhn, L. and Schuck, N. W. (2020). Faster than thought: Detecting sub-second activation sequences with sequential fMRI pattern analysis. bioRxiv. doi: 10.1101/2020.02.15.950667"
+json_desc["EthicsApprovals"] = ["The research protocol was approved by the ethics commission of the German Psychological Society (DPGs), reference number: NS 012018"]
+# save the updated dataset_description.json file:
+with open(path_desc, 'w') as outfile:
+    json.dump(json_desc, outfile, indent=4)

+ 63 - 0
code/bids_conversion/highspeed-bids-events.py

@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ======================================================================
+# SCRIPT INFORMATION:
+# ======================================================================
+# SCRIPT: CREATE JSON FILES DESCRIBING COLUMNS IN THE EVENTS.TSV FILES
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2019
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ======================================================================
+# IMPORT RELEVANT PACKAGES
+# ======================================================================
+import json
+import glob
+from os.path import join as opj
+
+# output path (currently a local test file used during development):
+path_out = opj('/Users', 'wittkuhn', 'Desktop', 'test.json')
+
+data = {
+    "onset": {
+        "LongName": "Run-wise event onset in seconds",
+        "Description": "The time in seconds from the first scanner trigger of the respective run",
+        "Units": "seconds"
+    },
+    "subject": {
+        "LongName": "Unique subject identifier",
+        "Description": "A unique identifier for each person who is a subject in the study"
+    },
+    "duration": {
+        "LongName": "Event duration in seconds",
+        "Description": "The duration of each trial in seconds",
+        "Units": "seconds"
+    },
+    "session": {
+        "LongName": "Session identifier",
+        "Description": "The experimental session in which the event occurred",
+        "Levels": {
+            "1": "Session 1 of the experiment",
+            "2": "Session 2 of the experiment"
+        }
+    },
+    "run_session": {
+        "LongName": "Run identifier within session",
+        "Description": "The number of the task run within the respective session"
+    }
+}
+
+with open(path_out, 'w') as outfile:
+    json.dump(data, outfile, indent=4, sort_keys=True)
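+# a minimal sketch (commented out) of how these column descriptions could be
+# written as JSON sidecars next to each events.tsv file; this is an assumed
+# use of the glob import, and path_bids is hypothetical (requires `import os`):
+# path_bids = opj(os.environ['HOME'], 'highspeed', 'bids')
+# for path_tsv in glob.glob(opj(path_bids, 'sub-*', 'ses-*', 'func', '*_events.tsv')):
+#     with open(path_tsv.replace('.tsv', '.json'), 'w') as sidecar:
+#         json.dump(data, sidecar, indent=4, sort_keys=True)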

+ 57 - 0
code/bids_conversion/highspeed-bids-fieldmaps.py

@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ======================================================================
+# SCRIPT INFORMATION:
+# ======================================================================
+# SCRIPT: UPDATE THE FIELDMAP JSON FILES
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ======================================================================
+# IMPORT RELEVANT PACKAGES
+# ======================================================================
+import os
+import glob
+import json
+import sys
+import stat
+# ======================================================================
+# DEFINE PATHS
+# ======================================================================
+# usage: python3 highspeed-bids-fieldmaps.py $PATH_BIDS
+# where $PATH_BIDS is the path to your BIDS directory
+path_bids = str(sys.argv[1])
+path_fmap = os.path.join(path_bids, '*', '*', 'fmap', '*.json')
+path_func = os.path.join(path_bids, '*', '*', 'func', '*.nii.gz')
+# ======================================================================
+# UPDATE FIELDMAP JSON FILES
+# ======================================================================
+# get all fieldmap files in the data-set:
+files_fmap = glob.glob(path_fmap)
+# loop over all field-map files:
+for file_path in files_fmap:
+    # open the .json file of the fieldmap acquisition:
+    with open(file_path, 'r') as in_file:
+        json_info = json.load(in_file)
+    # get the path to the session folder of a specific participant:
+    file_base = os.path.dirname(os.path.dirname(file_path))
+    # get the path to all functional acquisitions in that session:
+    files_func = glob.glob(os.path.join(file_base, 'func', '*nii.gz'))
+    session = os.path.basename(file_base)
+    up_dirs = os.path.join(session, 'func')
+    intended_for = [os.path.join(up_dirs, os.path.basename(file)) for file in files_func]
+    json_info["IntendedFor"] = sorted(intended_for)
+    # temporarily make the file writable (fieldmap json files may be read-only):
+    permissions = os.stat(file_path).st_mode
+    os.chmod(path=file_path, mode=permissions | stat.S_IWUSR)
+    # save updated fieldmap json-file:
+    with open(file_path, 'w') as out_file:
+        json.dump(json_info, out_file, indent=2, sort_keys=True)
+    # change file permissions back to read-only:
+    os.chmod(path=file_path, mode=permissions)

+ 40 - 0
code/bids_conversion/highspeed-bids-participants.py

@@ -0,0 +1,40 @@
+# ======================================================================
+# SCRIPT INFORMATION:
+# ======================================================================
+# SCRIPT: UPDATE PARTICIPANTS .JSON FILE
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2019
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ======================================================================
+# IMPORT RELEVANT PACKAGES
+# ======================================================================
+import json
+import os
+# ======================================================================
+# DEFINE PATHS
+# ======================================================================
+# path to the root directory:
+path_root = os.environ['HOME']
+# path to the data input directory (in bids format):
+path_bids = os.path.join(path_root, 'highspeed', 'bids')
+path_desc = os.path.join(path_bids, 'participants.json')
+# ======================================================================
+# CREATE PARTICIPANTS DESCRIPTION FILE
+# ======================================================================
+# update fields of the json file:
+json_desc = dict()
+json_desc["participant_id"] = "Participant identifier"
+json_desc["age"] = "Age, in years as in the first session"
+json_desc["sex"] = "Sex, self-rated by participant, m for male / f for female / o for other"
+json_desc["handedness"] = "Handedness, self-rated by participant; note that participants were required to be right-handed"
+json_desc["digit_span"] = "Total score in Digit-Span Test (Petermann & Wechsler, 2012), assessing working memory capacity"
+json_desc["randomization"] = "Pseudo-randomized group assignment for selection of sequences in sequence trials"
+json_desc["session_interval"] = "Interval in days between the two experimental sessions"
+# save the participants.json file:
+with open(path_desc, 'w') as outfile:
+    json.dump(json_desc, outfile, indent=4)

+ 50 - 0
code/bids_conversion/highspeed-bids-stimuli.sh

@@ -0,0 +1,50 @@
+#!/bin/bash
+# ==============================================================================
+# SCRIPT INFORMATION:
+# ==============================================================================
+# SCRIPT: DOWNLOAD STIMULI AND MOVE SELECTION INTO STIMULI FOLDER
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2020
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ==============================================================================
+# DEFINE ALL PATHS:
+# ==============================================================================
+PATH_BASE=${HOME}
+PATH_PROJECT="highspeed"
+PATH_BIDS=${PATH_BASE}/${PATH_PROJECT}/bids
+PATH_OUTPUT=${PATH_BIDS}/stimuli
+# ==============================================================================
+# DOWNLOAD AND MOVE STIMULI FILES:
+# ==============================================================================
+# download stimuli from Haxby et al. (2001), Science to the base directory:
+wget http://data.pymvpa.org/datasets/haxby2001/stimuli-2010.01.14.tar.gz -P ${PATH_BASE}
+# unpack the .tar.gz file into the stimuli folder (this creates a 'stimuli' folder):
+tar -zxvf ${PATH_BASE}/stimuli-2010.01.14.tar.gz -C ${PATH_BIDS}
+# ==============================================================================
+# CREATE RELEVANT DIRECTORIES:
+# ==============================================================================
+# create stimuli directory:
+if [ ! -d ${PATH_OUTPUT} ]; then
+	mkdir -p ${PATH_OUTPUT}
+	echo "created ${PATH_OUTPUT}"
+fi
+# create output directory (always overwrite old one):
+rm -rf ${PATH_OUTPUT}/images
+mkdir -p ${PATH_OUTPUT}/images
+echo "created ${PATH_OUTPUT}/images"
+# create arrays with old and new file names:
+FILES=("pepper5.jpg" "d9a.jpg" "Tim_3.jpg" "house2.3.jpg" "shoec3.jpg")
+NAMES=("cat.jpg" "chair.jpg" "face.jpg" "house.jpg" "shoe.jpg")
+# copy relevant files into 'images' (a sub-directory of 'stimuli')
+for ((i=0;i<${#FILES[@]};++i)); do
+	printf "copy %s to %s\n" "${FILES[i]}" "${NAMES[i]}"
+	cp -v ${PATH_OUTPUT}/*/"${FILES[i]}" "${PATH_OUTPUT}/images/${NAMES[i]}"
+done
+# remove all folders inside 'stimuli' except 'images'
+find ${PATH_OUTPUT} -mindepth 1 -maxdepth 1 -not -name images -exec rm -rf '{}' \;
+# remove original zipped stimulus folder:
+rm -rf ${PATH_BASE}/stimuli-2010.01.14.tar.gz*

+ 35 - 0
code/bids_conversion/highspeed-bids-validator.sh

@@ -0,0 +1,35 @@
+#!/bin/bash
+# ==============================================================================
+# SCRIPT INFORMATION:
+# ==============================================================================
+# SCRIPT: RUN BIDS VALIDATOR COMMAND LINE TOOL THROUGH SINGULARITY
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ==============================================================================
+# DEFINE ALL PATHS:
+# ==============================================================================
+PATH_BASE="${HOME}"
+PROJECT="highspeed"
+PATH_CONTAINER="${PATH_BASE}/tools/bids_validator/validator_1.3.12.sif"
+PATH_INPUT="${PATH_BASE}/${PROJECT}/bids"
+PATH_OUTPUT="${PATH_BASE}/${PROJECT}/derivatives/bids_validator"
+# ==============================================================================
+# CREATE RELEVANT DIRECTORIES:
+# ==============================================================================
+# create output directory:
+if [ ! -d ${PATH_OUTPUT} ]; then
+	mkdir -p ${PATH_OUTPUT}
+	echo "created ${PATH_OUTPUT}"
+fi
+# ==============================================================================
+# RUN BIDS-VALIDATOR:
+# ==============================================================================
+# run bids-validator and save the output in a text file:
+singularity run -B ${PATH_INPUT}:/input:ro ${PATH_CONTAINER} /input/ | tee -a ${PATH_OUTPUT}/bids_validation.txt
+# run the bids-validator and save the output in a .json file:
+singularity run -B ${PATH_INPUT}:/input:ro ${PATH_CONTAINER} /input/ --json | tee -a ${PATH_OUTPUT}/bids_validation.json

+ 40 - 0
code/defacing/highspeed-defacing-cleanup.sh

@@ -0,0 +1,40 @@
+#!/bin/bash
+# ==============================================================================
+# SCRIPT INFORMATION:
+# ==============================================================================
+# SCRIPT: REPLACING ORIGINAL STRUCTURAL IMAGES WITH DEFACED STRUCTURAL IMAGES
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ==============================================================================
+# DEFINE ALL PATHS:
+# ==============================================================================
+# define home directory
+PATH_BASE="${HOME}"
+# define the name of the current task:
+TASK_NAME="pydeface"
+# define the name of the project:
+PROJECT_NAME="highspeed"
+# path to the data directory (in bids format):
+PATH_BIDS=${PATH_BASE}/${PROJECT_NAME}/bids
+# ==============================================================================
+# REMOVE ORIGINAL T1W IMAGES AND REPLACE WITH DEFACED ONES:
+# ==============================================================================
+for FILE in ${PATH_BIDS}/*/*/anat/*T1w_defaced.nii.gz; do
+	# to just get filename from a given path:
+	FILE_BASENAME="$(basename -- $FILE)"
+	# get the parent path of directories:
+	FILE_PARENT="$(dirname "$FILE")"
+	# get the file name without the '_defaced' suffix:
+	FILE_NEW="${FILE_BASENAME//_defaced}"
+	# remove the undefaced T1w file:
+	rm -rf ${FILE_PARENT}/${FILE_NEW}
+	echo "removed ${FILE_PARENT}/${FILE_NEW}"
+	# replace the original T1w image with the defaced version:
+	mv ${FILE} ${FILE_PARENT}/${FILE_NEW}
+	echo "replaced with ${FILE}"
+done

+ 74 - 0
code/defacing/highspeed-defacing-cluster.sh

@@ -0,0 +1,74 @@
+#!/bin/bash
+# ==============================================================================
+# SCRIPT INFORMATION:
+# ==============================================================================
+# SCRIPT: DEFACING ANATOMICAL MRI DATA IN A BIDS DATASET
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ==============================================================================
+# DEFINE ALL PATHS:
+# ==============================================================================
+# define home directory:
+PATH_BASE="${HOME}"
+# define the name of the current task:
+TASK_NAME="pydeface"
+# define the name of the project:
+PROJECT_NAME="highspeed"
+# path to the singularity container:
+PATH_CONTAINER=${PATH_BASE}/tools/${TASK_NAME}/${TASK_NAME}_37-2e0c2d.sif
+# path to the log directory:
+PATH_LOG=${PATH_BASE}/${PROJECT_NAME}/logs/${TASK_NAME}
+# path to the data directory (in bids format):
+PATH_BIDS=${PATH_BASE}/${PROJECT_NAME}/bids
+# ==============================================================================
+# CREATE RELEVANT DIRECTORIES:
+# ==============================================================================
+# create directory for log files:
+if [ ! -d ${PATH_LOG} ]; then
+	mkdir -p ${PATH_LOG}
+else
+	# remove old log files inside the log container:
+	rm -r ${PATH_LOG}/*
+fi
+# ==============================================================================
+# DEFINE PARAMETERS:
+# ==============================================================================
+# maximum number of cpus per process:
+N_CPUS=1
+# memory demand in *MB*
+MEM_MB=500
+# memory demand in *KB*
+MEM_KB="$((${MEM_MB} * 1000))"
+# ==============================================================================
+# RUN PYDEFACE:
+# ==============================================================================
+for FILE in ${PATH_BIDS}/*/*/anat/*T1w.nii.gz; do
+	# to just get filename from a given path:
+	FILE_BASENAME="$(basename -- $FILE)"
+	# get the parent directory:
+	FILE_PARENT="$(dirname "$FILE")"
+	# name of the job (the first redirect starts a fresh job file):
+	echo "#PBS -N pydeface_${FILE_BASENAME}" > job
+	# set the expected maximum running time for the job:
+	echo "#PBS -l walltime=1:00:00" >> job
+	# determine how much RAM your operation needs:
+	echo "#PBS -l mem=${MEM_KB}kb" >> job
+	# email notification on abort/end, use 'n' for no notification:
+	echo "#PBS -m n" >> job
+	# write (output) log to log folder
+	echo "#PBS -o ${PATH_LOG}" >> job
+	# write (error) log to log folder
+	echo "#PBS -e ${PATH_LOG}" >> job
+	# request multiple cpus
+	echo "#PBS -l nodes=1:ppn=${N_CPUS}" >> job
+	# define the main command:
+	echo "singularity run -B ${FILE_PARENT}:/input:rw ${PATH_CONTAINER} pydeface /input/${FILE_BASENAME} --force" >> job
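+	# note: pydeface writes its output as *_defaced.nii.gz next to the input;
+	# highspeed-defacing-cleanup.sh later swaps these in for the originals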
+	# submit job to cluster queue and remove it to avoid confusion:
+	qsub job
+	rm -f job
+done

+ 174 - 0
code/events/extract-bids-events.m

@@ -0,0 +1,174 @@
+function event_all = extract_bids_events(Data, Basics, Sets, pad_sub, run, ses, cond)
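+% EXTRACT_BIDS_EVENTS Gather all events of one task condition in one run.
+% A short orientation (inferred from the caller, highspeed-bids-events.m):
+% Data, Basics and Sets are the structs/datasets loaded from the behavioral
+% .mat files, pad_sub is a BIDS subject identifier (e.g., 'sub-01'), and run,
+% ses and cond index the task run, session and condition. Returns one table
+% row per event.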
+%% CONVERT DATASET TO TABLE AND GET THE VARIABLE NAMES:
+% convert data table from dataset to table type:
+data = dataset2table(Data(cond).data);
+% get the variable names of the current data table:
+var_names = data.Properties.VariableNames;
+% get all events we want to convert:
+all_events = var_names(contains(var_names, {'tFlip'}));
+%% BASIC TASK STATS
+% determine the number of study sessions:
+num_ses = 2;
+% determine the number of task runs per study session:
+num_run = 4;
+% get the indices of the current sessions (as booleans):
+idx_session = Basics.runInfo.session == ses;
+% get the indices of the current run (as booleans):
+idx_run = Basics.runInfo.run == run;
+% get the timestamp of of the first scanner trigger:
+t_trigger = Basics.runInfo.tTrigger(idx_session & idx_run);
+% get the data indices of the current session:
+idx_data_ses = data.session == ses;
+% get the data indices of the current run within session:
+idx_data_run = data.run == run;
+% combine the indices to get the correct data indices:
+index = idx_data_ses & idx_data_run;
+% create a 2d-array of run indices ordered by run (row) and sessions (col):
+run_array = reshape(1:num_run * num_ses, num_run, num_ses);
+% define the names of the four different task conditions:
+task_names = {'oddball','sequence','repetition','repetition'};
+%% DEFINE DICTIONARY FOR THE STIMULUS LABELS:
+% define a cell array containing the stimulus labels in german:
+keys_stim = {'Gesicht','Haus','Katze','Schuh','Stuhl'};
+% define a cell array containing the stimulus labels in english:
+value_stim = {'face','house','cat','shoe','chair'};
+% create a dictionary that translates the stimulus labels:
+dict_stim = containers.Map(keys_stim,value_stim);
+%% DEFINE DICTIONARY FOR THE EVENTS:
+% define a cell array containing the internal event (screen flip) names:
+keys_type = {'tFlipCue','tFlipBlank','tFlipFix','tFlipStim','tFlipITI','tFlipDelay','tFlipResp','tResponse'};
+% define a cell array containing the corresponding BIDS trial_type labels:
+value_type = {'cue','blank','fixation','stimulus','interval','delay','choice','response'};
+% create a dictionary that translates the event names:
+dict_type = containers.Map(keys_type,value_type);
+%% LOOP OVER ALL EVENTS AND GATHER THE EVENT INFORMATION
+event_all = table;
+for i = 1:length(all_events)
+    % get the current event:
+    event_type = all_events{i};
+    % get the number of sequential stimuli of that event:
+    num_seq_stim = size(data{:,event_type},2);
+    % number of trials of cond in the current run and session:
+    num_events = sum(index) * num_seq_stim;
+    % initialize empty events struct
+    event = struct;
+    
+    % onsets, in seconds from first trigger:
+    event.onset = data{index, event_type} - t_trigger;
+    event.onset = reshape(transpose(event.onset),[],1);
+    
+    % duration, in seconds
+    if strcmp(event_type,'tFlipCue')
+        event.duration = repmat(Basics.tTargetCue,num_events,1);
+    elseif strcmp(event_type, 'tFlipBlank')
+        event.duration = repmat(Basics.tPreFixation,num_events,1);
+    elseif strcmp(event_type, 'tFlipFix')
+        event.duration = repmat(Basics.tFixation,num_events,1);
+    elseif strcmp(event_type, 'tFlipStim')
+        event.duration = repmat(Sets(cond).set.tStim,num_events,1);
+    elseif strcmp(event_type, 'tFlipITI')
+        event.duration = repelem(data.tITI(index,:),num_seq_stim,1);
+    elseif strcmp(event_type, 'tFlipDelay')
+        event.duration = (data{index, 'tFlipResp'} - t_trigger) - event.onset;
+    elseif strcmp(event_type, 'tFlipResp')
+        event.duration = repmat(Basics.tResponseLimit,num_events,1);
+    end
+    
+    % participant id
+    event.subject = repmat({pad_sub},num_events,1);
+    % add column that contains the session identifier:
+    event.session = repmat(ses,num_events,1);
+    % run within session:
+    event.run_session = repmat(run,num_events,1);
+    % run across the entire experiment:
+    event.run_study = repmat(run_array(run,ses),num_events,1);
+    % add column that contains the trial counter
+    if cond == 4
+        trial_indices = 41:1:45;
+        event.trial = repelem(trial_indices(index)',num_seq_stim,1);
+    else
+        event.trial = repelem(find(index),num_seq_stim,1);
+    end
+    % add column that contains the condition:
+    event.condition = repmat(task_names(cond),num_events,1);
+    % add column that contains the trial type:
+    event.trial_type = (repmat({dict_type(event_type)},num_events,1));
+    
+    % initialize all other event information:
+    event.serial_position = nan(num_events,1);
+    event.interval_time = nan(num_events,1);
+    event.stim_orient = nan(num_events,1);
+    event.stim_index = nan(num_events,1);
+    event.stim_label = event.trial_type;
+    %event.stim_file = strcat('images/',event.stim_label,'.jpg');
+    event.target = nan(num_events,1);
+    event.nontarget = nan(num_events,1);
+    event.key_down = nan(num_events,1);
+    event.key_id = repmat({NaN},num_events,1);
+    event.key_target = repmat({NaN},num_events,1);
+    event.accuracy = nan(num_events,1);
+    event.response_time = nan(num_events,1);
+    
+    if strcmp(event_type, 'tFlipStim')
+        % add column that contains the sequential position:
+        event.serial_position = repmat(1:num_seq_stim,1,sum(index))';
+        % add column that contains the inter-stimulus interval:
+        event.interval_time = repelem(data.tITI(index,:),num_seq_stim,1);
+        % add column that contains the stimulus orientation:
+        event.stim_orient = repelem(data.orient(index,:),num_seq_stim,1);
+        % get stimulus labels of the current run:
+        event.stim_index = data.stimIndex(index,:);
+        event.stim_index = reshape(transpose(event.stim_index),[],1);
+        % add column that contains the path to the stimulus folder:
+        event.stim_label = transpose(value_stim(event.stim_index));
+        %event.stim_file = strcat('images/',event.stim_label,'.jpg');
+        % add column that indicates whether stimulus is a target:
+        if cond == 1
+            event.target = double(event.stim_orient == 180);
+            event.nontarget = nan(sum(index) * num_seq_stim,1);
+        elseif cond == 2 || cond == 3 || cond == 4
+            A = data.stimIndex(index,:);
+            V = data.targetPos(index,:);
+            W = data.targetPosAlt(index,:);
+            event.target = bsxfun(@eq, cumsum(ones(size(A)), 2), V);
+            event.target = reshape(transpose(event.target),[],1);
+            event.nontarget = bsxfun(@eq, cumsum(ones(size(A)), 2), W);
+            event.nontarget = reshape(transpose(event.nontarget),[],1);
+        end
+    end
+    
+    % add participant responses:
+    if (strcmp(event_type, 'tFlipStim') && strcmp(task_names{cond}, 'oddball')) || ...
+        (strcmp(event_type, 'tFlipResp') && ~strcmp(task_names{cond}, 'oddball'))
+        % key press
+        event.key_down = repelem(data.keyIsDown(index,:),num_seq_stim,1);
+        % key identity
+        event.key_id = repelem(data.keyIndex(index,:),num_seq_stim,1);
+        if ~isempty(event.key_id)
+            event.key_id = cellstr(num2str(event.key_id));
+            event.key_id(strcmp(strrep(event.key_id,' ',''),'90')) = {'left'};
+            event.key_id(strcmp(strrep(event.key_id,' ',''),'71')) = {'right'};
+            event.key_id(~strcmp(event.key_id,'left') & ...
+                ~strcmp(event.key_id,'right')) = {NaN};
+        end
+        % key target
+        if ismember('keyTarget',data.Properties.VariableNames)
+            event.key_target = repelem(data.keyTarget(index,:),num_seq_stim,1);
+        else
+            event.key_target = repmat({NaN},sum(index) * num_seq_stim,1);
+        end
+        % accuracy
+        event.accuracy = repelem(data.acc(index,:),num_seq_stim,1);
+        % response time
+        event.response_time = repelem(data.rt(index,:),num_seq_stim,1);
+
+    end
+    events = struct2table(event);
+    event_all = [event_all;events];
+end
+% remove all events that have no onset:
+event_all(isnan(event_all.onset),:) = [];
+end
+

+ 140 - 0
code/events/highspeed-bids-events.m

@@ -0,0 +1,140 @@
+%% SCRIPT: CREATE EVENT.TSV FILES FROM THE BEHAVIORAL DATA FOR BIDS
+% =========================================================================
+% PROJECT: HIGHSPEED
+% WRITTEN BY LENNART WITTKUHN 2018 - 2020
+% CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+% MAX PLANCK RESEARCH GROUP NEUROCODE
+% MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+% MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+% LENTZEALLEE 94, 14195 BERLIN, GERMANY
+% =========================================================================
+%% DEFINE PATHS AND IMPORTANT VARIABLES:
+% clear the workspace and command window:
+clear variables; clc;
+% define the data root path
+path_root = fullfile('/Volumes','MPRG-Neurocode','Data','highspeed');
+% define the input path:
+path_input = fullfile(path_root,'main_mri','rawdata','behav_main');
+path_tardis = fullfile('/Users/wittkuhn/Volumes/tardis/highspeed');
+path_network = fullfile(path_root);
+% define the output path:
+% path_output = fullfile(path_root,'derivatives','events');
+path_output = fullfile(path_tardis, 'bids');
+%path_output = fullfile(path_network);
+% get the contents of the output directory:
+path_output_dir = dir(path_output);
+% check how many subjects are in the root directory:
+num_subs_found = sum(contains({path_output_dir.name},'sub'));
+% extended output path used to check for old files:
+path_old_files = fullfile(path_output,'*','*','func');
+% find all existing events.tsv files in the output directory:
+prev_files = dir(fullfile(path_old_files,'*events.tsv'));
+% delete all previous events files:
+for old_file = 1:length(prev_files)
+      delete(fullfile(prev_files(old_file).folder,prev_files(old_file).name))
+end
+% define the script path:
+path_script = fullfile('~','highspeed','highspeed_analysis','code');
+% read the text file containing a list of subject ids:
+sub_list = dlmread(fullfile(path_script, 'parameters', 'highspeed_participant_list.txt'));
+% turn the array with ids into a strings in a cell array:
+sub_list = cellstr(num2str(sub_list));
+% check if the number of subjects in the list matches the target directory:
+if numel(sub_list) ~= num_subs_found
+    warning(['Number of subjects in the data dir does not match ' ...
+        'number of subjects in the subject text file!']);
+    sub_alt_list = cellfun(@num2str,num2cell(1:length(sub_list)),'un',0);
+else
+    sub_alt_list = cellfun(@num2str,num2cell(1:num_subs_found),'un',0);
+end
+% determine the number of study sessions:
+num_ses = 2;
+% determine the number of task runs per study session:
+num_run = 4;
+% define a cell array containing the stimulus labels in german:
+key_set = {'Gesicht','Haus','Katze','Schuh','Stuhl'};
+% define a cell array containing the stimulus labels in english:
+value_set = {'Face','House','Cat','Shoe','Chair'};
+% create a dictionary that translates the stimulus labels:
+label_dict = containers.Map(key_set,value_set);
+% create a 2d-array of run indices ordered by run (row) and sessions (col):
+run_array = reshape(1:num_run * num_ses, num_run, num_ses);
+% define the names of the four different task conditions:
+task_names = {'oddball','sequence','repetition','repetition'};
+%%
+for sub = 1:length(sub_alt_list)
+%for sub = 1:1
+    % initialize the maximum repetition trial index:
+    max_rep = 0;
+    % get the current subject id:
+    sub_id = sub_list{sub};
+    % print progress:
+    fprintf('Running sub %d of %d\n', sub, length(sub_alt_list))
+    % define a template string that takes subject, session and run id:
+    template_string = '*sub_%s_session_%d*run_%d*';
+    % put in the current subject, session and run id:
+    file_string = sprintf(template_string,sub_id,num_ses,num_run);
+    % read behavioral data files of all participants:
+    path_file = dir(fullfile(path_input,file_string));
+    % load the behavioral data into the workspace:
+    load(fullfile(path_input,path_file.name));
+    for session = 1:num_ses
+        % create a subject identifier (in bids format):
+        pad_sub = sprintf('sub-%02d',str2double(sub_alt_list{sub}));
+        % create a session identifier (in bids format):
+        pad_ses = ['ses-0', num2str(session)];
+        % combine the two identifiers as the first part of file names:
+        sub_file_name = strcat(pad_sub,'_',pad_ses);
+        % create the subject output path:
+        path_output_sub = (fullfile(path_output,pad_sub,pad_ses,'func'));
+        % create the subject directory if it does not exist yet:
+        if ~exist(path_output_sub,'dir')
+            system(sprintf('mkdir -p %s',path_output_sub));
+        end
+        for run = 1:num_run
+            events = table;
+            for cond = 1:4                
+                event_all = extract_bids_events(Data, Basics, Sets, pad_sub, run, session, cond);
+                events = [events;event_all];
+                
+            end
+            % sort by event onset (i.e., in chronological order):
+            events = sortrows(events,{'onset'});
+            % make two copies of the repetition trials:
+            rep_trials_old = events.trial(contains(events.condition, 'repetition'));
+            rep_trials_new = rep_trials_old;
+            % get the old trial indices while maintaining their order:
+            trial_old = unique(rep_trials_old, 'stable');
+            % get the number of repetition trials in the current run:
+            n_rep_trials = length(trial_old);
+            % create new trial indices depending on the running number of
+            % repetition trials:
+            trial_new = max_rep+1:max_rep+n_rep_trials;
+            % change the old trial indices
+            for i = 1:n_rep_trials
+                rep_trials_new(rep_trials_old == trial_old(i)) = trial_new(i);
+            end
+            % update the repetition trials of the events files:
+            events.trial(contains(events.condition, 'repetition')) = rep_trials_new;
+            % update the counter of the maximum repetition trial index:
+            max_rep = max(unique(events.trial(contains(events.condition, 'repetition'))));
+            % create a template string for the output file names (tsv format):
+            string_template = '_task-highspeed_rec-prenorm_run-0%d_events';
+            % write the run information into the string:
+            string_written = sprintf(string_template,run);
+            % create the full filenames:
+            outfile_name = strcat(sub_file_name,string_written);
+            % create paths of the tsv and csv files:
+            path_tsv = fullfile(path_output_sub,strcat(outfile_name,'.tsv'));
+            path_csv = fullfile(path_output_sub,strcat(outfile_name,'.csv'));
+            % write the events table as csv file:
+            writetable(events,path_csv,'Delimiter','\t');
+            % copy the created file from csv to tsv file:
+            copyfile(path_csv,path_tsv)
+            % delete the csv file:
+            delete(path_csv);
+        end
+    end
+end
+

+ 71 - 0
code/events/highspeed-bids-participants.m

@@ -0,0 +1,71 @@
+%% HIGHSPEED: GET DATA OF THE HIGHSPEED TASK
+clear variables; clc; % clear workspace and command window
+
+path_base = '/Volumes/MPRG-Neurocode/Data/highspeed/main_mri/rawdata/';
+path_input = fullfile(path_base, 'behav_main');
+path_tardis = fullfile('/Users/wittkuhn/Volumes/tardis/highspeed');
+path_output = fullfile(path_tardis, 'bids');
+path_digitspan = fullfile(path_base, 'digit_span');
+allID = dlmread('/Users/wittkuhn/highspeed/highspeed_analysis/code/parameters/highspeed_participant_list.txt');
+num_subs = length(allID);
+
+% get data
+dirData = dir(path_input);
+dirData = {dirData.name};
+dataFiles = dirData(contains(dirData,'session_1_run_4') & contains(dirData,cellstr(num2str(allID)))); % search for matching files
+
+covariates = table;
+covariates.participant_id = cell(num_subs,1);
+covariates.age = nan(num_subs,1);
+covariates.sex = cell(num_subs,1);
+covariates.handedness = cell(num_subs,1);
+covariates.digit_span = nan(num_subs,1);
+covariates.randomization = nan(num_subs,1);
+covariates.session_interval = nan(num_subs,1);
+
+% study intervals, ordered by participant ids:
+intervals = {
+    1, 13, 4, 4, 17, 8, 14, 6, 7, 10, ...
+    7, 6, 18, 4, 8, 5, 23, 3, 1, 12, ...
+    9, 8, 24, 21, 17, 21, 14, 4, 4, 9, ...
+    7, 7, 11, 7, 14, 2, 1, 5, 3, 3};
+% create a dictionary that maps IDs to intervals:
+interval_dict = containers.Map(allID,intervals);
+
+filetemplate = 'highspeed_task_mri_sub_%d_session_%d_run_%d.mat';
+fprintf('List of missing data:\n')
+for sub = 1:num_subs
+    % get correct ids:
+    id_orig = allID(sub);
+    id_new = sprintf('sub-%02d', sub);
+    % load task statistics
+    session = 1; run = 4;
+    filename = sprintf(filetemplate,allID(sub),session,run);
+    dataframe = dirData(contains(dirData,filename));
+    if ~isempty(dataframe)
+        load(fullfile(path_input,filename));
+        covariates.participant_id{sub} = id_new;
+        covariates.age(sub) = Parameters.subjectInfo.age;
+        covariates.sex{sub} = Parameters.subjectInfo.gender;
+        covariates.handedness{sub} = 'right';
+        covariates.randomization(sub) = Parameters.subjectInfo.cbal;
+        covariates.session_interval(sub) = interval_dict(id_orig);
+    else
+        fprintf('%s: all behavioral data missing\n', id_new);
+    end
+    % digit span
+    digitspan_file = sprintf('DigitSpan_%d.mat',allID(sub));
+    digitspan_dir = dir(fullfile(path_digitspan));
+    if any(contains({digitspan_dir.name},digitspan_file))
+        load(fullfile(path_digitspan,digitspan_file))
+        covariates.digit_span(sub) = nansum(Data.acc);
+    end
+end
+
+% WRITE DATA
+writetable(covariates,fullfile(path_output,'participants.csv'),'Delimiter','\t','WriteRowNames',true,...
+    'QuoteStrings',true,'WriteVariableNames',true)
+copyfile(fullfile(path_output,'participants.csv'), fullfile(path_output,'participants.tsv'));
+delete(fullfile(path_output,'participants.csv'));

+ 33 - 0
code/heudiconv/highspeed-heudiconv-anonymizer.py

@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# ==============================================================================
+# SCRIPT INFORMATION:
+# ==============================================================================
+# SCRIPT: ANONYMIZE PARTICIPANT IDS DURING BIDS-CONVERSION WITH HEUDICONV
+# PROJECT: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2019
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ==============================================================================
+# import relevant packages:
+import sys
+import os
+# define paths depending on the operating system:
+if 'linux' in sys.platform:
+    # define the root path:
+    #path_root = os.path.join("/home", "mpib", "wittkuhn", "highspeed")
+    #path_code = os.path.join(path_root, "highspeed_analysis", "code")
+    # define the path to the text file containing the subject IDs:
+    path_sublist = os.path.join("/code", "parameters", "highspeed_participant_list.txt")
+# read the original subject ids from the text file:
+ids_orig = open(path_sublist, "r").read().splitlines()
+# create zero-padded anonymized ids (01, 02, ...), one per original id:
+ids_new = ["%02d" % t for t in range(1, len(ids_orig)+1)]
+# create a dictionary mapping original ids to anonymized ids:
+subj_map = dict(zip(ids_orig, ids_new))
+# look up the anonymized id for the original id passed on the command line:
+sid = sys.argv[-1]
+if sid in subj_map:
+    print(subj_map[sid])
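+# usage note: heudiconv's --anon-cmd interface is assumed to call this script
+# with the original subject id as the last command line argument and to read
+# the anonymized id from standard output, e.g.:
+#   highspeed-heudiconv-anonymizer.py <original-id>   # prints, e.g., "01"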

+ 107 - 0
code/heudiconv/highspeed-heudiconv-cluster.sh

@@ -0,0 +1,107 @@
+#!/bin/bash
+# ==============================================================================
+# SCRIPT INFORMATION:
+# ==============================================================================
+# SCRIPT: PARALLELIZE BIDS CONVERSION USING HEUDICONV ON THE MPIB CLUSTER
+# PROJECT NAME: HIGHSPEED
+# WRITTEN BY LENNART WITTKUHN, 2018 - 2020
+# CONTACT: WITTKUHN AT MPIB HYPHEN BERLIN DOT MPG DOT DE
+# MAX PLANCK RESEARCH GROUP NEUROCODE
+# MAX PLANCK INSTITUTE FOR HUMAN DEVELOPMENT
+# MAX PLANCK UCL CENTRE FOR COMPUTATIONAL PSYCHIATRY AND AGEING RESEARCH
+# LENTZEALLEE 94, 14195 BERLIN, GERMANY
+# ==============================================================================
+# DEFINE ALL PATHS:
+# ==============================================================================
+PATH_BASE="${HOME}"
+# define the name of the project:
+PROJECT_NAME="highspeed"
+# define the path to the input directory:
+PATH_INPUT="${PATH_BASE}/${PROJECT_NAME}/rawdata/mri"
+# define the path to the output directory
+PATH_OUTPUT="${PATH_BASE}/${PROJECT_NAME}/bids"
+# define the path to the singularity container:
+PATH_CONTAINER="${PATH_BASE}/tools/heudiconv/heudiconv_0.6.0.sif"
+# define the path to the code main directory:
+PATH_CODE="${PATH_BASE}/${PROJECT_NAME}/${PROJECT_NAME}_analysis/code"
+# path to the heudiconv heuristic file:
+HEURISTIC_FILE="highspeed_heudiconv_heuristic.py"
+# define path to the python executable file that anonymizes the subject ids:
+ANON_FILE="highspeed_heudiconv_anonymizer.py"
+# make the anonymizer file executable:
+chmod +x "${PATH_CODE}/heudiconv/$ANON_FILE"
+# path to the directory where error and out path_logs of cluster jobs are saved:
+PATH_LOGS="${PATH_BASE}/${PROJECT_NAME}/logs/heudiconv/$(date '+%Y%m%d_%H%M%S')"
+# path to the text file with all subject ids:
+PATH_SUB_LIST="${PATH_CODE}/parameters/highspeed_participant_list.txt"
+# ==============================================================================
+# CREATE RELEVANT DIRECTORIES:
+# ==============================================================================
+# create output directory:
+if [ ! -d ${PATH_OUTPUT} ]; then
+	mkdir -p ${PATH_OUTPUT}
+	echo "created ${PATH_OUTPUT}"
+fi
+# create directory for log files:
+if [ ! -d ${PATH_LOGS} ]; then
+	mkdir -p ${PATH_LOGS}
+	echo "created ${PATH_LOGS}"
+fi
+# ==============================================================================
+# DEFINE PARAMETERS:
+# ==============================================================================
+# maximum number of cpus per process:
+N_CPUS=1
+# memory demand in *GB*
+MEM_GB=4
+# memory demand in *MB*
+MEM_MB="$((${MEM_GB} * 1000))"
+# read subject ids from the list of the text file
+SUB_LIST=$(cat ${PATH_SUB_LIST} | tr '\n' ' ')
+# ==============================================================================
+# RUN HEUDICONV:
+# ==============================================================================
+# initialize a subject counter:
+SUB_COUNT=0
+# loop over all subjects:
+for SUB in ${SUB_LIST}; do
+	# update the subject counter:
+	let SUB_COUNT=SUB_COUNT+1
+	# get the subject number with zero padding:
+	SUB_PAD=$(printf "%02d\n" $SUB_COUNT)
+	# loop over all sessions:
+	for SES in `seq 1 2`; do
+		# get the session number with zero padding:
+		SES_PAD=$(printf "%02d\n" $SES)
+		# define the dicom template for the heudiconv command:
+		DICOM_DIR_TEMPLATE="HIGHSPEED_{subject}_HIGHSPEED_{subject}_${SES}*/*/*/*IMA"
+		# check the existence of the input files and continue if data is missing:
+		if [ ! -d ${PATH_INPUT}/HIGHSPEED_${SUB}_HIGHSPEED_${SUB}_${SES}_* ]; then
+			echo "No data input available for sub-${SUB} ses-${SES_PAD}!"
+			continue
+		fi
+		# name of the job:
+		echo "#PBS -N heudiconv_sub-${SUB_PAD}_ses-${SES_PAD}" > job
+		# set the expected maximum running time for the job:
+		echo "#PBS -l walltime=12:00:00" >> job
+		# determine how much RAM your operation needs:
+		echo "#PBS -l mem=${MEM_GB}GB" >> job
+		# request multiple cpus
+		echo "#PBS -l nodes=1:ppn=${N_CPUS}" >> job
+		# write (output) log to log folder:
+		echo "#PBS -o ${PATH_LOGS}" >> job
+		# write (error) log to log folder:
+		echo "#PBS -e ${PATH_LOGS}" >> job
+		# email notification on abort/end, use 'n' for no notification:
+		echo "#PBS -m n" >> job
+		# define the heudiconv command:
+		echo "singularity run -B ${PATH_INPUT}:/input:ro \
+		-B ${PATH_OUTPUT}:/output:rw -B ${PATH_CODE}:/code:ro \
+		${PATH_CONTAINER} -d /input/${DICOM_DIR_TEMPLATE} -s ${SUB} \
+		--ses ${SES_PAD} -o /output -f /code/heudiconv/${HEURISTIC_FILE} \
+		--anon-cmd /code/heudiconv/${ANON_FILE} -c dcm2niix -b --overwrite" >> job
+		# submit job to cluster queue and remove it to avoid confusion:
+		qsub job
+		rm -f job
+	done
+done

+ 43 - 0
code/heudiconv/highspeed-heudiconv-heuristic.py

@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+
+def create_key(template, outtype=('nii.gz',), annotation_classes=None):
+    if template is None or not template:
+        raise ValueError('Template must be a valid format string')
+    return template, outtype, annotation_classes
+
+
+def infotodict(seqinfo):
+
+    # paths in BIDS format
+    anat = create_key('sub-{subject}/{session}/anat/sub-{subject}_{session}_rec-{rec}_T1w')
+    rest_pre = create_key('sub-{subject}/{session}/func/sub-{subject}_{session}_task-rest_rec-{rec}_run-pre_bold')
+    rest_post = create_key('sub-{subject}/{session}/func/sub-{subject}_{session}_task-rest_rec-{rec}_run-post_bold')
+    task = create_key('sub-{subject}/{session}/func/sub-{subject}_{session}_task-highspeed_rec-{rec}_run-{item:02d}_bold')
+    fmap_topup = create_key('sub-{subject}/{session}/fmap/sub-{subject}_{session}_rec-{rec}_dir-{dir}_epi')
+    info = {anat: [], rest_pre: [], rest_post: [], task: [], fmap_topup: []}
+
+    for s in seqinfo:
+
+        if 'NORM' in s.image_type:
+            rec = 'prenorm'
+        else:
+            rec = 'nonorm'
+
+        if ('t1' in s.series_description):
+            info[anat].append({'item': s.series_id, 'rec': rec})
+        if ('FM_' in s.series_description) and ('prenorm' in rec):
+            info[fmap_topup].append({'item': s.series_id, 'rec': rec, 'dir': 'AP'})
+        if ('FMInv_' in s.series_description) and ('prenorm' in rec):
+            info[fmap_topup].append({'item': s.series_id, 'rec': rec, 'dir': 'PA'})
+        if ('Rest_Pre' in s.series_description) and ('prenorm' in rec):
+            info[rest_pre].append({'item': s.series_id, 'rec': rec})
+        if ('Rest_Post' in s.series_description) and ('prenorm' in rec):
+            info[rest_post].append({'item': s.series_id, 'rec': rec})
+        # some participants have one post resting state labelled "Rest";
+        # exclude Rest_Pre/Rest_Post, which the bare 'Rest' substring would also match:
+        if ('Rest' in s.series_description) and ('Rest_' not in s.series_description) and ('prenorm' in rec):
+            info[rest_post].append({'item': s.series_id, 'rec': rec})
+        if ('Run' in s.series_description) and ('prenorm' in rec):
+            info[task].append({'item': s.series_id, 'rec': rec})
+
+    return info
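+# for orientation: create_key returns a (template, outtypes, annotation_classes)
+# tuple, and infotodict returns a dict that maps each key to the list of scans
+# matching it; heudiconv then fills the placeholders in each filename template
+# (such as {rec} and {item}) from these entries when converting the series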