config.yaml

# After parsing this file, additional files mentioned in `supplemental_config` are read.
# Leaf dictionary items are overwritten if they already existed.
# `supplemental_config` entries in additional files are read but ignored.
# Notes on use:
# 1) Config files are read in the order of this list.
# 2) Folders can be specified; all *.yml and *.yaml files in them are read recursively.
# 3) Absolute and relative paths are possible.
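# Illustrative merge example (the override value below is invented for illustration):
# if this file sets
#     daq: {fs: 30000., smpl_fct: 30}
# and a supplemental file sets
#     daq: {smpl_fct: 15}
# the merged configuration is daq: {fs: 30000., smpl_fct: 15}; only the overlapping leaf
# item is overwritten, all other leaves are kept.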
supplemental_config:
    - 'config/model_conf.yaml'    # <======= Classifier-training output, auto-generated by param_scan2.py.
                                  # The path of the trained model configuration is determined by setting
                                  # classifier.saved_model_conf_name below.
    - 'config/channels.yaml'      # <======= In this file, make changes regarding the channels controlling feedback,
                                  # the feedback hold period, and the feedback thresholds.
    - 'config/feedback_wo_reward.yaml'          # <= Uncomment this for feedback exploration without reward
    # - 'config/feedback.yaml'                  # <= Uncomment this for feedback with reward
    # - 'config/question_validation.yaml'       # <= Uncomment this for question validation
    # - 'config/free_questions.yaml'            # <= Uncomment this for free questions
    # - 'config/colorspeller_validation.yaml'   # <= Uncomment this for color speller validation
    # - 'config/colorspeller.yaml'              # <= Uncomment this for free color speller
    # - 'config/exploration.yaml'               # <= Uncomment this for motor attempt exploration
    ### Examples of how to specify supplemental_config entries:
    # - '/full/path/to/yaml/file.yaml'
    # - 'relative/path/to/yaml/file.yaml'
    # - '/path/to/folder/with/yaml/files'
system:
    plot: 0
general:
    debug: 1
    clear_trial_history: False
daq:
    n_channels_max: 128
    # n_channels: 2
    # exclude_channels: []  # These are BlackRock channel IDs (1-based).
    exclude_channels: []
    # car_channels: []  # channel IDs to use for the common average reference; this is useful
    # for spike band power and LFP calculations
    car_channels: []
    fs: 30000.        # sampling frequency (Hz)
    smpl_fct: 30      # downsampling factor
    trigger_len: 50   # length of triggers  <--- review: this parameter only appears in a commented-out line
    daq_sleep: 0.1    # s  <--- review: this parameter does not seem to be used anywhere
    normalization:
        len: 600.0             # in seconds; length of the normalization period
        do_update: false       # perform automatic updates if true
        update_interval: 10.0  # in seconds; interval at which the rate normalization is updated
        range: [10, 90]        # percentiles, for automated normalization
        clamp_firing_rates: True
        # If use_all_channels is False:
        #   each channel's firing rate r is clamped and normalized (r_n):
        #       r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
        #   if the channel is set to 'invert', then r_n := 1 - r_n
        #   All normalized rates are then averaged.
        # Otherwise, all channels are averaged first, then normalized.
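        # Worked example (bottom/top taken from all_channels below; the rate value is illustrative):
        # with bottom = 1.15625, top = 1.59375 and a measured rate r = 1.50,
        # r_n = (1.50 - 1.15625) / (1.59375 - 1.15625) ~ 0.786; a rate of 2.0 would be
        # clamped to top and give r_n = 1.0 (or 0.0 if 'invert' were true).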
        use_all_channels: false  # if True, all channels will be used; if False, the channels specified below will be used
        all_channels: {bottom: 1.15625, top: 1.59375, invert: false}
        channels:
        # - {id: 20, bottom: 5.0, top: 10.0, invert: True}
        # - {id: 97, bottom: 5.0, top: 10.0, invert: false}
        # - {id: 99, bottom: 5.0, top: 15.0, invert: false}
    data_source: 'spike_rates'  # 'band_power' or 'spike_rates'
    spike_band_power:
        loop_interval: 10         # ms, how often the spike band power calculator should run
        integrated_samples: 1500  # samples to integrate over per step -> 1500 / 3e4 = 50 ms bins
        average_n_bins: 10        # the output SBP is the average of the last n bins; for the default
                                  # sample_group and integrated_samples settings and 10 bins, this is 0.5 s
        sample_group: 6
        filter:  # IIR filter coefficients for (spike) band power calculation
            b: [0.956543225556877, -1.91308645111375, 0.956543225556877]
            a: [1, -1.91119706742607, 0.914975834801434]
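        # Processing sketch (an assumption based on the parameters above, not taken from the code):
        # the continuous signal is filtered with the IIR coefficients b/a (e.g.
        # scipy.signal.lfilter(b, a, x) in Python), the power is integrated over
        # integrated_samples = 1500 samples (1500 / 30000 Hz = 50 ms bins), and the reported
        # SBP is the mean of the last average_n_bins = 10 bins, i.e. about 0.5 s of data.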
    spike_rates:
        n_units: 1         # number of units per channel
        bin_width: 0.05    # sec, for spike rate estimation
        loop_interval: 50  # ms
        method: 'boxcar'   # 'exponential' or 'boxcar'
        decay_factor: .9   # for exponential decay: for each step back, a bin's count is multiplied by decay_factor
        max_bins: 20       # for the exponential and boxcar methods, determines the number of bins in history to take into account
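        # With bin_width = 0.05 s and max_bins = 20, both methods look at the last
        # 20 * 0.05 s = 1 s of spiking: 'boxcar' weights these bins equally, while
        # 'exponential' weights the most recent bin by 1, the one before by 0.9,
        # then 0.9 * 0.9 = 0.81, and so on (one factor of decay_factor per step back).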
        # bl_offset: 0.000001  # baseline correction constant
        bl_offset: 30.         # baseline correction constant
        # bl_offset: 0.1       # baseline correction constant
        correct_bl: False        # for online mode
        correct_bl_model: False  # for offline mode
buffer:
    length: 600  # buffer shape: (length, channels)
session:
    flags:
        bl: True
        bl_rand: True
        decode: True
        stimulus: True
recording:
    timing:
        t_baseline_1: 1.     # sec, trial-1 baseline duration
        t_baseline_all: 1.   # sec, baseline duration of all other trials
        t_baseline_rand: 1.  # sec, add a random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
        t_after_stimulus: 0.0
        t_response: 5.       # sec, trial response duration
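        # Approximate trial time course implied by these parameters (a reading of the settings,
        # not taken from the code): a baseline of t_baseline_1 (first trial) or t_baseline_all
        # seconds, plus a random 0..t_baseline_rand s if session.flags.bl_rand is True, then the
        # stimulus, then t_after_stimulus, then a response window of up to t_response seconds,
        # which ends early if classifier.break_loop is True and a decision is reached.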
        decoder_refresh_interval: .01      # sec, for continuous decoding, the cycle time of the decoder
        bci_loop_interval: .05             # sec, step for the bci thread loop
        recording_loop_interval: .05       # sec, step for the recording thread loop
        recording_loop_interval_data: .02  # sec, step for the data process loop
classifier:
    max_active_ch_nr: []
    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
    # include_channels: [0, 1, 4, 7, 8, 12, 14, 19, 22, 26, 28, 29, 31, 96, 100, 103]
    # include_channels: range(0,128)
    include_channels: [20]
    # include_channels: [0, 1, 4, 6, 7, 8, 12, 14, 19, 20, 22, 26, 28, 29, 31, 96, 100, 103, 121]
    # include_channels: [1, 2, 3, 9, 19, 29, 41, 44, 48, 51, 52, 53, 54,
    #                    62, 63, 66, 74, 82, 94, 95, 113]
    # exclude_channels: []
    exclude_channels: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                       13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
                       28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                       41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
                       54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
                       67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
                       80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
                       93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
                       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
                       119, 120, 121, 122, 123, 124, 125, 126, 127]
    # exclude_channels: range(32,96)
    # exclude_channels: []
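    # Note: the active exclude_channels list contains every channel ID from 0 to 127
    # except 20, mirroring include_channels: [20] above.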
    n_triggers: 2  # DO NOT CHANGE THIS
    n_classes: 2
    template: [10, 14, 18, 22, 26, 30, 34, 38]
    trigger_pos: 'start'  # 'start' or 'stop'
    online: False         # will be overwritten by the code, see bci.py
    thr_prob: 0.8
    thr_window: 40  # 30  # number of samples with probability above threshold required to trigger a decision
    break_loop: True      # True: move on as soon as a decision is made; otherwise wait the full t_response time
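    # Rough decision latency (assuming the above-threshold counter advances once per
    # decoder_refresh_interval, i.e. every 0.01 s): thr_window = 40 samples with probability
    # above thr_prob = 0.8 corresponds to roughly 0.4 s of sustained evidence before a
    # decision is triggered.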
    # Models to use for online decoding:
    saved_model_conf_name: "config/model_conf.yaml"
    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'  # scikit
    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'  # explicit LDA
    exclude_data_channels: []
    n_neg_train: 100000
    deadtime: 40
    model_training:
        save_model: False
        model: 'scikit'        # eigen, scikit, explicit
        solver: 'lsqr'         # svd, lsqr, eigen
        cross_validation: True
        n_splits: 5            # for cross-validation
        test_size: .2          # float in (0, 1) or int (absolute number of trials, >= n_classes)
        reg_fact: 0.3          # regularization factor
        fsel: False            # feature selection
        triggers_plot: 3
        peaks:                 # these values are for offline training
            # height: 0.9      # probability threshold
            # width: 28        # minimum number of samples above threshold
            distance: 40       # minimum number of samples between peaks
            sig: 'pred'        # 'prob' or 'pred' -> signal based on probabilities or on the predicted class
            prefilter: False
        psth:
            cut: [-40, 100]
lfp:
    fs: 1000            # sampling rate (Hz)
    sampling_ratio: 30
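    # Consistency note: daq.fs (30000 Hz) / sampling_ratio (30) = 1000 Hz, matching fs above,
    # assuming sampling_ratio is the decimation factor applied to the raw stream.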
    filter_fc_lb: [10, 0]    # cut-off frequencies (Hz) for the low-band filter
    filter_fc_mb: [12, 40]   # cut-off frequencies (Hz) for the mid-band filter
    filter_fc_hb: [60, 250]  # cut-off frequencies (Hz) for the high-band filter
    filter_order_lb: 2
    filter_order_mb: 6
    filter_order_hb: 10
    artifact_thr: 400        # exclude data above this threshold
    array1: range(32,64)     # 3 4 7 8 10 14 17 15 44
    array21: range(2)
    # array22: range(100,112)
    array22: []              # range(96,128)
    array1_exclude: []
    array2_exclude: []
    i_start: 0   # None    # import data from this start index
    i_stop: -1   # 600000  # None  # ... to this stop index
    psth_win: [-1000, 5000]
    exclude: False
    normalize: False
    zscore: False
    car: True
    sub_band: 1
    motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
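    # English glosses of the German cue labels above (kept as-is; they appear to be used as
    # identifiers): Zunge = tongue, Schliesse_Hand = close hand, Oeffne_Hand = open hand,
    # Bewege_Augen = move eyes, Bewege_Kopf = move head.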
    spectra:
        spgr_len: 500
    plot:
        ch_ids: [0]  # relative ID of the imported channel
        general: True
        filters: False
cerebus:
    instance: 0
    buffer_reset: True
    buffer_size_cont: 30001
    buffer_size_comments: 500
file_handling:
    data_path: '/data/clinical/neural/fr/'
    # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
    results: '/data/clinical/nf/results/'
    paradigm_config_file: 'paradigm.yaml'
    # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
    # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
    # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
    save_data: True  # always keep this True
    mode: 'ab'       # ab: append binary, wb: write binary (will overwrite existing files)
speller:
    type: 'exploration'  # exploration, question, training_color, color, feedback
    audio: True
    pyttsx_rate: 100
    audio_result_fb: False
    speller_matrix: True
feedback:
    # The normalized rate is multiplied by alpha, and the baseline beta is added.
    feedback_tone: False
    alpha: 360  # scaling coefficient
    beta: 120   # offset
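    # Worked example (the rate value is illustrative): a normalized rate r_n = 0.5 maps to
    # alpha * r_n + beta = 360 * 0.5 + 120 = 300, so the feedback value spans
    # [beta, alpha + beta] = [120, 480] as r_n goes from 0 to 1.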
    tone_length: 0.25        # length of a feedback tone in seconds
    target_tone_length: 1.0  # length of the target tone in seconds
    target_n_tones: 5        # play the target tone every n feedback tones
    hold_iterations: 2
plot:
    channels: [97, 99]  # channels for the live plot; the app needs to be restarted if this is changed
    fps: 10.            # frames per second
    pca: False
    filter_min_rate: 2  # sp/sec, exclude channels with a rate below this value, e.g. in show_ffedback_data.py
sim_data:
    rate_bl: 10