- # After parsing this file, additional files mentioned in `supplemental_config` are read.
- # Leaf dictionary items that already exist are overwritten by the later file.
- # `supplemental_config` entries in additional files are read but ignored.
- # Note on use:
- # 1) config files are read in order of this list
- # 2) folders can be specified. All *.yml and *.yaml files will be read recursively
- # 3) Absolute and relative paths are possible
- supplemental_config:
- - 'config/model_conf.yaml' # <======= This is classifier training output auto-generated by param_scan2.py
- # The path of the trained model configuration is determined by setting
- # classifier.saved_model_conf_name below.
- - 'config/channels.yaml' # <======= Edit this file to change the channels controlling feedback,
- # the feedback hold period, and the feedback thresholds
- - 'config/feedback_wo_reward.yaml' # <= Uncomment this for feedback exploration without reward
- # - 'config/feedback.yaml' # <= Uncomment this for feedback with reward
- # - 'config/question_validation.yaml' # <= Uncomment this for question validation
- # - 'config/free_questions.yaml' # <= Uncomment this for free questions
- # - 'config/colorspeller_validation.yaml' # <= Uncomment this for color speller validation
- #- 'config/colorspeller.yaml' # <= Uncomment this for free color speller
- # - 'config/exploration.yaml' # <= Uncomment this for motor attempt exploration
- ### Examples of how to specify supplemental_config files
- # - '/full/path/to/yaml/file.yaml'
- # - 'relative/path/to/yaml/file.yaml'
- # - '/path/to/folder/with/yaml/files/'
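- ### Illustration of the merge behaviour described above (comment only, not read by the software):
- # if this file sets a leaf item, e.g.      daq: {fs: 30000., smpl_fct: 30}
- # and a later supplemental file sets       daq: {smpl_fct: 10}
- # the merged result is                     daq: {fs: 30000., smpl_fct: 10}
- # i.e. only the overlapping leaf item is overwritten; all other leaves are kept.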
- system:
- plot: 0
- general:
- debug: 1
- clear_trial_history: False
- daq:
- n_channels_max: 128
- # n_channels: 2
- # exclude_channels: [] # These are BlackRock channel IDs (1-based).
- exclude_channels: []
- # car_channels: [] # channel IDs to use for common average reference. This is useful
- # for spike band power and LFP calculations
- car_channels: []
- fs: 30000. # sampling frequency (Hz)
- smpl_fct: 30 # downsample factor
- trigger_len: 50 # length of triggers <--- review: this parameter only appears in a commented-out line
- daq_sleep: 0.1 # s <--- review: this parameter does not appear to be used anywhere
-
- normalization:
- len: 600.0 # in seconds. Length of normalization period
- do_update: false # if true, the normalization range is updated automatically
- update_interval: 10.0 # in seconds. Interval at which the rate normalization is updated
- range: [10, 90] # centiles, for automated normalization
- clamp_firing_rates: True
- # if use_all_channels is False:
- # channel firing rate r will be clamped and normalized (r_n):
- # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
- # if the channel is set to 'invert', then r_n := 1 - r_n
- # All normalized rates are then averaged.
- # otherwise, all channels will be averaged first, then normalized
-
- use_all_channels: false # if True, all channels are used; if False, only the channels specified below are used
- all_channels: {bottom: 1.15625, top: 1.59375, invert: false}
-
- channels:
- # - {id: 20, bottom: 5.0, top: 10.0, invert: True}
- # - {id: 97, bottom: 5.0, top: 10.0, invert: false}
- # - {id: 99, bottom: 5.0, top: 15.0, invert: false}
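- # Worked example (comment only): with the all_channels values above, a rate r = 1.4 sp/sec gives
- #   r_n = (max(1.15625, min(1.59375, 1.4)) - 1.15625) / (1.59375 - 1.15625) ≈ 0.557;
- # rates outside [bottom, top] clamp to 0 or 1, and with invert: true the result would be 1 - r_n.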
- data_source: 'spike_rates' # 'band_power' or 'spike_rates'
- spike_band_power:
- loop_interval: 10 # ms, how often the spike band power calculator should run
- integrated_samples: 1500 # samples to integrate over per step -> 1500 / 3e4 = 50ms bins
- average_n_bins: 10 # the output SBP will be the average of the last n bins; with the default
- # sample_group and integrated_samples settings, 10 bins correspond to 0.5 s.
- sample_group: 6
- filter: # IIR filter coefficients for (spike) band power calculation.
- b: [0.956543225556877, -1.91308645111375, 0.956543225556877]
- a: [1, -1.91119706742607, 0.914975834801434]
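- # Worked example (comment only): at fs = 30000 Hz, integrated_samples: 1500 corresponds to
- # 1500 / 30000 = 50 ms per bin, and average_n_bins: 10 gives a 10 * 50 ms = 0.5 s averaging window.
- # The b/a coefficients above define a second-order IIR (biquad) filter, presumably applied to the raw
- # signal before the power is integrated.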
- spike_rates:
- n_units: 1 # number of units per channel
- bin_width: 0.05 # sec, for spike rate estimation
- loop_interval: 50 # ms
- method: 'boxcar' # 'exponential' or 'boxcar'
- decay_factor: .9 # for the exponential method: for each step back in time, a bin's count is multiplied by decay_factor
- max_bins: 20 # for both the exponential and boxcar methods, determines the number of history bins taken into account
- # bl_offset: 0.000001 # baseline correction constant
- bl_offset: 30. # baseline correction constant
- # bl_offset: 0.1 # baseline correction constant
- correct_bl: False # for online mode
- correct_bl_model: False # for offline mode
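- # Illustrative sketch (comment only, variable names hypothetical): with bin counts c[k], k = 0 the newest bin,
- #   boxcar:      rate = sum(c[0:max_bins]) / (max_bins * bin_width)
- #   exponential: rate weighted by decay_factor**k, i.e. sum(decay_factor**k * c[k] for k in range(max_bins))
- # bl_offset is the baseline correction constant, presumably applied when correct_bl / correct_bl_model is True;
- # see the spike-rate code for the exact formula.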
- buffer:
- length: 600 # buffer shape: (length, channels)
- session:
- flags:
- bl: True
- bl_rand: True
- decode: True
- stimulus: True
- recording:
- timing:
- t_baseline_1: 1. # sec, baseline duration for the first trial
- t_baseline_all: 1. # sec, baseline duration for all other trials
- t_baseline_rand: 1. # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
- t_after_stimulus: 0.0
- t_response: 5. # sec, trial response duration
- decoder_refresh_interval: .01 # sec, for continuous decoding, the cycle time of the decoder
- bci_loop_interval: .05 # sec, step for bci thread loop
- recording_loop_interval: .05 # sec, step for recording thread loop
- recording_loop_interval_data: .02 # sec, step for data process loop
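- # Illustrative trial timeline (comment only, derived from the values above): 1 s baseline
- # (plus a random 0..1 s if session.flags.bl_rand is True), then the stimulus, then t_after_stimulus (0 s),
- # then a response window of up to 5 s, which may end earlier if classifier.break_loop triggers a decision.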
- classifier:
- max_active_ch_nr: []
- # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
- # include_channels: [0, 1, 4, 7, 8, 12, 14, 19, 22, 26, 28, 29, 31, 96, 100, 103]
- # include_channels: range(0,128)
- include_channels: [20]
- # include_channels: [0, 1, 4, 6, 7, 8, 12, 14, 19, 20, 22, 26, 28, 29, 31, 96, 100, 103, 121]
- # include_channels: [ 1, 2, 3, 9, 19, 29, 41, 44, 48, 51, 52, 53, 54,
- # 62, 63, 66, 74, 82, 94, 95, 113]
- # exclude_channels: []
- exclude_channels: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
- 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
- 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
- 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
- 119, 120, 121, 122, 123, 124, 125, 126, 127]
-
- # exclude_channels: range(32,96)
- # exclude_channels: []
- n_triggers: 2 # DO NOT CHANGE THIS
- n_classes: 2
- template: [10, 14, 18, 22, 26, 30, 34, 38]
- trigger_pos: 'start' # 'start' or 'stop'
- online: False # will be overwritten by code, see bci.py
- thr_prob: 0.8
- thr_window: 40 # number of samples for which prob must stay above thr_prob to trigger a decision (alternative value: 30)
- break_loop: True # True: move on as soon as a decision is made; otherwise wait out the full t_response time
- # models to use for online decoding
- saved_model_conf_name: "config/model_conf.yaml"
-
- path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl' # scikit
- path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl' # explicit LDA
- exclude_data_channels: []
- n_neg_train: 100000
- deadtime: 40
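- # Illustrative note (comment only): thr_window: 40 means the class probability must stay above
- # thr_prob: 0.8 for 40 samples; if one probability sample is produced per decoder refresh
- # (decoder_refresh_interval: 0.01 s), that is roughly 0.4 s before a decision is triggered.
- # deadtime: 40 is presumably a refractory period (in samples) after a decision; see bci.py for details.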
- model_training:
- save_model: False
- model: 'scikit' # eigen, scikit, explicit
- solver: 'lsqr' # svd, lsqr, eigen
- cross_validation: True
- n_splits: 5 # for cross-validation
- test_size: .2 # float in (0, 1) or int (absolute number of trials, >= n_classes)
- reg_fact: 0.3 # regularization factor
- fsel: False # feature selection
- triggers_plot: 3
- peaks: # these values are for offline training
- # height: 0.9 # probability threshold
- # width: 28 # min number of samples above threshold
- distance: 40 # minimum number of samples between detected peaks
- sig: 'pred' # 'prob' or 'pred' -> build the signal from class probabilities or from the predicted class
- prefilter: False
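- # Illustrative sketch (comment only; assumes the 'scikit' model corresponds to sklearn's LDA and that
- # reg_fact maps to sklearn's shrinkage; variable names hypothetical):
- #   from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
- #   from sklearn.model_selection import cross_val_score
- #   clf = LinearDiscriminantAnalysis(solver='lsqr', shrinkage=0.3)  # solver / reg_fact from above
- #   scores = cross_val_score(clf, X, y, cv=5)                       # cross-validation with n_splits folds
- # The peaks parameters (height, width, distance) correspond to those of a peak finder such as scipy.signal.find_peaks.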
- psth:
- cut: [-40, 100]
- lfp:
- fs: 1000 # sampling rate (Hz)
- sampling_ratio: 30
- filter_fc_lb: [10, 0] # cut-off frequencies (Hz) for the low-band filter
- filter_fc_mb: [12, 40] # cut-off frequencies (Hz) for the mid-band filter
- filter_fc_hb: [60, 250] # cut-off frequencies (Hz) for the high-band filter
- filter_order_lb: 2
- filter_order_mb: 6
- filter_order_hb: 10
- artifact_thr: 400 # exclude data above this threshold
- array1: range(32,64) #3 4 7 8 10 14 17 15 44
- array21: range(2)
- # array22: range(100,112)
- array22: [] #range(96,128)
- array1_exclude: []
- array2_exclude: []
- i_start: 0 #None # import data from start index
- i_stop: -1 #600000 #None # to stop index
- psth_win: [-1000, 5000]
- exclude: False
- normalize: False
- zscore: False
- car: True
- sub_band: 1
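- # Illustrative sketch (comment only, assuming a standard Butterworth band-pass design; names hypothetical):
- #   from scipy.signal import butter, filtfilt
- #   b, a = butter(N=10, Wn=[60, 250], btype='bandpass', fs=1000)  # filter_order_hb / filter_fc_hb / fs
- #   hb = filtfilt(b, a, lfp_channel)                              # zero-phase filtering of one channel
- # (for high orders, output='sos' with sosfiltfilt is numerically more robust)
- # For filter_fc_lb: [10, 0] the second cut-off of 0 presumably indicates a low-pass below 10 Hz.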
- motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf'] # German cue labels: tongue, close hand, open hand, move eyes, move head
- spectra:
- spgr_len: 500
- plot:
- ch_ids: [0] # relative id of imported channel
- general: True
- filters: False
-
- cerebus:
- instance: 0
- buffer_reset: True
- buffer_size_cont: 30001
- buffer_size_comments: 500
- file_handling:
- data_path: '/data/clinical/neural/fr/'
- # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
- results: '/data/clinical/nf/results/'
- paradigm_config_file: 'paradigm.yaml'
- # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
- # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
- # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
- save_data: True # keep always True
- mode: 'ab' # ab: append binary, wb: write binary (will overwrite existing files)
- speller:
- type: 'exploration' # exploration, question, training_color, color, feedback
- audio: True
- pyttsx_rate: 100
- audio_result_fb: False
- speller_matrix: True
- feedback:
- # the normalized rate is multiplied by alpha, and the baseline offset beta is added.
-
- feedback_tone: False
- alpha: 360 # scaling coefficient
- beta: 120 # offset
- tone_length: 0.25 # length of feedback tone in seconds
- target_tone_length: 1.0 # length of target tone in seconds
-
- target_n_tones: 5 # Play the target tone every n feedback tones
- hold_iterations: 2
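- # Worked example (comment only): a normalized rate r_n = 0.5 maps to 360 * 0.5 + 120 = 300, presumably
- # the feedback tone frequency in Hz, so r_n in [0, 1] spans roughly 120 to 480.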
- plot:
- channels: [97, 99] # channels for live plot; restart the app if this is changed
- fps: 10. # frames per second
- pca: False
- filter_min_rate: 2 # sp/sec, exclude channels with rate below this value, e.g. in show_ffedback_data.py
- sim_data:
- rate_bl: 10
|