system:
    plot: 0
    general:
        debug: 1
        clear_trial_history: False

daq:
    n_channels_max: 128
    # n_channels: 2
    # exclude_channels: []          # these are BlackRock channel IDs (1-based)
    exclude_channels: []
    # car_channels: []              # channel IDs to use for common average reference; useful
                                    # for spike band power and LFP calculations
    car_channels: []
    fs: 30000.                      # sampling frequency
    smpl_fct: 30                    # downsample factor
    trigger_len: 50                 # length of triggers  <--- review: this parameter only appears in a commented-out line
    daq_sleep: 0.1                  # s  <--- review: this parameter does not seem to be used anywhere

    normalization:
        len: 600.0                  # in seconds, length of the normalization period
        do_update: True             # perform automatic updates if True
        update_interval: 10.0       # in seconds, interval at which the rate normalization is updated
        range: [10, 90]             # centiles, for automated normalization
        clamp_firing_rates: True
        # If use_all_channels is False:
        #     each channel firing rate r is clamped and normalized (r_n):
        #         r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
        #     if the channel is set to 'invert', then r_n := 1 - r_n
        #     All normalized rates are then averaged.
        # Otherwise, all channels are averaged first, then normalized.
        use_all_channels: false     # if True, all channels are used; if False, only the channels listed below
        all_channels: {bottom: 1.15625, top: 1.59375, invert: false}
        channels:
            - {id: 20, bottom: 7.0, top: 16, invert: false}
            - {id: 99, bottom: 1.0, top: 10, invert: True}
            # - id: 18
            #   bottom: 2           # signal will be clamped to the range [bottom, top] and normalized
            #   top: 19
            #   invert: True        # if True, the channel's normalized rate will be subtracted from 1
            # - id: 17
            #   bottom: 0
            #   top: 16
            #   invert: False
            # - id: 42
            #   bottom: 0
            #   top: 9
            #   invert: False
            # - id: 0
            #   bottom: 1
            #   top: 20
            #   invert: False
            # - id: 1
            #   bottom: 2
            #   top: 24
            #   invert: True
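
        # A minimal Python sketch of the clamping/normalization rule described above
        # (illustration only, not code from this repository; the function name and the
        # dict-based call signature are assumptions, and the sketch assumes
        # clamp_firing_rates is True):
        #
        #   def normalize_rates(rates, norm_cfg):
        #       """rates: {channel_id: firing rate}; norm_cfg: the daq.normalization block."""
        #       if norm_cfg['use_all_channels']:
        #           p = norm_cfg['all_channels']
        #           r = sum(rates.values()) / len(rates)                    # average first ...
        #           r_n = (max(p['bottom'], min(p['top'], r)) - p['bottom']) / (p['top'] - p['bottom'])
        #           return 1.0 - r_n if p['invert'] else r_n                # ... then clamp and normalize
        #       out = []
        #       for ch in norm_cfg['channels']:                             # clamp and normalize per channel ...
        #           r = max(ch['bottom'], min(ch['top'], rates[ch['id']]))
        #           r_n = (r - ch['bottom']) / (ch['top'] - ch['bottom'])
        #           out.append(1.0 - r_n if ch['invert'] else r_n)
        #       return sum(out) / len(out)                                  # ... then average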
    spike_rates:
        n_units: 1                  # number of units per channel
        bin_width: 0.05             # sec, for spike rate estimation
        loop_interval: 50           # ms
        method: 'boxcar'            # 'exponential' or 'boxcar'
        decay_factor: .9            # for exponential decay: for each step back in time, a bin's count is multiplied by decay_factor
        max_bins: 20                # for the exponential and boxcar methods, the number of bins of history taken into account
        # bl_offset: 0.000001       # baseline correction constant
        bl_offset: 30.              # baseline correction constant
        # bl_offset: 0.1            # baseline correction constant
        correct_bl: False           # for online mode
        correct_bl_model: False     # for offline mode

buffer:
    length: 600                     # buffer shape: (length, channels)

session:
    flags:
        bl: True
        bl_rand: False
        decode: True
        stimulus: True
        recording:
    timing:
        t_baseline_1: 5.            # sec, trial-1 baseline duration
        t_baseline_all: 1.          # sec, all other trials
        t_baseline_rand: 1.         # sec, random inter-trial interval between 0 and t_baseline_rand, added if session.flags.bl_rand is True
        t_after_stimulus: 0.0
        t_response: 5.              # sec, trial response duration
        decoder_refresh_interval: .01       # sec, cycle time of the decoder for continuous decoding
        bci_loop_interval: .05              # sec, step for the bci thread loop
        recording_loop_interval: .05        # sec, step for the recording thread loop
        recording_loop_interval_data: .02   # sec, step for the data process loop

classifier:
    max_active_ch_nr: []
    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
    # include_channels: [0, 1, 4, 7, 8, 12, 14, 19, 22, 26, 28, 29, 31, 96, 100, 103]
    # include_channels: range(0,128)
    include_channels: [20]
    # include_channels: [0, 1, 4, 6, 7, 8, 12, 14, 19, 20, 22, 26, 28, 29, 31, 96, 100, 103, 121]
    # include_channels: [ 1, 2, 3, 9, 19, 29, 41, 44, 48, 51, 52, 53, 54,
    #                     62, 63, 66, 74, 82, 94, 95, 113]
    # exclude_channels: []
    exclude_channels: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
                       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
                       41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
                       61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
                       81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
                       101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
                       117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127]
    # exclude_channels: range(32,96)
    # exclude_channels: []
    n_triggers: 2                   # DO NOT CHANGE THIS
    n_classes: 2
    template: [10, 14, 18, 22, 26, 30, 34, 38]
    trigger_pos: 'stop'             # 'start' or 'stop'
    online: False                   # will be overwritten by code, see bci.py
    thr_prob: 0.8
    thr_window: 40  # 30            # number of samples with probability above threshold needed to trigger a decision
    break_loop: True                # True: move on as soon as a decision is reached; otherwise wait for t_response
    # models to use for online decoding
    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'    # scikit
    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'    # explicit LDA
    exclude_data_channels: []
    n_neg_train: 100000
    deadtime: 40
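
    # One plausible reading of how thr_prob, thr_window and deadtime interact during
    # online decoding, as an illustrative Python sketch (the actual logic lives in the
    # repository code, e.g. bci.py, and may differ in detail; the class and method
    # names here are invented for illustration):
    #
    #   class DecisionGate:
    #       """Trigger a decision when p stays above thr_prob for thr_window consecutive cycles."""
    #       def __init__(self, thr_prob=0.8, thr_window=40, deadtime=40):
    #           self.thr_prob, self.thr_window, self.deadtime = thr_prob, thr_window, deadtime
    #           self.above = 0          # consecutive cycles above threshold
    #           self.dead = 0           # remaining refractory cycles after a decision
    #       def update(self, p):
    #           if self.dead > 0:       # refractory period: no new decisions
    #               self.dead -= 1
    #               return False
    #           self.above = self.above + 1 if p > self.thr_prob else 0
    #           if self.above >= self.thr_window:
    #               self.above, self.dead = 0, self.deadtime
    #               return True         # with break_loop: True, the trial can end here
    #           return False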
    model_training:
        save_model: False
        model: 'scikit'             # eigen, scikit, explicit
        solver: 'lsqr'              # svd, lsqr, eigen
        cross_validation: True
        n_splits: 5                 # for cross-validation
        test_size: .2               # float in (0, 1) or int (absolute number of trials, >= n_classes)
        reg_fact: 0.3               # regularization factor
        fsel: False                 # feature selection
    triggers_plot: 3
    peaks:                          # these values are for offline training
        # height: 0.9               # probability threshold
        # width: 28                 # min number of samples above threshold
        distance: 40                # number of samples by which peaks must be apart
        sig: 'pred'                 # 'prob' or 'pred': signal based on probabilities or on the predicted class
    prefilter: False
    psth:
        cut: [-40, 100]

lfp:
    fs: 1000                        # sampling rate
    sampling_ratio: 30
    filter_fc_lb: [10, 0]           # cut-off frequencies for the low-band filter
    filter_fc_mb: [12, 40]          # cut-off frequencies for the mid-band filter
    filter_fc_hb: [60, 250]         # cut-off frequencies for the high-band filter
    filter_order_lb: 2
    filter_order_mb: 6
    filter_order_hb: 10
    artifact_thr: 400               # exclude data above this threshold
    array1: range(32,64)            # 3 4 7 8 10 14 17 15 44
    array21: range(2)
    # array22: range(100,112)
    array22: []                     # range(96,128)
    array1_exclude: []
    array2_exclude: []
    i_start: 0                      # None     # import data from this start index
    i_stop: -1                      # 600000  # None  # up to this stop index
    psth_win: [-1000, 5000]
    exclude: False
    normalize: False
    zscore: False
    car: True
    sub_band: 1
    # German condition labels: tongue, close hand, open hand, move eyes, move head
    motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
    spectra:
        spgr_len: 500
    plot:
        ch_ids: [0]                 # relative id of the imported channel
        general: True
        filters: False

cerebus:
    instance: 0
    buffer_reset: True
    buffer_size_cont: 30001
    buffer_size_comments: 500

file_handling:
    data_path: '/data/clinical/neural/fr/'
    # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
    results: '/data/clinical/nf/results/'
    # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
    # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
    # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
    save_data: True                 # always keep this True
    mode: 'ab'                      # ab: append binary, wb: write binary (will overwrite existing files)
    git_hash: e5cf42b
    filename_data: /data/clinical/neural/fr/2019-07-03/data_15_34_39.bin
    filename_log_info: /data/clinical/neural/fr/2019-07-03/info_15_34_39.log
    filename_events: /data/clinical/neural/fr/2019-07-03/events_15_34_39.txt

speller:
    type: 'feedback'                # exploration, question, training_color, color, feedback
    audio: True
    pyttsx_rate: 100
    audio_result_fb: True

feedback:                           # the normalized rate is multiplied by alpha, and the baseline beta is added
    feedback_tone: True
    alpha: 360                      # scaling coefficient
    beta: 120                       # offset
    tone_length: 0.25               # length of the feedback tone in seconds
    target_tone_length: 1.0         # length of the target tone in seconds
    reward_on_target: false         # if the target is reached, play the reward tone and abort the trial
    target_n_tones: 5               # play the target tone every n feedback tones
    reward_sound: '/kiap/data/speller/feedback/kerching.wav'
    hold_iterations: 3

plot:
    channels: [20, 18, 99, 16]      # channels for the live plot; restart the app if this is changed
    fps: 10.                        # frames per second
    pca: False

sim_data:
    rate_bl: 10
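
# The feedback mapping described in the `feedback` section above, written out as a
# minimal Python sketch (illustrative only; the function name and the interpretation
# of the result as a tone parameter, e.g. a frequency, are assumptions):
#
#   def feedback_value(r_n, alpha=360, beta=120):
#       """Map a normalized rate r_n in [0, 1] to the feedback tone parameter."""
#       return alpha * r_n + beta       # r_n = 0 -> 120, r_n = 1 -> 480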