2 Commits: 73cc8be31a ... b5abd657d5

Author              SHA1        Message                                                     Date
Jonas Zimmermann    b5abd657d5  Merge remote-tracking branch 'refs/remotes/origin/master'  2 years ago
Jonas Zimmermann    c445f6fd6a  gin commit from L-1010036236                                2 years ago

+ 248 - 0
KIAP_BCI_neurofeedback/2019-07-03/config_dump_11_24_33.yaml

@@ -0,0 +1,248 @@
+!munch.Munch
+system:
+  plot: 0
+
+general:
+  debug: 1
+  clear_trial_history: False
+
+daq:
+  n_channels_max: 128
+  # n_channels: 2
+  # exclude_channels: []  # These are BlackRock channel IDs (1-based).
+  exclude_channels: []
+  # car_channels: [] # channel IDs to use for common average reference. This is useful
+  #   for spike band power and LFP calculations
+  car_channels: []
+  fs: 30000.          # sampling frequency
+  smpl_fct: 30        # downsample factor
+  trigger_len: 50     # length of triggers <--- review: this parameter only appears in a commented out line
+  daq_sleep: 0.1      # s           <--- review: this parameter does not seem to be used anywhere
+  
+  normalization:
+    len:  600.0           # in seconds. Length of normalization period
+    do_update:  True        # Performs automatic updates if true
+    update_interval: 10.0   # in seconds. Defines in what intervals the rate normalization will be updated
+    range: [10, 90]    # centiles, for automated normalization
+
+    clamp_firing_rates: True
+    # if use_all_channels is False:
+    # channel firing rate r will be clamped and normalized (r_n):
+    # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
+    # if the channel is set to 'invert', then r_n := 1 - r_n
+    # All normalized rates are then averaged.
+    # otherwise, all channels will be averaged first, then normalized
+    
+    use_all_channels: True      # if True, all channels will be used. If False, channels as specified below will be used
+    all_channels: {bottom: 1.703125, top: 2.484375, invert: false}
+    
+    channels:
+    - {id: 20, bottom: 14.0, top: 26, invert: false}
+
+      # - id: 18
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    19
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+      # # - id: 17
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    16
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 42
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    9
+      #  invert: False  # if True, channels normalized rate will be subtracted from 1.
+    
+      # - id: 0
+      #   bottom: 1   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:   20
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 1
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    24
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+
+
+  spike_rates:
+    n_units: 1        # number of units per channel
+    bin_width: 0.05    # sec, for spike rate estimation
+    loop_interval: 50  # ms 
+    method: 'boxcar'   # exponential or boxcar
+    decay_factor: .9    # for exponential decay, for each step back, a bin's count will be multiplied by decay_factor
+    max_bins: 20      # for exponential and boxcar methods, determines the number of bins in history to take into account
+    # bl_offset: 0.000001      # baseline correction constant
+    bl_offset: 30.      # baseline correction constant
+    # bl_offset: 0.1        # baseline correction constant
+    correct_bl: False       # for online mode
+    correct_bl_model: False # for offline mode
+
+
+buffer:
+  length: 600         # buffer shape: (length, channels)
+
+session:  
+  flags:
+    bl: True
+    bl_rand: False
+    decode: True
+    stimulus: True
+
+
+recording:
+  timing:
+    t_baseline_1: 5.                   # sec, trial-1 baseline duration
+    t_baseline_all: 1.                 # sec, all other trials
+    t_baseline_rand: 1.                # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
+    t_after_stimulus: 0.        
+    t_response: 5.                     # sec, trial response duration
+    decoder_refresh_interval: .01     # sec, for continuous decoding, the cycle time of the decoder
+    bci_loop_interval: .05             # sec, step for bci thread loop
+    recording_loop_interval: .05       # sec, step for bci thread loop
+    recording_loop_interval_data: .02       # sec, step for data process loop
+
+classifier:
+    max_active_ch_nr: []
+    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
+    # include_channels: [0,   1,   4,   7,   8,  12,  14,  19,  22,  26,  28,  29,  31, 96, 100, 103]
+    # include_channels: range(0,128)
+    include_channels: [20]
+    # include_channels: [0,   1,   4,   6, 7,   8,  12,  14,  19, 20, 22,  26,  28,  29,  31, 96, 100, 103, 121]
+    # include_channels: [  1,   2,   3,   9,  19,  29,  41,  44,  48,  51,  52,  53,  54,
+         # 62,  63,  66,  74,  82,  94,  95, 113]
+    # exclude_channels: []
+    exclude_channels: [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+        13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,  25,  26,  27,
+        28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,
+        41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,
+        54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,
+        67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+        80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,
+        93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105,
+       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+       119, 120, 121, 122, 123, 124, 125, 126, 127]
+       
+    # exclude_channels: range(32,96)
+    # exclude_channels: []
+    n_triggers: 2     # DO NOT CHANGE THIS
+    n_classes: 2
+    template: [10, 14, 18, 22, 26, 30, 34, 38]
+    trigger_pos: 'start'   # 'start' or 'stop' 
+
+    online: False        # will be overwritten by code, see bci.py
+    thr_prob: 0.8
+    thr_window: 40 #30    # number of samples for prob above threshold to trigger decision
+    break_loop: True      # True: move on as soon as decision is there, otherwise wait t_response time
+
+    # models to use for online decoding
+    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'   # scikit
+    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'   # explicit LDA
+    exclude_data_channels: []
+    n_neg_train: 100000
+    deadtime: 40
+
+
+
+    model_training:
+      save_model: False
+      model: 'scikit'  # eigen, scikit, explicit
+      solver: 'lsqr'    # svd, lsqr, eigen
+      cross_validation: True
+      n_splits:  5     # for cross-validation
+      test_size: .2     # float between (0,1) or int (absolute number of trials, >=n_classes)
+      reg_fact: 0.3       # regularization factor
+      fsel: False          # feature selection
+      triggers_plot: 3
+
+    peaks:          # these values are for offline training
+      # height: 0.9   # probability threshold
+      # width: 28      # min number of samples above threshold
+      distance: 40  # number of samples for peaks to be apart
+      sig: 'pred'   # 'prob', 'pred' -> signal based on probabilities or prediction class
+      prefilter: False
+
+    psth:
+      cut : [-40, 100]
+
+lfp:
+  fs: 1000               # sampling rate
+  sampling_ratio: 30
+  filter_fc_lb: [10, 0]    # cut-off frequencies for filter
+  filter_fc_mb: [12, 40]    # cut-off frequencies for filter
+  filter_fc_hb: [60, 250]    # cut-off frequencies for filter
+  filter_order_lb: 2     
+  filter_order_mb: 6     
+  filter_order_hb: 10     
+  artifact_thr: 400      # exclude data above this threshold
+  array1: range(32,64) #3 4 7 8 10 14 17 15 44
+  array21: range(2)
+  # array22: range(100,112)
+  array22: [] #range(96,128)
+  array1_exclude: []
+  array2_exclude: []
+  i_start: 0 #None            # import data from start index
+  i_stop:  -1 #600000 #None             # to stop index
+  psth_win: [-1000, 5000]
+  exclude: False
+  normalize: False
+  zscore: False
+  car: True
+  sub_band: 1
+  motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+
+  spectra:
+    spgr_len: 500
+  plot:
+    ch_ids: [0]     # relative id of imported channel
+    general: True
+    filters: False
+
+    
+cerebus:
+    instance: 0
+    buffer_reset: True
+    buffer_size_cont: 30001
+    buffer_size_comments: 500
+
+file_handling:
+  data_path: '/data/clinical/neural/fr/'
+  # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
+
+  results: '/data/clinical/nf/results/'
+  # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
+  # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
+  # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
+  save_data: True     # keep always True
+  mode: 'ab'   # ab: append binary, wb: write binary (will overwrite existing files)
+  git_hash: 4af0a92
+  filename_data: /data/clinical/neural/fr/2019-07-03/data_11_24_33.bin
+  filename_log_info: /data/clinical/neural/fr/2019-07-03/info_11_24_33.log
+  filename_events: /data/clinical/neural/fr/2019-07-03/events_11_24_33.txt
+
+speller:
+  type: 'feedback'    # exploration, question, training_color, color, feedback
+  audio: True
+  pyttsx_rate: 100
+  audio_result_fb: True
+
+feedback:
+  # normalized rate is multiplied by alpha, and baseline beta added.
+  
+  feedback_tone: True
+
+  alpha: 360    # scaling coefficient
+  beta: 120      # offset
+  tone_length: 0.25    # length of feedback tone in seconds
+  target_tone_length: 1.0    # length of feedback tone in seconds
+  
+  reward_on_target: False  # If target is reached, play reward tone and abort trial
+  target_n_tones: 5 # Play the target tone every n feedback tones
+  reward_sound: '/kiap/data/speller/feedback/kerching.wav'
+  hold_iterations: 3
+
+plot:
+  channels: [20, 24, 7, 16]   # channels for live plot, need to restart app if changed
+  fps: 10.       # frames per second
+  pca: False
+
+sim_data:
+  rate_bl: 10
+
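The daq.normalization comments in the dump above spell out how a channel firing rate r is clamped to [bottom, top], rescaled to [0, 1], optionally inverted, and averaged. A minimal sketch of that arithmetic, using the channel-20 bounds from this dump; the function name and the dict-based input format are illustrative assumptions, not the repository's code:

```python
# Illustration of the clamp-and-normalize rule quoted in the
# daq.normalization comments; names and structure are assumptions.

def normalize_rates(rates, channels, use_all_channels, all_channels):
    """rates: dict mapping channel id -> firing rate (Hz). Returns a value in [0, 1]."""
    if use_all_channels:
        # "otherwise, all channels will be averaged first, then normalized"
        specs = [all_channels]
        values = [sum(rates.values()) / len(rates)]
    else:
        specs = channels
        values = [rates[c["id"]] for c in channels]

    normalized = []
    for spec, r in zip(specs, values):
        bottom, top = spec["bottom"], spec["top"]
        # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
        r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
        if spec.get("invert", False):
            r_n = 1.0 - r_n  # "if the channel is set to 'invert', then r_n := 1 - r_n"
        normalized.append(r_n)

    # "All normalized rates are then averaged."
    return sum(normalized) / len(normalized)


# With the channel-20 bounds from this dump (bottom=14.0, top=26),
# a rate of 20 Hz maps to (20 - 14) / (26 - 14) = 0.5.
print(normalize_rates(
    {20: 20.0},
    channels=[{"id": 20, "bottom": 14.0, "top": 26, "invert": False}],
    use_all_channels=False,
    all_channels={"bottom": 1.703125, "top": 2.484375, "invert": False},
))
```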

+ 248 - 0
KIAP_BCI_neurofeedback/2019-07-03/config_dump_11_39_36.yaml

@@ -0,0 +1,248 @@
+!munch.Munch
+system:
+  plot: 0
+
+general:
+  debug: 1
+  clear_trial_history: False
+
+daq:
+  n_channels_max: 128
+  # n_channels: 2
+  # exclude_channels: []  # These are BlackRock channel IDs (1-based).
+  exclude_channels: []
+  # car_channels: [] # channel IDs to use for common average reference. This is useful
+  #   for spike band power and LFP calculations
+  car_channels: []
+  fs: 30000.          # sampling frequency
+  smpl_fct: 30        # downsample factor
+  trigger_len: 50     # length of triggers <--- review: this parameter only appears in a commented out line
+  daq_sleep: 0.1      # s           <--- review: this parameter does not seem to be used anywhere
+  
+  normalization:
+    len:  600.0           # in seconds. Length of normalization period
+    do_update:  True        # Performs automatic updates if true
+    update_interval: 10.0   # in seconds. Defines in what intervals the rate normalization will be updated
+    range: [10, 90]    # centiles, for automated normalization
+
+    clamp_firing_rates: True
+    # if use_all_channels is False:
+    # channel firing rate r will be clamped and normalized (r_n):
+    # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
+    # if the channel is set to 'invert', then r_n := 1 - r_n
+    # All normalized rates are then averaged.
+    # otherwise, all channels will be averaged first, then normalized
+    
+    use_all_channels: True      # if True, all channels will be used. If False, channels as specified below will be used
+    all_channels: {bottom: 1.703125, top: 2.484375, invert: false}
+    
+    channels:
+    - {id: 20, bottom: 6.0, top: 18, invert: false}
+
+      # - id: 18
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    19
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+      # # - id: 17
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    16
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 42
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    9
+      #  invert: False  # if True, channels normalized rate will be subtracted from 1.
+    
+      # - id: 0
+      #   bottom: 1   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:   20
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 1
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    24
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+
+
+  spike_rates:
+    n_units: 1        # number of units per channel
+    bin_width: 0.05    # sec, for spike rate estimation
+    loop_interval: 50  # ms 
+    method: 'boxcar'   # exponential or boxcar
+    decay_factor: .9    # for exponential decay, for each step back, a bin's count will be multiplied by decay_factor
+    max_bins: 20      # for exponential and boxcar methods, determines the number of bins in history to take into account
+    # bl_offset: 0.000001      # baseline correction constant
+    bl_offset: 30.      # baseline correction constant
+    # bl_offset: 0.1        # baseline correction constant
+    correct_bl: False       # for online mode
+    correct_bl_model: False # for offline mode
+
+
+buffer:
+  length: 600         # buffer shape: (length, channels)
+
+session:  
+  flags:
+    bl: True
+    bl_rand: False
+    decode: True
+    stimulus: True
+
+
+recording:
+  timing:
+    t_baseline_1: 5.                   # sec, trial-1 baseline duration
+    t_baseline_all: 1.                 # sec, all other trials
+    t_baseline_rand: 1.                # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
+    t_after_stimulus: 0.        
+    t_response: 5.                     # sec, trial response duration
+    decoder_refresh_interval: .01     # sec, for continuous decoding, the cycle time of the decoder
+    bci_loop_interval: .05             # sec, step for bci thread loop
+    recording_loop_interval: .05       # sec, step for bci thread loop
+    recording_loop_interval_data: .02       # sec, step for data process loop
+
+classifier:
+    max_active_ch_nr: []
+    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
+    # include_channels: [0,   1,   4,   7,   8,  12,  14,  19,  22,  26,  28,  29,  31, 96, 100, 103]
+    # include_channels: range(0,128)
+    include_channels: [20]
+    # include_channels: [0,   1,   4,   6, 7,   8,  12,  14,  19, 20, 22,  26,  28,  29,  31, 96, 100, 103, 121]
+    # include_channels: [  1,   2,   3,   9,  19,  29,  41,  44,  48,  51,  52,  53,  54,
+         # 62,  63,  66,  74,  82,  94,  95, 113]
+    # exclude_channels: []
+    exclude_channels: [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+        13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,  25,  26,  27,
+        28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,
+        41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,
+        54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,
+        67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+        80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,
+        93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105,
+       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+       119, 120, 121, 122, 123, 124, 125, 126, 127]
+       
+    # exclude_channels: range(32,96)
+    # exclude_channels: []
+    n_triggers: 2     # DO NOT CHANGE THIS
+    n_classes: 2
+    template: [10, 14, 18, 22, 26, 30, 34, 38]
+    trigger_pos: 'start'   # 'start' or 'stop' 
+
+    online: False        # will be overwritten by code, see bci.py
+    thr_prob: 0.8
+    thr_window: 40 #30    # number of samples for prob above threshold to trigger decision
+    break_loop: True      # True: move on as soon as decision is there, otherwise wait t_response time
+
+    # models to use for online decoding
+    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'   # scikit
+    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'   # explicit LDA
+    exclude_data_channels: []
+    n_neg_train: 100000
+    deadtime: 40
+
+
+
+    model_training:
+      save_model: False
+      model: 'scikit'  # eigen, scikit, explicit
+      solver: 'lsqr'    # svd, lsqr, eigen
+      cross_validation: True
+      n_splits:  5     # for cross-validation
+      test_size: .2     # float between (0,1) or int (absolute number of trials, >=n_classes)
+      reg_fact: 0.3       # regularization factor
+      fsel: False          # feature selection
+      triggers_plot: 3
+
+    peaks:          # these values are for offline training
+      # height: 0.9   # probability threshold
+      # width: 28      # min number of samples above threshold
+      distance: 40  # number of samples for peaks to be apart
+      sig: 'pred'   # 'prob', 'pred' -> signal based on probabilities or prediction class
+      prefilter: False
+
+    psth:
+      cut : [-40, 100]
+
+lfp:
+  fs: 1000               # sampling rate
+  sampling_ratio: 30
+  filter_fc_lb: [10, 0]    # cut-off frequencies for filter
+  filter_fc_mb: [12, 40]    # cut-off frequencies for filter
+  filter_fc_hb: [60, 250]    # cut-off frequencies for filter
+  filter_order_lb: 2     
+  filter_order_mb: 6     
+  filter_order_hb: 10     
+  artifact_thr: 400      # exclude data above this threshold
+  array1: range(32,64) #3 4 7 8 10 14 17 15 44
+  array21: range(2)
+  # array22: range(100,112)
+  array22: [] #range(96,128)
+  array1_exclude: []
+  array2_exclude: []
+  i_start: 0 #None            # import data from start index
+  i_stop:  -1 #600000 #None             # to stop index
+  psth_win: [-1000, 5000]
+  exclude: False
+  normalize: False
+  zscore: False
+  car: True
+  sub_band: 1
+  motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+
+  spectra:
+    spgr_len: 500
+  plot:
+    ch_ids: [0]     # relative id of imported channel
+    general: True
+    filters: False
+
+    
+cerebus:
+    instance: 0
+    buffer_reset: True
+    buffer_size_cont: 30001
+    buffer_size_comments: 500
+
+file_handling:
+  data_path: '/data/clinical/neural/fr/'
+  # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
+
+  results: '/data/clinical/nf/results/'
+  # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
+  # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
+  # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
+  save_data: True     # keep always True
+  mode: 'ab'   # ab: append binary, wb: write binary (will overwrite existing files)
+  git_hash: 4af0a92
+  filename_data: /data/clinical/neural/fr/2019-07-03/data_11_39_36.bin
+  filename_log_info: /data/clinical/neural/fr/2019-07-03/info_11_39_36.log
+  filename_events: /data/clinical/neural/fr/2019-07-03/events_11_39_36.txt
+
+speller:
+  type: 'feedback'    # exploration, question, training_color, color, feedback
+  audio: True
+  pyttsx_rate: 100
+  audio_result_fb: True
+
+feedback:
+  # normalized rate is multiplied by alpha, and baseline beta added.
+  
+  feedback_tone: True
+
+  alpha: 360    # scaling coefficient
+  beta: 120      # offset
+  tone_length: 0.25    # length of feedback tone in seconds
+  target_tone_length: 1.0    # length of feedback tone in seconds
+  
+  reward_on_target: True  # If target is reached, play reward tone and abort trial
+  target_n_tones: 5 # Play the target tone every n feedback tones
+  reward_sound: '/kiap/data/speller/feedback/kerching.wav'
+  hold_iterations: 3
+
+plot:
+  channels: [20, 18, 99, 16]   # channels for live plot, need to restart app if changed
+  fps: 10.       # frames per second
+  pca: False
+
+sim_data:
+  rate_bl: 10
+
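The feedback section in each dump states only that the normalized rate is multiplied by alpha and the baseline beta is added (alpha: 360, beta: 120 here). A minimal sketch of that affine mapping; what the resulting number drives (e.g. a tone parameter) is not stated in the dump, so that reading is left open:

```python
# Affine feedback mapping described in the feedback comment above.
# The unit/meaning of the result is not stated in the dump.

ALPHA = 360  # feedback.alpha, scaling coefficient
BETA = 120   # feedback.beta, offset

def feedback_value(normalized_rate: float) -> float:
    """Map a normalized rate in [0, 1] to the feedback signal."""
    return ALPHA * normalized_rate + BETA

# Endpoints with this dump's parameters: 0.0 -> 120, 1.0 -> 480.
assert feedback_value(0.0) == 120
assert feedback_value(1.0) == 480
```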

+ 248 - 0
KIAP_BCI_neurofeedback/2019-07-03/config_dump_11_51_50.yaml

@@ -0,0 +1,248 @@
+!munch.Munch
+system:
+  plot: 0
+
+general:
+  debug: 1
+  clear_trial_history: False
+
+daq:
+  n_channels_max: 128
+  # n_channels: 2
+  # exclude_channels: []  # These are BlackRock channel IDs (1-based).
+  exclude_channels: []
+  # car_channels: [] # channel IDs to use for common average reference. This is useful
+  #   for spike band power and LFP calculations
+  car_channels: []
+  fs: 30000.          # sampling frequency
+  smpl_fct: 30        # downsample factor
+  trigger_len: 50     # length of triggers <--- review: this parameter only appears in a commented out line
+  daq_sleep: 0.1      # s           <--- review: this parameter does not seem to be used anywhere
+  
+  normalization:
+    len:  600.0           # in seconds. Length of normalization period
+    do_update:  True        # Performs automatic updates if true
+    update_interval: 10.0   # in seconds. Defines in what intervals the rate normalization will be updated
+    range: [10, 90]    # centiles, for automated normalization
+
+    clamp_firing_rates: True
+    # if use_all_channels is False:
+    # channel firing rate r will be clamped and normalized (r_n):
+    # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
+    # if the channel is set to 'invert', then r_n := 1 - r_n
+    # All normalized rates are then averaged.
+    # otherwise, all channels will be averaged first, then normalized
+    
+    use_all_channels: True      # if True, all channels will be used. If False, channels as specified below will be used
+    all_channels: {bottom: 1.703125, top: 2.484375, invert: false}
+    
+    channels:
+    - {id: 20, bottom: 8.0, top: 19, invert: false}
+
+      # - id: 18
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    19
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+      # # - id: 17
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    16
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 42
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    9
+      #  invert: False  # if True, channels normalized rate will be subtracted from 1.
+    
+      # - id: 0
+      #   bottom: 1   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:   20
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 1
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    24
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+
+
+  spike_rates:
+    n_units: 1        # number of units per channel
+    bin_width: 0.05    # sec, for spike rate estimation
+    loop_interval: 50  # ms 
+    method: 'boxcar'   # exponential or boxcar
+    decay_factor: .9    # for exponential decay, for each step back, a bin's count will be multiplied by decay_factor
+    max_bins: 20      # for exponential and boxcar methods, determines the number of bins in history to take into account
+    # bl_offset: 0.000001      # baseline correction constant
+    bl_offset: 30.      # baseline correction constant
+    # bl_offset: 0.1        # baseline correction constant
+    correct_bl: False       # for online mode
+    correct_bl_model: False # for offline mode
+
+
+buffer:
+  length: 600         # buffer shape: (length, channels)
+
+session:  
+  flags:
+    bl: True
+    bl_rand: False
+    decode: True
+    stimulus: True
+
+
+recording:
+  timing:
+    t_baseline_1: 5.                   # sec, trial-1 baseline duration
+    t_baseline_all: 1.                 # sec, all other trials
+    t_baseline_rand: 1.                # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
+    t_after_stimulus: 0.        
+    t_response: 5.                     # sec, trial response duration
+    decoder_refresh_interval: .01     # sec, for continuous decoding, the cycle time of the decoder
+    bci_loop_interval: .05             # sec, step for bci thread loop
+    recording_loop_interval: .05       # sec, step for bci thread loop
+    recording_loop_interval_data: .02       # sec, step for data process loop
+
+classifier:
+    max_active_ch_nr: []
+    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
+    # include_channels: [0,   1,   4,   7,   8,  12,  14,  19,  22,  26,  28,  29,  31, 96, 100, 103]
+    # include_channels: range(0,128)
+    include_channels: [20]
+    # include_channels: [0,   1,   4,   6, 7,   8,  12,  14,  19, 20, 22,  26,  28,  29,  31, 96, 100, 103, 121]
+    # include_channels: [  1,   2,   3,   9,  19,  29,  41,  44,  48,  51,  52,  53,  54,
+         # 62,  63,  66,  74,  82,  94,  95, 113]
+    # exclude_channels: []
+    exclude_channels: [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+        13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,  25,  26,  27,
+        28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,
+        41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,
+        54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,
+        67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+        80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,
+        93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105,
+       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+       119, 120, 121, 122, 123, 124, 125, 126, 127]
+       
+    # exclude_channels: range(32,96)
+    # exclude_channels: []
+    n_triggers: 2     # DO NOT CHANGE THIS
+    n_classes: 2
+    template: [10, 14, 18, 22, 26, 30, 34, 38]
+    trigger_pos: 'stop'   # 'start' or 'stop' 
+
+    online: False        # will be overwritten by code, see bci.py
+    thr_prob: 0.8
+    thr_window: 40 #30    # number of samples for prob above threshold to trigger decision
+    break_loop: True      # True: move on as soon as decision is there, otherwise wait t_response time
+
+    # models to use for online decoding
+    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'   # scikit
+    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'   # explicit LDA
+    exclude_data_channels: []
+    n_neg_train: 100000
+    deadtime: 40
+
+
+
+    model_training:
+      save_model: False
+      model: 'scikit'  # eigen, scikit, explicit
+      solver: 'lsqr'    # svd, lsqr, eigen
+      cross_validation: True
+      n_splits:  5     # for cross-validation
+      test_size: .2     # float between (0,1) or int (absolute number of trials, >=n_classes)
+      reg_fact: 0.3       # regularization factor
+      fsel: False          # feature selection
+      triggers_plot: 3
+
+    peaks:          # these values are for offline training
+      # height: 0.9   # probability threshold
+      # width: 28      # min number of samples above threshold
+      distance: 40  # number of samples for peaks to be apart
+      sig: 'pred'   # 'prob', 'pred' -> signal based on probabilities or prediction class
+      prefilter: False
+
+    psth:
+      cut : [-40, 100]
+
+lfp:
+  fs: 1000               # sampling rate
+  sampling_ratio: 30
+  filter_fc_lb: [10, 0]    # cut-off frequencies for filter
+  filter_fc_mb: [12, 40]    # cut-off frequencies for filter
+  filter_fc_hb: [60, 250]    # cut-off frequencies for filter
+  filter_order_lb: 2     
+  filter_order_mb: 6     
+  filter_order_hb: 10     
+  artifact_thr: 400      # exclude data above this threshold
+  array1: range(32,64) #3 4 7 8 10 14 17 15 44
+  array21: range(2)
+  # array22: range(100,112)
+  array22: [] #range(96,128)
+  array1_exclude: []
+  array2_exclude: []
+  i_start: 0 #None            # import data from start index
+  i_stop:  -1 #600000 #None             # to stop index
+  psth_win: [-1000, 5000]
+  exclude: False
+  normalize: False
+  zscore: False
+  car: True
+  sub_band: 1
+  motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+
+  spectra:
+    spgr_len: 500
+  plot:
+    ch_ids: [0]     # relative id of imported channel
+    general: True
+    filters: False
+
+    
+cerebus:
+    instance: 0
+    buffer_reset: True
+    buffer_size_cont: 30001
+    buffer_size_comments: 500
+
+file_handling:
+  data_path: '/data/clinical/neural/fr/'
+  # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
+
+  results: '/data/clinical/nf/results/'
+  # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
+  # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
+  # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
+  save_data: True     # keep always True
+  mode: 'ab'   # ab: append binary, wb: write binary (will overwrite existing files)
+  git_hash: 4af0a92
+  filename_data: /data/clinical/neural/fr/2019-07-03/data_11_51_50.bin
+  filename_log_info: /data/clinical/neural/fr/2019-07-03/info_11_51_50.log
+  filename_events: /data/clinical/neural/fr/2019-07-03/events_11_51_50.txt
+
+speller:
+  type: 'feedback'    # exploration, question, training_color, color, feedback
+  audio: True
+  pyttsx_rate: 100
+  audio_result_fb: True
+
+feedback:
+  # normalized rate is multiplied by alpha, and baseline beta added.
+  
+  feedback_tone: True
+
+  alpha: 360    # scaling coefficient
+  beta: 120      # offset
+  tone_length: 0.25    # length of feedback tone in seconds
+  target_tone_length: 1.0    # length of feedback tone in seconds
+  
+  reward_on_target: True  # If target is reached, play reward tone and abort trial
+  target_n_tones: 5 # Play the target tone every n feedback tones
+  reward_sound: '/kiap/data/speller/feedback/kerching.wav'
+  hold_iterations: 3
+
+plot:
+  channels: [20, 18, 99, 16]   # channels for live plot, need to restart app if changed
+  fps: 10.       # frames per second
+  pca: False
+
+sim_data:
+  rate_bl: 10
+
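daq.spike_rates names two rate-estimation methods, 'boxcar' and 'exponential', with bin_width 0.05 s, decay_factor 0.9 and max_bins 20. Below is one plausible reading of those comments (average of the last max_bins bins vs. counts weighted by decay_factor per step back); the exact weighting and normalization used by the project's code are not shown in these dumps, so treat the details as assumptions:

```python
# Sketch of the 'boxcar' and 'exponential' spike-rate estimates named in
# daq.spike_rates; the normalization choices here are assumptions.

BIN_WIDTH = 0.05    # sec (spike_rates.bin_width)
DECAY_FACTOR = 0.9  # spike_rates.decay_factor
MAX_BINS = 20       # spike_rates.max_bins

def rate_boxcar(bin_counts):
    """Plain average over the last MAX_BINS spike-count bins, in Hz."""
    recent = bin_counts[-MAX_BINS:]
    return sum(recent) / (len(recent) * BIN_WIDTH)

def rate_exponential(bin_counts):
    """'for each step back, a bin's count will be multiplied by decay_factor'."""
    recent = bin_counts[-MAX_BINS:]
    # Newest bin gets weight 1, each older bin one more factor of DECAY_FACTOR.
    weights = [DECAY_FACTOR ** k for k in range(len(recent) - 1, -1, -1)]
    weighted = sum(w * c for w, c in zip(weights, recent))
    return weighted / (sum(weights) * BIN_WIDTH)

counts = [1, 0, 2, 1, 1, 0, 3, 1, 2, 1]  # spike counts per 50 ms bin
print(rate_boxcar(counts), rate_exponential(counts))
```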

+ 248 - 0
KIAP_BCI_neurofeedback/2019-07-03/config_dump_15_34_39.yaml

@@ -0,0 +1,248 @@
+system:
+  plot: 0
+
+general:
+  debug: 1
+  clear_trial_history: False
+
+daq:
+  n_channels_max: 128
+  # n_channels: 2
+  # exclude_channels: []  # These are BlackRock channel IDs (1-based).
+  exclude_channels: []
+  # car_channels: [] # channel IDs to use for common average reference. This is useful
+  #   for spike band power and LFP calculations
+  car_channels: []
+  fs: 30000.          # sampling frequency
+  smpl_fct: 30        # downsample factor
+  trigger_len: 50     # length of triggers <--- review: this parameter only appears in a commented out line
+  daq_sleep: 0.1      # s           <--- review: this parameter does not seem to be used anywhere
+  
+  normalization:
+    len:  600.0           # in seconds. Length of normalization period
+    do_update:  True        # Performs automatic updates if true
+    update_interval: 10.0   # in seconds. Defines in what intervals the rate normalization will be updated
+    range: [10, 90]    # centiles, for automated normalization
+
+    clamp_firing_rates: True
+    # if use_all_channels is False:
+    # channel firing rate r will be clamped and normalized (r_n):
+    # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
+    # if the channel is set to 'invert', then r_n := 1 - r_n
+    # All normalized rates are then averaged.
+    # otherwise, all channels will be averaged first, then normalized
+    
+    use_all_channels: false      # if True, all channels will be used. If False, channels as specified below will be used
+    all_channels: {bottom: 1.15625, top: 1.59375, invert: false}
+    
+    channels:
+    - {id: 20, bottom: 7.0, top: 16, invert: false}
+    - {id: 99, bottom: 1.0, top: 10, invert: True}
+
+      # - id: 18
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    19
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+      # # - id: 17
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    16
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 42
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    9
+      #  invert: False  # if True, channels normalized rate will be subtracted from 1.
+    
+      # - id: 0
+      #   bottom: 1   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:   20
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 1
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    24
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+
+
+  spike_rates:
+    n_units: 1        # number of units per channel
+    bin_width: 0.05    # sec, for spike rate estimation
+    loop_interval: 50  # ms 
+    method: 'boxcar'   # exponential or boxcar
+    decay_factor: .9    # for exponential decay, for each step back, a bin's count will be multiplied by decay_factor
+    max_bins: 20      # for exponential and boxcar methods, determines the number of bins in history to take into account
+    # bl_offset: 0.000001      # baseline correction constant
+    bl_offset: 30.      # baseline correction constant
+    # bl_offset: 0.1        # baseline correction constant
+    correct_bl: False       # for online mode
+    correct_bl_model: False # for offline mode
+
+
+buffer:
+  length: 600         # buffer shape: (length, channels)
+
+session:  
+  flags:
+    bl: True
+    bl_rand: False
+    decode: True
+    stimulus: True
+
+
+recording:
+  timing:
+    t_baseline_1: 5.                   # sec, trial-1 baseline duration
+    t_baseline_all: 1.                 # sec, all other trials
+    t_baseline_rand: 1.                # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
+    t_after_stimulus: 0.0        
+    t_response: 5.                     # sec, trial response duration
+    decoder_refresh_interval: .01     # sec, for continuous decoding, the cycle time of the decoder
+    bci_loop_interval: .05             # sec, step for bci thread loop
+    recording_loop_interval: .05       # sec, step for bci thread loop
+    recording_loop_interval_data: .02       # sec, step for data process loop
+
+classifier:
+    max_active_ch_nr: []
+    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
+    # include_channels: [0,   1,   4,   7,   8,  12,  14,  19,  22,  26,  28,  29,  31, 96, 100, 103]
+    # include_channels: range(0,128)
+    include_channels: [20]
+    # include_channels: [0,   1,   4,   6, 7,   8,  12,  14,  19, 20, 22,  26,  28,  29,  31, 96, 100, 103, 121]
+    # include_channels: [  1,   2,   3,   9,  19,  29,  41,  44,  48,  51,  52,  53,  54,
+         # 62,  63,  66,  74,  82,  94,  95, 113]
+    # exclude_channels: []
+    exclude_channels: [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+        13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,  25,  26,  27,
+        28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,
+        41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,
+        54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,
+        67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+        80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,
+        93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105,
+       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+       119, 120, 121, 122, 123, 124, 125, 126, 127]
+       
+    # exclude_channels: range(32,96)
+    # exclude_channels: []
+    n_triggers: 2     # DO NOT CHANGE THIS
+    n_classes: 2
+    template: [10, 14, 18, 22, 26, 30, 34, 38]
+    trigger_pos: 'stop'   # 'start' or 'stop' 
+
+    online: False        # will be overwritten by code, see bci.py
+    thr_prob: 0.8
+    thr_window: 40 #30    # number of samples for prob above threshold to trigger decision
+    break_loop: True      # True: move on as soon as decision is there, otherwise wait t_response time
+
+    # models to use for online decoding
+    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'   # scikit
+    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'   # explicit LDA
+    exclude_data_channels: []
+    n_neg_train: 100000
+    deadtime: 40
+
+
+
+    model_training:
+      save_model: False
+      model: 'scikit'  # eigen, scikit, explicit
+      solver: 'lsqr'    # svd, lsqr, eigen
+      cross_validation: True
+      n_splits:  5     # for cross-validation
+      test_size: .2     # float between (0,1) or int (absolute number of trials, >=n_classes)
+      reg_fact: 0.3       # regularization factor
+      fsel: False          # feature selection
+      triggers_plot: 3
+
+    peaks:          # these values are for offline training
+      # height: 0.9   # probability threshold
+      # width: 28      # min number of samples above threshold
+      distance: 40  # number of samples for peaks to be apart
+      sig: 'pred'   # 'prob', 'pred' -> signal based on probabilities or prediction class
+      prefilter: False
+
+    psth:
+      cut : [-40, 100]
+
+lfp:
+  fs: 1000               # sampling rate
+  sampling_ratio: 30
+  filter_fc_lb: [10, 0]    # cut-off frequencies for filter
+  filter_fc_mb: [12, 40]    # cut-off frequencies for filter
+  filter_fc_hb: [60, 250]    # cut-off frequencies for filter
+  filter_order_lb: 2     
+  filter_order_mb: 6     
+  filter_order_hb: 10     
+  artifact_thr: 400      # exclude data above this threshold
+  array1: range(32,64) #3 4 7 8 10 14 17 15 44
+  array21: range(2)
+  # array22: range(100,112)
+  array22: [] #range(96,128)
+  array1_exclude: []
+  array2_exclude: []
+  i_start: 0 #None            # import data from start index
+  i_stop:  -1 #600000 #None             # to stop index
+  psth_win: [-1000, 5000]
+  exclude: False
+  normalize: False
+  zscore: False
+  car: True
+  sub_band: 1
+  motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+
+  spectra:
+    spgr_len: 500
+  plot:
+    ch_ids: [0]     # relative id of imported channel
+    general: True
+    filters: False
+
+    
+cerebus:
+    instance: 0
+    buffer_reset: True
+    buffer_size_cont: 30001
+    buffer_size_comments: 500
+
+file_handling:
+  data_path: '/data/clinical/neural/fr/'
+  # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
+
+  results: '/data/clinical/nf/results/'
+  # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
+  # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
+  # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
+  save_data: True     # keep always True
+  mode: 'ab'   # ab: append binary, wb: write binary (will overwrite existing files)
+  git_hash: e5cf42b
+  filename_data: /data/clinical/neural/fr/2019-07-03/data_15_34_39.bin
+  filename_log_info: /data/clinical/neural/fr/2019-07-03/info_15_34_39.log
+  filename_events: /data/clinical/neural/fr/2019-07-03/events_15_34_39.txt
+
+speller:
+  type: 'feedback'    # exploration, question, training_color, color, feedback
+  audio: True
+  pyttsx_rate: 100
+  audio_result_fb: True
+
+feedback:
+  # normalized rate is multiplied by alpha, and baseline beta added.
+  
+  feedback_tone: True
+
+  alpha: 360    # scaling coefficient
+  beta: 120      # offset
+  tone_length: 0.25    # length of feedback tone in seconds
+  target_tone_length: 1.0    # length of feedback tone in seconds
+  
+  reward_on_target: false  # If target is reached, play reward tone and abort trial
+  target_n_tones: 5 # Play the target tone every n feedback tones
+  reward_sound: '/kiap/data/speller/feedback/kerching.wav'
+  hold_iterations: 3
+
+plot:
+  channels: [20, 18, 99, 16]   # channels for live plot, need to restart app if changed
+  fps: 10.       # frames per second
+  pca: False
+
+sim_data:
+  rate_bl: 10
+
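The first three dumps in this commit start with a !munch.Munch YAML tag (this dump and the remaining ones omit it), i.e. they were written out from a munch.Munch object. A minimal loading sketch, assuming the munch package is installed alongside PyYAML; importing munch registers a constructor for that tag, so tagged dumps come back as Munch objects and untagged ones as plain dicts. The filename is simply one of the paths added by this commit:

```python
# Minimal sketch for reading one of these config dumps back in.
# Assumes munch + PyYAML are installed; importing munch registers the
# '!munch.Munch' constructor on PyYAML's default Loader.
import munch  # noqa: F401  (import side effect registers the YAML tag)
import yaml

path = "KIAP_BCI_neurofeedback/2019-07-03/config_dump_11_24_33.yaml"
with open(path) as fh:
    cfg = yaml.load(fh, Loader=yaml.Loader)

print(cfg["daq"]["normalization"]["channels"])
```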

+ 248 - 0
KIAP_BCI_neurofeedback/2019-07-03/config_dump_15_46_20.yaml

@@ -0,0 +1,248 @@
+system:
+  plot: 0
+
+general:
+  debug: 1
+  clear_trial_history: False
+
+daq:
+  n_channels_max: 128
+  # n_channels: 2
+  # exclude_channels: []  # These are BlackRock channel IDs (1-based).
+  exclude_channels: []
+  # car_channels: [] # channel IDs to use for common average reference. This is useful
+  #   for spike band power and LFP calculations
+  car_channels: []
+  fs: 30000.          # sampling frequency
+  smpl_fct: 30        # downsample factor
+  trigger_len: 50     # length of triggers <--- review: this parameter only appears in a commented out line
+  daq_sleep: 0.1      # s           <--- review: this parameter does not seem to be used anywhere
+  
+  normalization:
+    len:  600.0           # in seconds. Length of normalization period
+    do_update:  True        # Performs automatic updates if true
+    update_interval: 10.0   # in seconds. Defines in what intervals the rate normalization will be updated
+    range: [10, 90]    # centiles, for automated normalization
+
+    clamp_firing_rates: True
+    # if use_all_channels is False:
+    # channel firing rate r will be clamped and normalized (r_n):
+    # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
+    # if the channel is set to 'invert', then r_n := 1 - r_n
+    # All normalized rates are then averaged.
+    # otherwise, all channels will be averaged first, then normalized
+    
+    use_all_channels: false      # if True, all channels will be used. If False, channels as specified below will be used
+    all_channels: {bottom: 1.15625, top: 1.59375, invert: false}
+    
+    channels:
+      - {id: 20, bottom: 8.0, top: 35.0, invert: false}
+      - {id: 99, bottom: 2.0, top: 17.0, invert: true}
+
+      # - id: 18
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    19
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+      # # - id: 17
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    16
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 42
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    9
+      #  invert: False  # if True, channels normalized rate will be subtracted from 1.
+    
+      # - id: 0
+      #   bottom: 1   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:   20
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 1
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    24
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+
+
+  spike_rates:
+    n_units: 1        # number of units per channel
+    bin_width: 0.05    # sec, for spike rate estimation
+    loop_interval: 50  # ms 
+    method: 'boxcar'   # exponential or boxcar
+    decay_factor: .9    # for exponential decay, for each step back, a bin's count will be multiplied by decay_factor
+    max_bins: 20      # for exponential and boxcar methods, determines the number of bins in history to take into account
+    # bl_offset: 0.000001      # baseline correction constant
+    bl_offset: 30.      # baseline correction constant
+    # bl_offset: 0.1        # baseline correction constant
+    correct_bl: False       # for online mode
+    correct_bl_model: False # for offline mode
+
+
+buffer:
+  length: 600         # buffer shape: (length, channels)
+
+session:  
+  flags:
+    bl: True
+    bl_rand: True
+    decode: True
+    stimulus: True
+
+
+recording:
+  timing:
+    t_baseline_1: 5.                   # sec, trial-1 baseline duration
+    t_baseline_all: 2.                 # sec, all other trials
+    t_baseline_rand: 1.                # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
+    t_after_stimulus: 0.50        
+    t_response: 5.                     # sec, trial response duration
+    decoder_refresh_interval: .01     # sec, for continuous decoding, the cycle time of the decoder
+    bci_loop_interval: .05             # sec, step for bci thread loop
+    recording_loop_interval: .05       # sec, step for bci thread loop
+    recording_loop_interval_data: .02       # sec, step for data process loop
+
+classifier:
+    max_active_ch_nr: []
+    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
+    # include_channels: [0,   1,   4,   7,   8,  12,  14,  19,  22,  26,  28,  29,  31, 96, 100, 103]
+    # include_channels: range(0,128)
+    include_channels: [20]
+    # include_channels: [0,   1,   4,   6, 7,   8,  12,  14,  19, 20, 22,  26,  28,  29,  31, 96, 100, 103, 121]
+    # include_channels: [  1,   2,   3,   9,  19,  29,  41,  44,  48,  51,  52,  53,  54,
+         # 62,  63,  66,  74,  82,  94,  95, 113]
+    # exclude_channels: []
+    exclude_channels: [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+        13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,  25,  26,  27,
+        28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,
+        41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,
+        54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,
+        67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+        80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,
+        93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105,
+       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+       119, 120, 121, 122, 123, 124, 125, 126, 127]
+       
+    # exclude_channels: range(32,96)
+    # exclude_channels: []
+    n_triggers: 2     # DO NOT CHANGE THIS
+    n_classes: 2
+    template: [10, 14, 18, 22, 26, 30, 34, 38]
+    trigger_pos: 'start'   # 'start' or 'stop' 
+
+    online: False        # will be overwritten by code, see bci.py
+    thr_prob: 0.8
+    thr_window: 40 #30    # number of samples for prob above threshold to trigger decision
+    break_loop: True      # True: move on as soon as decision is there, otherwise wait t_response time
+
+    # models to use for online decoding
+    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'   # scikit
+    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'   # explicit LDA
+    exclude_data_channels: []
+    n_neg_train: 100000
+    deadtime: 40
+
+
+
+    model_training:
+      save_model: False
+      model: 'scikit'  # eigen, scikit, explicit
+      solver: 'lsqr'    # svd, lsqr, eigen
+      cross_validation: True
+      n_splits:  5     # for cross-validation
+      test_size: .2     # float between (0,1) or int (absolute number of trials, >=n_classes)
+      reg_fact: 0.3       # regularization factor
+      fsel: False          # feature selection
+      triggers_plot: 3
+
+    peaks:          # these values are for offline training
+      # height: 0.9   # probability threshold
+      # width: 28      # min number of samples above threshold
+      distance: 40  # number of samples for peaks to be apart
+      sig: 'pred'   # 'prob', 'pred' -> signal based on probabilities or prediction class
+      prefilter: False
+
+    psth:
+      cut : [-40, 100]
+
+lfp:
+  fs: 1000               # sampling rate
+  sampling_ratio: 30
+  filter_fc_lb: [10, 0]    # cut-off frequencies for filter
+  filter_fc_mb: [12, 40]    # cut-off frequencies for filter
+  filter_fc_hb: [60, 250]    # cut-off frequencies for filter
+  filter_order_lb: 2     
+  filter_order_mb: 6     
+  filter_order_hb: 10     
+  artifact_thr: 400      # exclude data above this threshold
+  array1: range(32,64) #3 4 7 8 10 14 17 15 44
+  array21: range(2)
+  # array22: range(100,112)
+  array22: [] #range(96,128)
+  array1_exclude: []
+  array2_exclude: []
+  i_start: 0 #None            # import data from start index
+  i_stop:  -1 #600000 #None             # to stop index
+  psth_win: [-1000, 5000]
+  exclude: False
+  normalize: False
+  zscore: False
+  car: True
+  sub_band: 1
+  motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+
+  spectra:
+    spgr_len: 500
+  plot:
+    ch_ids: [0]     # relative id of imported channel
+    general: True
+    filters: False
+
+    
+cerebus:
+    instance: 0
+    buffer_reset: True
+    buffer_size_cont: 30001
+    buffer_size_comments: 500
+
+file_handling:
+  data_path: '/data/clinical/neural/fr/'
+  # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
+
+  results: '/data/clinical/nf/results/'
+  # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
+  # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
+  # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
+  save_data: True     # keep always True
+  mode: 'ab'   # ab: append binary, wb: write binary (will overwrite existing files)
+  git_hash: e5cf42b
+  filename_data: /data/clinical/neural/fr/2019-07-03/data_15_46_20.bin
+  filename_log_info: /data/clinical/neural/fr/2019-07-03/info_15_46_20.log
+  filename_events: /data/clinical/neural/fr/2019-07-03/events_15_46_20.txt
+
+speller:
+  type: 'feedback'    # exploration, question, training_color, color, feedback
+  audio: True
+  pyttsx_rate: 100
+  audio_result_fb: True
+
+feedback:
+  # normalized rate is multiplied by alpha, and baseline beta added.
+  
+  feedback_tone: True
+
+  alpha: 360    # scaling coefficient
+  beta: 120      # offset
+  tone_length: 0.25    # length of feedback tone in seconds
+  target_tone_length: 1.0    # length of feedback tone in seconds
+  
+  reward_on_target: true  # If target is reached, play reward tone and abort trial
+  target_n_tones: 5 # Play the target tone every n feedback tones
+  reward_sound: '/kiap/data/speller/feedback/kerching.wav'
+  hold_iterations: 3
+
+plot:
+  channels: [20, 99]   # channels for live plot, need to restart app if changed
+  fps: 10.       # frames per second
+  pca: False
+
+sim_data:
+  rate_bl: 10
+

+ 248 - 0
KIAP_BCI_neurofeedback/2019-07-03/config_dump_16_04_01.yaml

@@ -0,0 +1,248 @@
+system:
+  plot: 0
+
+general:
+  debug: 1
+  clear_trial_history: False
+
+daq:
+  n_channels_max: 128
+  # n_channels: 2
+  # exclude_channels: []  # These are BlackRock channel IDs (1-based).
+  exclude_channels: []
+  # car_channels: [] # channel IDs to use for common average reference. This is useful
+  #   for spike band power and LFP calculations
+  car_channels: []
+  fs: 30000.          # sampling frequency
+  smpl_fct: 30        # downsample factor
+  trigger_len: 50     # length of triggers <--- review: this parameter only appears in a commented out line
+  daq_sleep: 0.1      # s           <--- review: this parameter does not seem to be used anywhere
+  
+  normalization:
+    len:  600.0           # in seconds. Length of normalization period
+    do_update:  false        # Performs automatic updates if true
+    update_interval: 10.0   # in seconds. Defines in what intervals the rate normalization will be updated
+    range: [10, 90]    # centiles, for automated normalization
+
+    clamp_firing_rates: True
+    # if use_all_channels is False:
+    # channel firing rate r will be clamped and normalized (r_n):
+    # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
+    # if the channel is set to 'invert', then r_n := 1 - r_n
+    # All normalized rates are then averaged.
+    # otherwise, all channels will be averaged first, then normalized
+    
+    use_all_channels: false      # if True, all channels will be used. If False, channels as specified below will be used
+    all_channels: {bottom: 1.15625, top: 1.59375, invert: false}
+    
+    channels:
+      - {id: 20, bottom: 20.0, top: 35.0, invert: true}
+      - {id: 99, bottom: 7.0, top: 16.0, invert: false}
+
+      # - id: 18
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    19
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+      # # - id: 17
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    16
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 42
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    9
+      #  invert: False  # if True, channels normalized rate will be subtracted from 1.
+    
+      # - id: 0
+      #   bottom: 1   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:   20
+      #   invert: False  # if True, channels normalized rate will be subtracted from 1.
+      # - id: 1
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    24
+      #   invert: True  # if True, channels normalized rate will be subtracted from 1.
+
+
+  spike_rates:
+    n_units: 1        # number of units per channel
+    bin_width: 0.05    # sec, for spike rate estimation
+    loop_interval: 50  # ms 
+    method: 'boxcar'   # exponential or boxcar
+    decay_factor: .9    # for exponential decay, for each step back, a bin's count will be multiplied by decay_factor
+    max_bins: 20      # for exponential and boxcar methods, determines the number of bins in history to take into account
+    # bl_offset: 0.000001      # baseline correction constant
+    bl_offset: 30.      # baseline correction constant
+    # bl_offset: 0.1        # baseline correction constant
+    correct_bl: False       # for online mode
+    correct_bl_model: False # for offline mode
+
+
+buffer:
+  length: 600         # buffer shape: (length, channels)
+
+session:  
+  flags:
+    bl: True
+    bl_rand: True
+    decode: True
+    stimulus: True
+
+
+recording:
+  timing:
+    t_baseline_1: 5.                   # sec, trial-1 baseline duration
+    t_baseline_all: 2.                 # sec, all other trials
+    t_baseline_rand: 1.                # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
+    t_after_stimulus: 0.50        
+    t_response: 5.                     # sec, trial response duration
+    decoder_refresh_interval: .01     # sec, for continuous decoding, the cycle time of the decoder
+    bci_loop_interval: .05             # sec, step for bci thread loop
+    recording_loop_interval: .05       # sec, step for bci thread loop
+    recording_loop_interval_data: .02       # sec, step for data process loop
+
+classifier:
+    max_active_ch_nr: []
+    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
+    # include_channels: [0,   1,   4,   7,   8,  12,  14,  19,  22,  26,  28,  29,  31, 96, 100, 103]
+    # include_channels: range(0,128)
+    include_channels: [20]
+    # include_channels: [0,   1,   4,   6, 7,   8,  12,  14,  19, 20, 22,  26,  28,  29,  31, 96, 100, 103, 121]
+    # include_channels: [  1,   2,   3,   9,  19,  29,  41,  44,  48,  51,  52,  53,  54,
+         # 62,  63,  66,  74,  82,  94,  95, 113]
+    # exclude_channels: []
+    exclude_channels: [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+        13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,  25,  26,  27,
+        28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,
+        41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,
+        54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,
+        67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+        80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,
+        93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105,
+       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+       119, 120, 121, 122, 123, 124, 125, 126, 127]
+       
+    # exclude_channels: range(32,96)
+    # exclude_channels: []
+    n_triggers: 2     # DO NOT CHANGE THIS
+    n_classes: 2
+    template: [10, 14, 18, 22, 26, 30, 34, 38]
+    trigger_pos: 'start'   # 'start' or 'stop' 
+
+    online: False        # will be overwritten by code, see bci.py
+    thr_prob: 0.8
+    thr_window: 40 #30    # number of samples for prob above threshold to trigger decision
+    break_loop: True      # True: move on as soon as decision is there, otherwise wait t_response time
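+    # illustrative reading (assuming one probability sample per decoder cycle): with
+    # decoder_refresh_interval 0.01 s, thr_window = 40 samples above thr_prob (0.8)
+    # corresponds to roughly 0.4 s worth of samples above threshold before a decision is triggered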
+
+    # models to use for online decoding
+    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'   # scikit
+    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'   # explicit LDA
+    exclude_data_channels: []
+    n_neg_train: 100000
+    deadtime: 40
+
+
+
+    model_training:
+      save_model: False
+      model: 'scikit'  # eigen, scikit, explicit
+      solver: 'lsqr'    # svd, lsqr, eigen
+      cross_validation: True
+      n_splits:  5     # for cross-validation
+      test_size: .2     # float between (0,1) or int (absolute number of trials, >=n_classes)
+      reg_fact: 0.3       # regularization factor
+      fsel: False          # feature selection
+      triggers_plot: 3
+
+    peaks:          # these values are for offline training
+      # height: 0.9   # probability threshold
+      # width: 28      # min number of samples above threshold
+      distance: 40  # number of samples for peaks to be apart
+      sig: 'pred'   # 'prob', 'pred' -> signal based on probabilities or prediction class
+      prefilter: False
+
+    psth:
+      cut: [-40, 100]
+
+lfp:
+  fs: 1000               # sampling rate
+  sampling_ratio: 30
+  filter_fc_lb: [10, 0]    # cut-off frequencies for filter
+  filter_fc_mb: [12, 40]    # cut-off frequencies for filter
+  filter_fc_hb: [60, 250]    # cut-off frequencies for filter
+  filter_order_lb: 2     
+  filter_order_mb: 6     
+  filter_order_hb: 10     
+  artifact_thr: 400      # exclude data above this threshold
+  array1: range(32,64) #3 4 7 8 10 14 17 15 44
+  array21: range(2)
+  # array22: range(100,112)
+  array22: [] #range(96,128)
+  array1_exclude: []
+  array2_exclude: []
+  i_start: 0 #None            # import data from start index
+  i_stop:  -1 #600000 #None             # to stop index
+  psth_win: [-1000, 5000]
+  exclude: False
+  normalize: False
+  zscore: False
+  car: True
+  sub_band: 1
+  motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+
+  spectra:
+    spgr_len: 500
+  plot:
+    ch_ids: [0]     # relative id of imported channel
+    general: True
+    filters: False
+
+    
+cerebus:
+    instance: 0
+    buffer_reset: True
+    buffer_size_cont: 30001
+    buffer_size_comments: 500
+
+file_handling:
+  data_path: '/data/clinical/neural/fr/'
+  # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
+
+  results: '/data/clinical/nf/results/'
+  # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
+  # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
+  # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
+  save_data: True     # keep always True
+  mode: 'ab'   # ab: append binary, wb: write binary (will overwrite existing files)
+  git_hash: e5cf42b
+  filename_data: /data/clinical/neural/fr/2019-07-03/data_16_04_01.bin
+  filename_log_info: /data/clinical/neural/fr/2019-07-03/info_16_04_01.log
+  filename_events: /data/clinical/neural/fr/2019-07-03/events_16_04_01.txt
+
+speller:
+  type: 'feedback'    # exploration, question, training_color, color, feedback
+  audio: True
+  pyttsx_rate: 100
+  audio_result_fb: True
+
+feedback:
+  # the normalized rate is multiplied by alpha, then the baseline beta is added.
+  
+  feedback_tone: True
+
+  alpha: 360    # scaling coefficient
+  beta: 120      # offset
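+  # illustrative example: a normalized rate of 0.5 maps to 0.5 * 360 + 120 = 300, and the
+  # full range [0, 1] maps to [120, 480] (presumably the feedback tone pitch; units are
+  # not stated here)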
+  tone_length: 0.25    # length of feedback tone in seconds
+  target_tone_length: 1.0    # length of target tone in seconds
+  
+  reward_on_target: true  # If target is reached, play reward tone and abort trial
+  target_n_tones: 5 # Play the target tone every n feedback tones
+  reward_sound: '/kiap/data/speller/feedback/kerching.wav'
+  hold_iterations: 3
+
+plot:
+  channels: [20, 99]   # channels for live plot, need to restart app if changed
+  fps: 10.       # frames per second
+  pca: False
+
+sim_data:
+  rate_bl: 10
+

+ 248 - 0
KIAP_BCI_neurofeedback/2019-07-03/config_dump_16_11_16.yaml

@@ -0,0 +1,248 @@
+system:
+  plot: 0
+
+general:
+  debug: 1
+  clear_trial_history: False
+
+daq:
+  n_channels_max: 128
+  # n_channels: 2
+  # exclude_channels: []  # These are BlackRock channel IDs (1-based).
+  exclude_channels: []
+  # car_channels: [] # channel IDs to use for common average reference. This is useful
+  #   for spike band power and LFP calculations
+  car_channels: []
+  fs: 30000.          # sampling frequency
+  smpl_fct: 30        # downsample factor
+  trigger_len: 50     # length of triggers <--- review: this parameter only appears in a commented out line
+  daq_sleep: 0.1      # s           <--- review: this parameter does not seem to be used anywhere
+  
+  normalization:
+    len:  600.0           # in seconds. Length of normalization period
+    do_update:  false        # Performs automatic updates if true
+    update_interval: 10.0   # in seconds. Defines in what intervals the rate normalization will be updated
+    range: [10, 90]    # centiles, for automated normalization
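+    # presumably, when do_update is true, the 10th and 90th centile rates observed over
+    # the normalization period become the new bottom/top of the normalization range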
+
+    clamp_firing_rates: True
+    # if use_all_channels is False:
+    # channel firing rate r will be clamped and normalized (r_n):
+    # r_n = (max(bottom, min(top, r)) - bottom) / (top - bottom)
+    # if the channel is set to 'invert', then r_n := 1 - r_n
+    # All normalized rates are then averaged.
+    # otherwise, all channels will be averaged first, then normalized
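+    # worked example using the channel-20 entry below (bottom 20, top 35, invert true):
+    # a rate r = 25 gives r_n = (max(20, min(35, 25)) - 20) / (35 - 20) = 1/3,
+    # and with invert the contribution becomes 1 - 1/3 = 2/3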
+    
+    use_all_channels: false      # if True, all channels will be used. If False, channels as specified below will be used
+    all_channels: {bottom: 1.15625, top: 1.59375, invert: false}
+    
+    channels:
+      - {id: 20, bottom: 20.0, top: 35.0, invert: true}
+      - {id: 99, bottom: 7.0, top: 16.0, invert: false}
+
+      # - id: 18
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    19
+      #   invert: True  # if True, the channel's normalized rate will be subtracted from 1.
+      # - id: 17
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    16
+      #   invert: False  # if True, the channel's normalized rate will be subtracted from 1.
+      # - id: 42
+      #   bottom: 0   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    9
+      #   invert: False  # if True, the channel's normalized rate will be subtracted from 1.
+    
+      # - id: 0
+      #   bottom: 1   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    20
+      #   invert: False  # if True, the channel's normalized rate will be subtracted from 1.
+      # - id: 1
+      #   bottom: 2   # signal will be clamped to the range [bottom, top] and normalized
+      #   top:    24
+      #   invert: True  # if True, the channel's normalized rate will be subtracted from 1.
+
+
+  spike_rates:
+    n_units: 1        # number of units per channel
+    bin_width: 0.05    # sec, for spike rate estimation
+    loop_interval: 50  # ms 
+    method: 'boxcar'   # exponential or boxcar
+    decay_factor: .9    # for exponential decay, for each step back, a bin's count will be multiplied by decay_factor
+    max_bins: 20      # for exponential and boxcar methods, determines the number of bins in history to take into account
+    # bl_offset: 0.000001      # baseline correction constant
+    bl_offset: 30.      # baseline correction constant
+    # bl_offset: 0.1        # baseline correction constant
+    correct_bl: False       # for online mode
+    correct_bl_model: False # for offline mode
+
+
+buffer:
+  length: 600         # buffer shape: (length, channels)
+
+session:  
+  flags:
+    bl: True
+    bl_rand: True
+    decode: True
+    stimulus: True
+
+
+recording:
+  timing:
+    t_baseline_1: 5.                   # sec, trial-1 baseline duration
+    t_baseline_all: 2.                 # sec, all other trials
+    t_baseline_rand: 1.                # sec, add random inter-trial interval between 0 and t_baseline_rand IF session.flags.bl_rand is True
+    t_after_stimulus: 0.50        
+    t_response: 5.                     # sec, trial response duration
+    decoder_refresh_interval: .01     # sec, for continuous decoding, the cycle time of the decoder
+    bci_loop_interval: .05             # sec, step for bci thread loop
+    recording_loop_interval: .05       # sec, step for recording thread loop
+    recording_loop_interval_data: .02       # sec, step for data process loop
+
+classifier:
+    max_active_ch_nr: []
+    # include_channels: [38, 43, 50, 52, 56, 61, 65, 67, 73, 81, 87, 88, 91]
+    # include_channels: [0,   1,   4,   7,   8,  12,  14,  19,  22,  26,  28,  29,  31, 96, 100, 103]
+    # include_channels: range(0,128)
+    include_channels: [20]
+    # include_channels: [0,   1,   4,   6, 7,   8,  12,  14,  19, 20, 22,  26,  28,  29,  31, 96, 100, 103, 121]
+    # include_channels: [  1,   2,   3,   9,  19,  29,  41,  44,  48,  51,  52,  53,  54,
+         # 62,  63,  66,  74,  82,  94,  95, 113]
+    # exclude_channels: []
+    exclude_channels: [  0,   1,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  12,
+        13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,  25,  26,  27,
+        28,  29,  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,
+        41,  42,  43,  44,  45,  46,  47,  48,  49,  50,  51,  52,  53,
+        54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,
+        67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
+        80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  92,
+        93,  94,  95,  96,  97,  98,  99, 100, 101, 102, 103, 104, 105,
+       106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+       119, 120, 121, 122, 123, 124, 125, 126, 127]
+       
+    # exclude_channels: range(32,96)
+    # exclude_channels: []
+    n_triggers: 2     # DO NOT CHANGE THIS
+    n_classes: 2
+    template: [10, 14, 18, 22, 26, 30, 34, 38]
+    trigger_pos: 'start'   # 'start' or 'stop' 
+
+    online: False        # will be overwritten by code, see bci.py
+    thr_prob: 0.8
+    thr_window: 40 #30    # number of samples for prob above threshold to trigger decision
+    break_loop: True      # True: move on as soon as decision is there, otherwise wait t_response time
+
+    # models to use for online decoding
+    path_model1: '/data/clinical/neural/fr/2019-06-26/model1_104948.pkl'   # scikit
+    path_model2: '/data/clinical/neural/fr/2019-06-26/model2_104948.pkl'   # explicit LDA
+    exclude_data_channels: []
+    n_neg_train: 100000
+    deadtime: 40
+
+
+
+    model_training:
+      save_model: False
+      model: 'scikit'  # eigen, scikit, explicit
+      solver: 'lsqr'    # svd, lsqr, eigen
+      cross_validation: True
+      n_splits:  5     # for cross-validation
+      test_size: .2     # float between (0,1) or int (absolute number of trials, >=n_classes)
+      reg_fact: 0.3       # regularization factor
+      fsel: False          # feature selection
+      triggers_plot: 3
+
+    peaks:          # these values are for offline training
+      # height: 0.9   # probability threshold
+      # width: 28      # min number of samples above threshold
+      distance: 40  # number of samples for peaks to be apart
+      sig: 'pred'   # 'prob', 'pred' -> signal based on probabilities or prediction class
+      prefilter: False
+
+    psth:
+      cut: [-40, 100]
+
+lfp:
+  fs: 1000               # sampling rate
+  sampling_ratio: 30
+  filter_fc_lb: [10, 0]    # cut-off frequencies for filter
+  filter_fc_mb: [12, 40]    # cut-off frequencies for filter
+  filter_fc_hb: [60, 250]    # cut-off frequencies for filter
+  filter_order_lb: 2     
+  filter_order_mb: 6     
+  filter_order_hb: 10     
+  artifact_thr: 400      # exclude data above this threshold
+  array1: range(32,64) #3 4 7 8 10 14 17 15 44
+  array21: range(2)
+  # array22: range(100,112)
+  array22: [] #range(96,128)
+  array1_exclude: []
+  array2_exclude: []
+  i_start: 0 #None            # import data from start index
+  i_stop:  -1 #600000 #None             # to stop index
+  psth_win: [-1000, 5000]
+  exclude: False
+  normalize: False
+  zscore: False
+  car: True
+  sub_band: 1
+  motor_mapping: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+
+  spectra:
+    spgr_len: 500
+  plot:
+    ch_ids: [0]     # relative id of imported channel
+    general: True
+    filters: False
+
+    
+cerebus:
+    instance: 0
+    buffer_reset: True
+    buffer_size_cont: 30001
+    buffer_size_comments: 500
+
+file_handling:
+  data_path: '/data/clinical/neural/fr/'
+  # data_path: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/'
+
+  results: '/data/clinical/nf/results/'
+  # results: '/home/vlachos/devel/vg/kiapvmdev/data/clinical/neural_new/results/'
+  # data_path: '/media/vlachos/kiap_backup/Recordings/K01/laptop/clinical/neural/'
+  # data_path: '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
+  save_data: True     # keep always True
+  mode: 'ab'   # ab: append binary, wb: write binary (will overwrite existing files)
+  git_hash: e5cf42b
+  filename_data: /data/clinical/neural/fr/2019-07-03/data_16_11_16.bin
+  filename_log_info: /data/clinical/neural/fr/2019-07-03/info_16_11_16.log
+  filename_events: /data/clinical/neural/fr/2019-07-03/events_16_11_16.txt
+
+speller:
+  type: 'feedback'    # exploration, question, training_color, color, feedback
+  audio: True
+  pyttsx_rate: 100
+  audio_result_fb: True
+
+feedback:
+  # the normalized rate is multiplied by alpha, then the baseline beta is added.
+  
+  feedback_tone: True
+
+  alpha: 360    # scaling coefficient
+  beta: 120      # offset
+  tone_length: 0.25    # length of feedback tone in seconds
+  target_tone_length: 1.0    # length of target tone in seconds
+  
+  reward_on_target: true  # If target is reached, play reward tone and abort trial
+  target_n_tones: 5 # Play the target tone every n feedback tones
+  reward_sound: '/kiap/data/speller/feedback/kerching.wav'
+  hold_iterations: 4
+
+plot:
+  channels: [20, 99]   # channels for live plot, need to restart app if changed
+  fps: 10.       # frames per second
+  pca: False
+
+sim_data:
+  rate_bl: 10
+

+ 86 - 0
KIAP_BCI_neurofeedback/2019-07-03/paradigm_11_24_33.yaml

@@ -0,0 +1,86 @@
+
+# EXPLORATION
+exploration:
+  mode: ['Screening'] # Screening only
+  selected_mode: 0
+  # states: ['ruhe','ja','nein','kopf','fuss']
+  # states: ['Zunge', 'Schliesse_Hand', 'RechterDaumen', 'Oeffne_Hand', 'Fuss']
+  states: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+  # states: ['rechte_hand','linke_hand','rechter_daumen','linker_daumen','zunge','fuesse']
+  # states: ['SchliesseHand','BeugeRechtenMittelfinger', 'BeugeRechtenZeigefinger','BeugeRechtenDaumen','OeffneHand',
+  # 'StreckeRechtenMittelfinger','StreckeRechtenZeigefinger','StreckeRechtenDaumen']
+  selected_states: [0,1,2,3,4]
+  # selected_states: [0,1,2,3,4,5,6,7]
+  audio_path: '/kiap/data/speller/Audio/exploration'
+  number_of_stim:  2  # per state
+
+# QUESTION
+question:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 1
+  audio_path: '/kiap/data/speller/Audio/question'
+  number_of_stim: 10
+  symmetrical: True               # Same number of YES/NO stim 
+
+# FEEDBACK
+feedback:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 0
+  audio_path: '/kiap/data/speller/feedback/'
+  number_of_stim: 10
+  symmetrical: True
+  states:   # states are defined as a map from name to an array consisting of the actual target
+            # and the lower and upper bounds of the acceptance interval
+    baseline: [0.5, 0.4, 0.6]
+    up: [1.0, 0.75, 1.0]
+    down: [0.0, 0.0, 0.25]
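+    # e.g. 'up: [1.0, 0.75, 1.0]' above: target value 1.0, with acceptance interval [0.75, 1.0]
+    # for the (normalized) feedback signal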
+
+# TRAINING_COLOR
+training_color:
+  mode: ['Training'] # Training only
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  training_string: 'ICH BIN'
+  confirmation_yes: 1
+  confirmation_no: 1
+
+# COLOR
+color:
+  mode: ['Validation','Free'] # Free or Validation
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  max_length_vocabulary: 5
+  confirmation_methods: ['single','double_yes','best_of_three']
+  selected_confirmation_method: 0
+  # corpora_path: 'Corpora'
+  corpora_path: '/kiap/data/speller/Corpora'
+  audio_path: '/kiap/data/speller/Audio'
+  general_corpus: 'cfd_tiger.p'
+  # general_corpus: 'deu_mixed-typical_2011_1M-sentences.txt'
+  user_corpus: 'user.txt'
+  word_prediction: True
+  validation_string: 'ICH BIN FELIX.' # only for validation mode
+  
+# timing:
+#     t_baseline: 10                  # sec, trial baseline duration
+#     t_response: 1                  # sec, trial response duration
+#     decoder_refresh_interval: 0.01     # sec, for continuous decoding, the cycle time of the decoder
+#     bci_loop_interval: .05            # sec, step for bci thread loop
+#     recording_loop_interval: .05            # sec, step for bci thread loop
+
+variablesToShowInGUI:
+    variableGroups: ['timing','color']
+    variableNames: ['t_baseline','t_response','bci_loop_interval','selected_mode','max_length_vocabulary']
+    t: 1

+ 86 - 0
KIAP_BCI_neurofeedback/2019-07-03/paradigm_11_39_36.yaml

@@ -0,0 +1,86 @@
+
+# EXPLORATION
+exploration:
+  mode: ['Screening'] # Screening only
+  selected_mode: 0
+  # states: ['ruhe','ja','nein','kopf','fuss']
+  # states: ['Zunge', 'Schliesse_Hand', 'RechterDaumen', 'Oeffne_Hand', 'Fuss']
+  states: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+  # states: ['rechte_hand','linke_hand','rechter_daumen','linker_daumen','zunge','fuesse']
+  # states: ['SchliesseHand','BeugeRechtenMittelfinger', 'BeugeRechtenZeigefinger','BeugeRechtenDaumen','OeffneHand',
+  # 'StreckeRechtenMittelfinger','StreckeRechtenZeigefinger','StreckeRechtenDaumen']
+  selected_states: [0,1,2,3,4]
+  # selected_states: [0,1,2,3,4,5,6,7]
+  audio_path: '/kiap/data/speller/Audio/exploration'
+  number_of_stim:  2  # per state
+
+# QUESTION
+question:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 1
+  audio_path: '/kiap/data/speller/Audio/question'
+  number_of_stim: 10
+  symmetrical: True               # Same number of YES/NO stim 
+
+# FEEDBACK
+feedback:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 0
+  audio_path: '/kiap/data/speller/feedback/'
+  number_of_stim: 10
+  symmetrical: True
+  states:   # states are defined as a map from name to an array consisting of the actual target
+            # and the lower and upper bounds of the acceptance interval
+    baseline: [0.5, 0.4, 0.6]
+    up: [1.0, 0.75, 1.0]
+    down: [0.0, 0.0, 0.25]
+
+# TRAINING_COLOR
+training_color:
+  mode: ['Training'] # Training only
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  training_string: 'ICH BIN'
+  confirmation_yes: 1
+  confirmation_no: 1
+
+# COLOR
+color:
+  mode: ['Validation','Free'] # Free or Validation
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  max_length_vocabulary: 5
+  confirmation_methods: ['single','double_yes','best_of_three']
+  selected_confirmation_method: 0
+  # corpora_path: 'Corpora'
+  corpora_path: '/kiap/data/speller/Corpora'
+  audio_path: '/kiap/data/speller/Audio'
+  general_corpus: 'cfd_tiger.p'
+  # general_corpus: 'deu_mixed-typical_2011_1M-sentences.txt'
+  user_corpus: 'user.txt'
+  word_prediction: True
+  validation_string: 'ICH BIN FELIX.' # only for validation mode
+  
+# timing:
+#     t_baseline: 10                  # sec, trial baseline duration
+#     t_response: 1                  # sec, trial response duration
+#     decoder_refresh_interval: 0.01     # sec, for continuous decoding, the cycle time of the decoder
+#     bci_loop_interval: .05            # sec, step for bci thread loop
+#     recording_loop_interval: .05            # sec, step for bci thread loop
+
+variablesToShowInGUI:
+    variableGroups: ['timing','color']
+    variableNames: ['t_baseline','t_response','bci_loop_interval','selected_mode','max_length_vocabulary']
+    t: 1

+ 86 - 0
KIAP_BCI_neurofeedback/2019-07-03/paradigm_11_51_50.yaml

@@ -0,0 +1,86 @@
+
+# EXPLORATION
+exploration:
+  mode: ['Screening'] # Screening only
+  selected_mode: 0
+  # states: ['ruhe','ja','nein','kopf','fuss']
+  # states: ['Zunge', 'Schliesse_Hand', 'RechterDaumen', 'Oeffne_Hand', 'Fuss']
+  states: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+  # states: ['rechte_hand','linke_hand','rechter_daumen','linker_daumen','zunge','fuesse']
+  # states: ['SchliesseHand','BeugeRechtenMittelfinger', 'BeugeRechtenZeigefinger','BeugeRechtenDaumen','OeffneHand',
+  # 'StreckeRechtenMittelfinger','StreckeRechtenZeigefinger','StreckeRechtenDaumen']
+  selected_states: [0,1,2,3,4]
+  # selected_states: [0,1,2,3,4,5,6,7]
+  audio_path: '/kiap/data/speller/Audio/exploration'
+  number_of_stim:  2  # per state
+
+# QUESTION
+question:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 1
+  audio_path: '/kiap/data/speller/Audio/question'
+  number_of_stim: 10
+  symmetrical: True               # Same number of YES/NO stim 
+
+# FEEDBACK
+feedback:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 0
+  audio_path: '/kiap/data/speller/feedback/'
+  number_of_stim: 10
+  symmetrical: True
+  states:   # states are defined as a map from name to an array consisting of the actual target
+            # and the lower and upper bounds of the acceptance interval
+    baseline: [0.5, 0.4, 0.6]
+    up: [1.0, 0.75, 1.0]
+    down: [0.0, 0.0, 0.25]
+
+# TRAINING_COLOR
+training_color:
+  mode: ['Training'] # Training only
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  training_string: 'ICH BIN'
+  confirmation_yes: 1
+  confirmation_no: 1
+
+# COLOR
+color:
+  mode: ['Validation','Free'] # Free or Validation
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  max_length_vocabulary: 5
+  confirmation_methods: ['single','double_yes','best_of_three']
+  selected_confirmation_method: 0
+  # corpora_path: 'Corpora'
+  corpora_path: '/kiap/data/speller/Corpora'
+  audio_path: '/kiap/data/speller/Audio'
+  general_corpus: 'cfd_tiger.p'
+  # general_corpus: 'deu_mixed-typical_2011_1M-sentences.txt'
+  user_corpus: 'user.txt'
+  word_prediction: True
+  validation_string: 'ICH BIN FELIX.' # only for validation mode
+  
+# timing:
+#     t_baseline: 10                  # sec, trial baseline duration
+#     t_response: 1                  # sec, trial response duration
+#     decoder_refresh_interval: 0.01     # sec, for continuous decoding, the cycle time of the decoder
+#     bci_loop_interval: .05            # sec, step for bci thread loop
+#     recording_loop_interval: .05            # sec, step for bci thread loop
+
+variablesToShowInGUI:
+    variableGroups: ['timing','color']
+    variableNames: ['t_baseline','t_response','bci_loop_interval','selected_mode','max_length_vocabulary']
+    t: 1

+ 86 - 0
KIAP_BCI_neurofeedback/2019-07-03/paradigm_15_34_39.yaml

@@ -0,0 +1,86 @@
+
+# EXPLORATION
+exploration:
+  mode: ['Screening'] # Screening only
+  selected_mode: 0
+  # states: ['ruhe','ja','nein','kopf','fuss']
+  # states: ['Zunge', 'Schliesse_Hand', 'RechterDaumen', 'Oeffne_Hand', 'Fuss']
+  states: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+  # states: ['rechte_hand','linke_hand','rechter_daumen','linker_daumen','zunge','fuesse']
+  # states: ['SchliesseHand','BeugeRechtenMittelfinger', 'BeugeRechtenZeigefinger','BeugeRechtenDaumen','OeffneHand',
+  # 'StreckeRechtenMittelfinger','StreckeRechtenZeigefinger','StreckeRechtenDaumen']
+  selected_states: [0,1,2,3,4]
+  # selected_states: [0,1,2,3,4,5,6,7]
+  audio_path: '/kiap/data/speller/Audio/exploration'
+  number_of_stim:  2  # per state
+
+# QUESTION
+question:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 1
+  audio_path: '/kiap/data/speller/Audio/question'
+  number_of_stim: 10
+  symmetrical: True               # Same number of YES/NO stim 
+
+# FEEDBACK
+feedback:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 0
+  audio_path: '/kiap/data/speller/feedback/'
+  number_of_stim: 20
+  symmetrical: True
+  states:   # states are defined as a map from name to an array consisting of the actual target
+            # and the lower and upper bounds of the acceptance interval
+    #baseline: [0.5, 0.4, 0.6]
+    up: [1.0, 0.75, 1.0]
+    down: [0.0, 0.0, 0.25]
+
+# TRAINING_COLOR
+training_color:
+  mode: ['Training'] # Training only
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  training_string: 'ICH BIN'
+  confirmation_yes: 1
+  confirmation_no: 1
+
+# COLOR
+color:
+  mode: ['Validation','Free'] # Free or Validation
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  max_length_vocabulary: 5
+  confirmation_methods: ['single','double_yes','best_of_three']
+  selected_confirmation_method: 0
+  # corpora_path: 'Corpora'
+  corpora_path: '/kiap/data/speller/Corpora'
+  audio_path: '/kiap/data/speller/Audio'
+  general_corpus: 'cfd_tiger.p'
+  # general_corpus: 'deu_mixed-typical_2011_1M-sentences.txt'
+  user_corpus: 'user.txt'
+  word_prediction: True
+  validation_string: 'ICH BIN FELIX.' # only for validation mode
+  
+# timing:
+#     t_baseline: 10                  # sec, trial baseline duration
+#     t_response: 1                  # sec, trial response duration
+#     decoder_refresh_interval: 0.01     # sec, for continuous decoding, the cycle time of the decoder
+#     bci_loop_interval: .05            # sec, step for bci thread loop
+#     recording_loop_interval: .05            # sec, step for bci thread loop
+
+variablesToShowInGUI:
+    variableGroups: ['timing','color']
+    variableNames: ['t_baseline','t_response','bci_loop_interval','selected_mode','max_length_vocabulary']
+    t: 1

+ 86 - 0
KIAP_BCI_neurofeedback/2019-07-03/paradigm_15_46_20.yaml

@@ -0,0 +1,86 @@
+
+# EXPLORATION
+exploration:
+  mode: ['Screening'] # Screening only
+  selected_mode: 0
+  # states: ['ruhe','ja','nein','kopf','fuss']
+  # states: ['Zunge', 'Schliesse_Hand', 'RechterDaumen', 'Oeffne_Hand', 'Fuss']
+  states: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+  # states: ['rechte_hand','linke_hand','rechter_daumen','linker_daumen','zunge','fuesse']
+  # states: ['SchliesseHand','BeugeRechtenMittelfinger', 'BeugeRechtenZeigefinger','BeugeRechtenDaumen','OeffneHand',
+  # 'StreckeRechtenMittelfinger','StreckeRechtenZeigefinger','StreckeRechtenDaumen']
+  selected_states: [0,1,2,3,4]
+  # selected_states: [0,1,2,3,4,5,6,7]
+  audio_path: '/kiap/data/speller/Audio/exploration'
+  number_of_stim:  2  # per state
+
+# QUESTION
+question:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 1
+  audio_path: '/kiap/data/speller/Audio/question'
+  number_of_stim: 10
+  symmetrical: True               # Same number of YES/NO stim 
+
+# FEEDBACK
+feedback:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 0
+  audio_path: '/kiap/data/speller/feedback/'
+  number_of_stim: 20
+  symmetrical: True
+  states:   # states are defined as a map from name to an array consisting of the actual target
+            # and the lower and upper bounds of the acceptance interval
+    #baseline: [0.5, 0.4, 0.6]
+    up: [1.0, 0.75, 1.0]
+    down: [0.0, 0.0, 0.25]
+
+# TRAINING_COLOR
+training_color:
+  mode: ['Training'] # Training only
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  training_string: 'ICH BIN'
+  confirmation_yes: 1
+  confirmation_no: 1
+
+# COLOR
+color:
+  mode: ['Validation','Free'] # Free or Validation
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  max_length_vocabulary: 5
+  confirmation_methods: ['single','double_yes','best_of_three']
+  selected_confirmation_method: 0
+  # corpora_path: 'Corpora'
+  corpora_path: '/kiap/data/speller/Corpora'
+  audio_path: '/kiap/data/speller/Audio'
+  general_corpus: 'cfd_tiger.p'
+  # general_corpus: 'deu_mixed-typical_2011_1M-sentences.txt'
+  user_corpus: 'user.txt'
+  word_prediction: True
+  validation_string: 'ICH BIN FELIX.' # only for validation mode
+  
+# timing:
+#     t_baseline: 10                  # sec, trial baseline duration
+#     t_response: 1                  # sec, trial response duration
+#     decoder_refresh_interval: 0.01     # sec, for continuous decoding, the cycle time of the decoder
+#     bci_loop_interval: .05            # sec, step for bci thread loop
+#     recording_loop_interval: .05            # sec, step for bci thread loop
+
+variablesToShowInGUI:
+    variableGroups: ['timing','color']
+    variableNames: ['t_baseline','t_response','bci_loop_interval','selected_mode','max_length_vocabulary']
+    t: 1

+ 86 - 0
KIAP_BCI_neurofeedback/2019-07-03/paradigm_16_04_01.yaml

@@ -0,0 +1,86 @@
+
+# EXPLORATION
+exploration:
+  mode: ['Screening'] # Screening only
+  selected_mode: 0
+  # states: ['ruhe','ja','nein','kopf','fuss']
+  # states: ['Zunge', 'Schliesse_Hand', 'RechterDaumen', 'Oeffne_Hand', 'Fuss']
+  states: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+  # states: ['rechte_hand','linke_hand','rechter_daumen','linker_daumen','zunge','fuesse']
+  # states: ['SchliesseHand','BeugeRechtenMittelfinger', 'BeugeRechtenZeigefinger','BeugeRechtenDaumen','OeffneHand',
+  # 'StreckeRechtenMittelfinger','StreckeRechtenZeigefinger','StreckeRechtenDaumen']
+  selected_states: [0,1,2,3,4]
+  # selected_states: [0,1,2,3,4,5,6,7]
+  audio_path: '/kiap/data/speller/Audio/exploration'
+  number_of_stim:  2  # per state
+
+# QUESTION
+question:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 1
+  audio_path: '/kiap/data/speller/Audio/question'
+  number_of_stim: 10
+  symmetrical: True               # Same number of YES/NO stim 
+
+# FEEDBACK
+feedback:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 0
+  audio_path: '/kiap/data/speller/feedback/'
+  number_of_stim: 20
+  symmetrical: True
+  states:   # states are defined as a map from name to an array consisting of the actual target
+            # and the lower and upper bounds of the acceptance interval
+    #baseline: [0.5, 0.4, 0.6]
+    up: [1.0, 0.7, 1.0]
+    down: [0.0, 0.0, 0.3]
+
+# TRAINING_COLOR
+training_color:
+  mode: ['Training'] # Training only
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  training_string: 'ICH BIN'
+  confirmation_yes: 1
+  confirmation_no: 1
+
+# COLOR
+color:
+  mode: ['Validation','Free'] # Free or Validation
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  max_length_vocabulary: 5
+  confirmation_methods: ['single','double_yes','best_of_three']
+  selected_confirmation_method: 0
+  # corpora_path: 'Corpora'
+  corpora_path: '/kiap/data/speller/Corpora'
+  audio_path: '/kiap/data/speller/Audio'
+  general_corpus: 'cfd_tiger.p'
+  # general_corpus: 'deu_mixed-typical_2011_1M-sentences.txt'
+  user_corpus: 'user.txt'
+  word_prediction: True
+  validation_string: 'ICH BIN FELIX.' # only for validation mode
+  
+# timing:
+#     t_baseline: 10                  # sec, trial baseline duration
+#     t_response: 1                  # sec, trial response duration
+#     decoder_refresh_interval: 0.01     # sec, for continuous decoding, the cycle time of the decoder
+#     bci_loop_interval: .05            # sec, step for bci thread loop
+#     recording_loop_interval: .05            # sec, step for bci thread loop
+
+variablesToShowInGUI:
+    variableGroups: ['timing','color']
+    variableNames: ['t_baseline','t_response','bci_loop_interval','selected_mode','max_length_vocabulary']
+    t: 1

+ 86 - 0
KIAP_BCI_neurofeedback/2019-07-03/paradigm_16_11_16.yaml

@@ -0,0 +1,86 @@
+
+# EXPLORATION
+exploration:
+  mode: ['Screening'] # Screening only
+  selected_mode: 0
+  # states: ['ruhe','ja','nein','kopf','fuss']
+  # states: ['Zunge', 'Schliesse_Hand', 'RechterDaumen', 'Oeffne_Hand', 'Fuss']
+  states: ['Zunge', 'Schliesse_Hand', 'Oeffne_Hand', 'Bewege_Augen', 'Bewege_Kopf']
+  # states: ['rechte_hand','linke_hand','rechter_daumen','linker_daumen','zunge','fuesse']
+  # states: ['SchliesseHand','BeugeRechtenMittelfinger', 'BeugeRechtenZeigefinger','BeugeRechtenDaumen','OeffneHand',
+  # 'StreckeRechtenMittelfinger','StreckeRechtenZeigefinger','StreckeRechtenDaumen']
+  selected_states: [0,1,2,3,4]
+  # selected_states: [0,1,2,3,4,5,6,7]
+  audio_path: '/kiap/data/speller/Audio/exploration'
+  number_of_stim:  2  # per state
+
+# QUESTION
+question:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 1
+  audio_path: '/kiap/data/speller/Audio/question'
+  number_of_stim: 10
+  symmetrical: True               # Same number of YES/NO stim 
+
+# FEEDBACK
+feedback:
+  mode: ['Training','Validation'] # Training or Validation
+  selected_mode: 0
+  audio_path: '/kiap/data/speller/feedback/'
+  number_of_stim: 20
+  symmetrical: True
+  states:   # states are defined as a map from name to an array consisting of the actual target
+            # and the lower and upper bounds of the acceptance interval
+    #baseline: [0.5, 0.4, 0.6]
+    up: [1.0, 0.75, 1.0]
+    down: [0.0, 0.0, 0.25]
+
+# TRAINING_COLOR
+training_color:
+  mode: ['Training'] # Training only
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  training_string: 'ICH BIN'
+  confirmation_yes: 1
+  confirmation_no: 1
+
+# COLOR
+color:
+  mode: ['Validation','Free'] # Free or Validation
+  selected_mode: 0
+  states: [
+          ['gelb', ['E', 'A', 'D', 'C', 'B', 'F', '^']],
+          ['gruen',['N', 'S', 'R', 'O', 'M', 'P', 'Q', '^']],
+          ['rot',['I', 'H', 'L', 'G', 'K', 'J', '^']],
+          ['blau',['T', 'U', 'W', 'Z', 'V', 'Y', 'X', '^']],
+          ['weiss',['<', ' ', '?', 'word', 'end', '^']]
+          ]
+  max_length_vocabulary: 5
+  confirmation_methods: ['single','double_yes','best_of_three']
+  selected_confirmation_method: 0
+  # corpora_path: 'Corpora'
+  corpora_path: '/kiap/data/speller/Corpora'
+  audio_path: '/kiap/data/speller/Audio'
+  general_corpus: 'cfd_tiger.p'
+  # general_corpus: 'deu_mixed-typical_2011_1M-sentences.txt'
+  user_corpus: 'user.txt'
+  word_prediction: True
+  validation_string: 'ICH BIN FELIX.' # only for validation mode
+  
+# timing:
+#     t_baseline: 10                  # sec, trial baseline duration
+#     t_response: 1                  # sec, trial response duration
+#     decoder_refresh_interval: 0.01     # sec, for continuous decoding, the cycle time of the decoder
+#     bci_loop_interval: .05            # sec, step for bci thread loop
+#     recording_loop_interval: .05            # sec, step for bci thread loop
+
+variablesToShowInGUI:
+    variableGroups: ['timing','color']
+    variableNames: ['t_baseline','t_response','bci_loop_interval','selected_mode','max_length_vocabulary']
+    t: 1