'''
description: script to read header and data from data.bin
author: Ioannis Vlachos
date: 02.11.18

Copyright (c) 2018 Ioannis Vlachos.
All rights reserved.

HEADER OF BINARY FILE
---------------------
laptop timestamp       np.datetime64    bytes: 8
NSP timestamp          np.int64         bytes: 8
number of bytes        np.int64         bytes: 8
number of samples      np.int64         bytes: 8
number of channels     np.int64         bytes: 8
channel list length    np.uint16        bytes: 2
channel list           np.uint16        bytes: channel list length

Each header is followed by the float32 sample data (number of bytes) and one
np.uint64 NSP sample stamp per sample; see get_session below.
'''

import csv
import datetime as dt
import glob
import os
import sys

import matplotlib.pyplot as plt
import munch  # needed by read_config below
import numpy as np
from numpy import datetime64 as dt64
from tabulate import tabulate

from . import kaux as aux
from .kaux import log
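
# The fixed-size part of each record header described in the module docstring,
# written out as a NumPy dtype for reference (a sketch only; get_session below
# remains the authoritative reader, and the variable-length channel list, the
# float32 sample data and the per-sample NSP stamps follow each header):
HEADER_DTYPE = np.dtype([
    ('laptop_ts', '<M8[us]'),  # laptop timestamp, np.datetime64[us]
    ('nsp_ts', '<i8'),         # NSP timestamp
    ('n_bytes', '<i8'),        # number of bytes of float32 sample data
    ('n_samples', '<i8'),      # number of samples
    ('n_channels', '<i8'),     # number of channels
    ('ch_list_len', '<u2'),    # length of the channel list in bytes
])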

# params = aux.load_config()


# def get_event_name(filename, events_file_names)


def get_raw(verbose=0, n_triggers=2, exploration=False, feedback=False, triggers_all=False, fids=None, trigger_pos='start'):
    '''read raw data from one or more binary files

    trigger_pos: ['start', 'stop']
    '''
    params = aux.load_config()
    n_channels = params.daq.n_channels

    file_names = []
    # events_file_names = []
    print('Available binary data files:\n')
    for ii, file_name in enumerate(sorted(glob.iglob(params.file_handling.data_path + '**/*.bin', recursive=True))):
        print(f'{ii}, {file_name}, {os.path.getsize(file_name)//1000}K')
        file_names.append(file_name)

    # for ii, file_name in enumerate(sorted(glob.iglob(params.file_handling.data_path + '**/*.txt', recursive=True))):
    #     events_file_names.append(file_name)

    if not fids:
        fids = [int(x) for x in input('\nSelect file ids (separated by space): ').split()]  # select file ids
        if not fids:
            fids = [len(file_names) - 1]  # default: most recent file
    print(f'Selected: {fids}')

    data_tot = np.empty((len(fids), 1), dtype=object)
    time_stamps_tot = np.empty((len(fids), 1), dtype=object)
    if triggers_all:
        triggers_tot = np.empty((len(fids), 6), dtype=object)
    elif exploration:
        triggers_tot = np.empty((len(fids), 1), dtype=object)
    elif feedback:
        triggers_tot = np.empty((len(fids), 2), dtype=object)  # up and down
    else:
        triggers_tot = np.empty((len(fids), n_triggers), dtype=object)

    ch_rec_list_tot = np.empty((len(fids), 1), dtype=object)

    for ii, fid in enumerate(fids):  # go through all selected sessions
        data, time_stamps, ch_rec_list = get_session(file_names[fid], verbose=verbose)
        data = np.delete(data, params.classifier.exclude_data_channels, axis=1)
        # triggers = get_triggers(os.path.dirname(file_names[fid]) + '/events.txt', time_stamps, n_triggers)
        event_file_name = file_names[fid].replace('data_', 'events_').replace('.bin', '.txt')
        info_file_name = file_names[fid].replace('data_', 'info_').replace('.bin', '.log')

        if triggers_all:
            triggers = get_triggers_all(event_file_name, time_stamps, trigger_pos)
        elif exploration:
            triggers = get_triggers_exploration(event_file_name, time_stamps)
        elif feedback:
            triggers = get_triggers_feedback(event_file_name, time_stamps, trigger_pos, n_triggers)
        else:
            triggers = get_triggers(event_file_name, time_stamps, trigger_pos, n_triggers)

        # yes_mask, no_mask = get_accuracy_question(info_file_name, n_triggers)
        # triggers[0] = np.where(yes_mask, triggers[0], '')
        # triggers[1] = np.where(no_mask, triggers[1], '')

        data_tot[ii, 0] = data
        time_stamps_tot[ii, 0] = time_stamps
        if exploration:
            triggers_tot[ii, 0] = triggers
        else:
            triggers_tot[ii, :] = triggers
        ch_rec_list_tot[ii, 0] = ch_rec_list
        # triggers_tot[ii, 1] = triggers[1]
        print(f'\nRead binary neural data from file {file_names[fid]}')
        print(f'Read trigger info events from file {event_file_name}')
        print(f'Session {ii}, data shape: {data.shape}')

    if exploration:
        states = read_config('paradigm.yaml').exploration.states

    for jj, _ in enumerate(fids):
        print()
        if triggers_all:
            for ii in range(6):
                print(f'Session {fids[jj]}, Class {ii}, trigger #: {triggers_tot[jj, ii].shape}')
        elif exploration:
            for ii in range(len(triggers_tot[jj, 0][0])):
                print(f'Session {fids[jj]}, State {states[ii]}, trigger #: {triggers_tot[jj, 0][0][ii].shape}')
        else:
            for ii in range(n_triggers):
                print(f'Session {fids[jj]}, Class {ii}, trigger #: {triggers_tot[jj, ii].shape}')

    file_names = [file_names[ii] for ii in fids]
    return data_tot, time_stamps_tot, triggers_tot, ch_rec_list_tot, file_names
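
# Example use (a sketch; file ids, channel counts and the number of classes
# depend on the local recordings and config):
#
#   data_tot, ts_tot, trig_tot, ch_lists, names = get_raw(n_triggers=2, fids=[0])
#   session_data = data_tot[0, 0]      # (n_samples, n_channels) array of one session
#   class0_onsets = trig_tot[0, 0][0]  # sample indices of the class-0 triggers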


def get_session(file_name, verbose=0, t_lim_start=None, t_lim_end=None, params=None):
    '''read one binary session file and return (data, time_stamps, ch_rec_list)'''
    if params is None:
        params = aux.load_config()

    ii = 0
    # data = np.empty((0, params.daq.n_channels-len(params.daq.exclude_channels)))
    data = np.empty((0, params.daq.n_channels_max))
    ch_rec_list = np.empty(0, dtype=np.uint16)  # fallback if the file contains no records

    log.info(f'Data shape: {data.shape}')

    time_stamps = []
    date_times = []
    time_stamps_rcv = []

    with open(file_name, 'rb') as fh:
        log.info(f'\nReading binary file {file_name}...\n')
        while True:
            tmp = np.frombuffer(fh.read(8), dtype='datetime64[us]')  # laptop timestamp

            if tmp.size == 0:  # end of file reached
                break
            t_now1 = tmp

            if (t_lim_end is not None) and (t_lim_end < t_now1[0]):
                break

            t_now2 = np.frombuffer(fh.read(8), dtype=np.int64)[0]  # NSP timestamp

            n_bytes = int.from_bytes(fh.read(8), byteorder='little')     # number of bytes
            n_samples = int.from_bytes(fh.read(8), byteorder='little')   # number of samples
            n_ch = int.from_bytes(fh.read(8), byteorder='little')        # number of channels
            ch_rec_list_len = int.from_bytes(fh.read(2), byteorder='little')
            ch_rec_list = np.frombuffer(fh.read(ch_rec_list_len), dtype=np.uint16)  # detailed channel list
            # log.info(f'recorded channels: {ch_rec_list}')

            d = fh.read(n_bytes)
            d2 = np.frombuffer(d, dtype=np.float32)
            d3 = d2.reshape(d2.size // n_ch, n_ch)  # data, shape: (n_samples, n_ch)
            log.info(f'data shape: {d3.shape}')

            if data.size == 0 and data.shape[1] != d3.shape[1]:
                log.warning(f'Shape mismatch: {d3.shape} vs {data.shape[1]}. Using data shape from file: {d3.shape}')
                data = np.empty((0, d3.shape[1]))

            # fct = params.daq.spike_rates.bin_width * 30000  # factor to get correct starting time in ticks
            # time_stamps.extend(np.arange(t_now2-d3.shape[0]*fct + 1, t_now2+1))  # check if +1 index is correct
            # time_stamps.extend(np.arange(t_now2-d3.shape[0]*fct + 1, t_now2+1, 3000))  # check if +1 index is correct
            ts = np.frombuffer(fh.read(8 * d3.shape[0]), dtype=np.uint64)  # one NSP sample stamp per sample

            # keep the block only if its estimated start time lies after t_lim_start
            # (block start is estimated from the NSP tick difference at 30 kHz)
            if (t_lim_start is None) or (t_lim_start <= t_now1[0] - np.timedelta64(int((t_now2 - ts[0]) / 3e4), 's')):
                data = np.concatenate((data, d3))
                time_stamps.extend(ts)
                date_times.append(t_now1[0])
                time_stamps_rcv.append(t_now2)
            else:
                log.info(f'Skipped set {t_now1[0]} | {ts[0]} n_samples: {n_samples}')

            # if ts.size[0] > 0:
            log.info(f'ts size: {ts.size}')
            # log.info(time_stamps)

            # if verbose:
            #     print(ii, t_now1[0], t_now2, n_bytes, n_samples, n_ch, d3[10:20, 0], np.any(d3))
            ii += 1

    return data, np.array(time_stamps, dtype=np.uint), ch_rec_list
    # return data, np.array(time_stamps, dtype=np.uint), ch_rec_list, np.array(date_times, dtype='datetime64[us]'), \
    #     np.array(time_stamps_rcv, dtype=np.uint64)
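

# Helper sketch (not used above): convert the NSP sample stamps returned by
# get_session into seconds relative to the first stamp, assuming the 30 kHz
# NSP clock implied by the 3e4 factor used in get_session.
def nsp_ticks_to_seconds(time_stamps, fs=30000):
    ts = np.asarray(time_stamps, dtype=np.int64)
    return (ts - ts[0]) / float(fs)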


def get_accuracy_question(fname, n_triggers=2):
    '''return boolean masks of correct decoder decisions for yes and no questions'''
    with open(fname, 'r') as fh:
        events = fh.read().splitlines()

    cl1 = []
    cl2 = []

    for ev in events:
        if 'Yes Question' in ev:
            cl1.append('Decoder decision: yes' in ev)
        elif 'No Question' in ev:
            cl2.append('Decoder decision: no' in ev)

    return [np.array(cl1, dtype=bool), np.array(cl2, dtype=bool)]


def get_triggers(fname, time_stamps, trigger_pos, n_triggers=2):
    with open(fname, 'r') as fh:
        # events = fh.readlines()
        events = fh.read().splitlines()

    cl1 = []
    cl2 = []
    cl3 = []
    tt1 = []
    tt2 = []
    tt3 = []

    for ev in events:
        if 'response' in ev and 'yes' in ev and trigger_pos in ev:
            cl1.append(int(ev.split(',')[0]))
        elif 'response' in ev and 'no' in ev and trigger_pos in ev:
            cl2.append(int(ev.split(',')[0]))
        elif 'baseline' in ev and 'start' in ev:
            cl3.append(int(ev.split(',')[0]))

    if n_triggers == 2:
        # cl2.extend(cl3)  # add baseline to class 2
        cl3 = []

    for ev in events:
        if 'response' in ev and trigger_pos in ev:
            print(f'\033[91m{ev}\033[0m')  # highlight response events in red
        else:
            print(ev)

    # map each event time stamp to the closest recorded sample time stamp
    for ii in cl1:
        tt1.append(time_stamps.flat[np.abs(time_stamps - ii).argmin()])

    for ii in cl2:
        tt2.append(time_stamps.flat[np.abs(time_stamps - ii).argmin()])

    for ii in cl3:
        tt3.append(time_stamps.flat[np.abs(time_stamps - ii).argmin()])

    ind1 = np.where(np.in1d(time_stamps, tt1))[0][np.newaxis, :]
    ind2 = np.where(np.in1d(time_stamps, tt2))[0][np.newaxis, :]
    ind3 = np.where(np.in1d(time_stamps, tt3))[0][np.newaxis, :]

    print()
    print(cl1, tt1, ind1)
    print(cl2, tt2, ind2)
    print(cl3, tt3, ind3)

    if n_triggers == 1:
        res = [ind1]
    elif n_triggers == 2:
        res = [ind1, ind2]
        # res = [ind1, np.hstack((ind2, ind3))]  # put no and baseline together
    elif n_triggers == 3:
        res = [ind1, ind2, ind3]
    else:
        raise ValueError(f'Unsupported n_triggers: {n_triggers}')

    return res
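

# Vectorized sketch of the nearest-sample lookup used above: map event time
# stamps to indices of the closest entries in time_stamps in one call instead
# of an argmin per event. Assumes time_stamps is sorted, which holds for the
# NSP sample stamps returned by get_session. Name and interface are
# illustrative only.
def nearest_sample_indices(time_stamps, event_ticks):
    ts = np.asarray(time_stamps, dtype=np.int64)
    ev = np.asarray(event_ticks, dtype=np.int64)
    pos = np.clip(np.searchsorted(ts, ev), 1, len(ts) - 1)
    left, right = ts[pos - 1], ts[pos]
    return np.where(np.abs(ev - left) <= np.abs(right - ev), pos - 1, pos)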


def get_triggers_feedback(fname, time_stamps, trigger_pos, n_triggers=2):
    with open(fname, 'r') as fh:
        # events = fh.readlines()
        events = fh.read().splitlines()

    cl1 = []
    cl2 = []
    cl3 = []
    tt1 = []
    tt2 = []
    tt3 = []

    for ev in events:
        if 'response' in ev and 'down' in ev and trigger_pos in ev:
            cl2.append(int(ev.split(',')[0]))
        elif 'response' in ev and 'up' in ev and trigger_pos in ev:
            cl1.append(int(ev.split(',')[0]))
        elif 'baseline' in ev and 'start' in ev:
            cl3.append(int(ev.split(',')[0]))

    if n_triggers == 2:
        # cl2.extend(cl3)  # add baseline to class 2
        cl3 = []

    for ev in events:
        if 'response' in ev and trigger_pos in ev:
            print(f'\033[91m{ev}\033[0m')  # highlight response events in red
        else:
            print(ev)

    for ii in cl1:
        tt1.append(time_stamps.flat[np.abs(time_stamps - ii).argmin()])

    for ii in cl2:
        tt2.append(time_stamps.flat[np.abs(time_stamps - ii).argmin()])

    for ii in cl3:
        tt3.append(time_stamps.flat[np.abs(time_stamps - ii).argmin()])

    ind1 = np.where(np.in1d(time_stamps, tt1))[0][np.newaxis, :]
    ind2 = np.where(np.in1d(time_stamps, tt2))[0][np.newaxis, :]
    ind3 = np.where(np.in1d(time_stamps, tt3))[0][np.newaxis, :]

    print()
    print(cl1, tt1, ind1)
    print(cl2, tt2, ind2)
    print(cl3, tt3, ind3)

    if n_triggers == 1:
        res = [ind1]
    elif n_triggers == 2:
        res = [ind1, ind2]
        # res = [ind1, np.hstack((ind2, ind3))]  # put no and baseline together
    elif n_triggers == 3:
        res = [ind1, ind2, ind3]
    else:
        raise ValueError(f'Unsupported n_triggers: {n_triggers}')

    return res


def get_triggers_exploration(fname, time_stamps):
    with open(fname, 'r') as fh:
        # events = fh.readlines()
        events = fh.read().splitlines()

    config = read_config('paradigm.yaml')
    states = config.exploration.states

    cl1 = [[] for _ in range(len(states))]
    cl2 = []
    # cl3 = []
    tt1 = [[] for _ in range(len(states))]
    tt2 = []
    # tt3 = []

    for ev in events:
        for ii, state in enumerate(states):
            if 'response' in ev and state in ev and 'start' in ev:
                cl1[ii].append(int(ev.split(',')[0]))
        if 'baseline' in ev and 'start' in ev:
            cl2.append(int(ev.split(',')[0]))

    for ev in events:
        if 'response' in ev and 'start' in ev:
            print(f'\033[91m{ev}\033[0m')  # highlight response events in red
        else:
            print(ev)

    for ii in range(len(cl1)):
        for jj in cl1[ii]:
            tt1[ii].append(time_stamps.flat[np.abs(time_stamps - jj).argmin()])

    for ii in cl2:
        tt2.append(time_stamps.flat[np.abs(time_stamps - ii).argmin()])

    ind1 = [[] for _ in range(len(states))]
    for ii in range(len(tt1)):
        ind1[ii] = np.where(np.in1d(time_stamps, tt1[ii]))[0][np.newaxis, :]

    ind2 = np.where(np.in1d(time_stamps, tt2))[0][np.newaxis, :]
    # ind3 = np.where(np.in1d(time_stamps, tt3))[0][np.newaxis, :]

    print()
    print(cl1, tt1, ind1)
    print(cl2, tt2, ind2)
    # print(cl3, tt3, ind3)

    res = [ind1, ind2]

    return res


def get_triggers_all(fname, time_stamps, trigger_pos, n_triggers=2):
    with open(fname, 'r') as fh:
        # events = fh.readlines()
        events = fh.read().splitlines()

    cl1 = []
    cl2 = []
    cl3 = []
    cl4 = []
    cl5 = []
    cl6 = []
    tt1 = []
    tt2 = []
    tt3 = []
    tt4 = []
    tt5 = []
    tt6 = []

    for ev in events:
        if 'baseline' in ev and 'start' in ev:
            cl1.append(int(ev.split(',')[0]))

        elif 'baseline' in ev and 'stop' in ev:
            cl2.append(int(ev.split(',')[0]))

        elif 'stimulus' in ev and 'start' in ev:
            cl3.append(int(ev.split(',')[0]))

        elif 'stimulus' in ev and 'stop' in ev:
            cl4.append(int(ev.split(',')[0]))

        elif 'response' in ev and 'start' in ev:
            cl5.append(int(ev.split(',')[0]))

        elif 'response' in ev and 'stop' in ev:
            cl6.append(int(ev.split(',')[0]))

    for ev in events:
        if 'response' in ev and trigger_pos in ev:
            print(f'\033[91m{ev}\033[0m')  # highlight response events in red
        else:
            print(ev)

    for ii in cl1:
        tt1.append(time_stamps.flat[np.abs(np.int64(time_stamps - ii)).argmin()])

    for ii in cl2:
        tt2.append(time_stamps.flat[np.abs(np.int64(time_stamps - ii)).argmin()])

    for ii in cl3:
        tt3.append(time_stamps.flat[np.abs(np.int64(time_stamps - ii)).argmin()])

    for ii in cl4:
        tt4.append(time_stamps.flat[np.abs(np.int64(time_stamps - ii)).argmin()])

    for ii in cl5:
        tt5.append(time_stamps.flat[np.abs(np.int64(time_stamps - ii)).argmin()])

    for ii in cl6:
        tt6.append(time_stamps.flat[np.abs(np.int64(time_stamps - ii)).argmin()])

    ind1 = np.where(np.in1d(time_stamps, tt1))[0][np.newaxis, :]
    ind2 = np.where(np.in1d(time_stamps, tt2))[0][np.newaxis, :]
    ind3 = np.where(np.in1d(time_stamps, tt3))[0][np.newaxis, :]
    ind4 = np.where(np.in1d(time_stamps, tt4))[0][np.newaxis, :]
    ind5 = np.where(np.in1d(time_stamps, tt5))[0][np.newaxis, :]
    ind6 = np.where(np.in1d(time_stamps, tt6))[0][np.newaxis, :]

    print('\nTriggers and timestamps')
    print(cl1, tt1, ind1)
    print(cl2, tt2, ind2)
    print(cl3, tt3, ind3)
    print(cl4, tt4, ind4)
    print(cl5, tt5, ind5)
    print(cl6, tt6, ind6)

    # order: baseline start/stop, stimulus start/stop, response start/stop
    res = [ind1, ind2, ind3, ind4, ind5, ind6]

    return res


def read_config(file_name):
    '''load a YAML config file into a munch object with attribute access'''
    with open(file_name) as stream:
        return munch.fromYAML(stream)
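

# read_config is called above as read_config('paradigm.yaml'), and only the
# exploration.states list is accessed in this module. A minimal sketch of the
# expected file (state names are placeholders, not the real paradigm):
#
#   exploration:
#     states:
#       - state_a
#       - state_b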


if __name__ == '__main__':
    # note: because of the relative imports above, run this file as a module (python -m ...)

    print("\nto read binary data use: 'data = get_raw(verbose=1)'")
    print("\nto read log file use: 'log = read_log(date)'")

    params = aux.load_config()

    exploration = aux.args.speller == 'exploration'
    feedback = aux.args.speller == 'feedback'

    col = ['b', 'r', 'g']
    data_tot, tt, triggers_tot, ch_rec_list, file_names = get_raw(n_triggers=params.classifier.n_classes, exploration=exploration, feedback=feedback)

    if not exploration:
        plt.figure(1)
        plt.clf()
        xx = np.arange(data_tot[0, 0].shape[0]) * 0.05  # time axis: 0.05 s per sample (hard-coded bin width)

        for cl_id in range(triggers_tot.shape[1]):
            markerline, stemlines, baseline = plt.stem(triggers_tot[0, cl_id][0] * 0.05, triggers_tot[0, cl_id][0] * 0 + 50, '--', basefmt=' ')
            plt.setp(stemlines, alpha=0.8, color=col[cl_id], lw=1)
            plt.setp(markerline, marker=None)

        plt.gca().set_prop_cycle(None)
        plt.plot(xx, data_tot[0, 0][:, :2], alpha=0.5)
        # plt.plot(cur_data[0, 0][:, :2])

        plt.show()