'''
description: offline analysis of kiap data
author: Ioannis Vlachos
date: 02.04.2019

Copyright (c) 2019 Ioannis Vlachos.
All rights reserved.
'''

import re
import os
import glob
import importlib
import urllib
import time

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy import signal
from scipy import stats
from cachetools import cached, LRUCache, TTLCache
from neo.io.blackrockio import BlackrockIO

import aux
from aux import log
from analytics import analytics1

importlib.reload(analytics1)


# @cached(cache={})
def read_data():
    '''returns first array1 and then array2 channels'''
    data = reader.get_analogsignal_chunk(i_stop=eval(str(params.lfp.i_stop)),
                                         channel_indexes=list(set(params.lfp.array1) | set(params.lfp.array2)))
    return data


def get_triggers(n_triggers=10, verbose=True):
    '''get triggers from corresponding nev files'''
    try:
        triggers_ts = [tt[0] for tt in reader.nev_data['Comments'][0]]
        triggers_txt = [tt[5].decode('utf-8') for tt in reader.nev_data['Comments'][0]]
    except Exception:
        log.error('Triggers could not be loaded. Does Nev file exist?')
        return None

    if verbose:
        print(reader.nev_data['Comments'])

    triggers = [list() for ii in range(8)]
    for ii in range(len(triggers_txt)):
        if 'SchliesseHand, response, start' in triggers_txt[ii]:
            triggers[0].append(triggers_ts[ii] // params.lfp.sampling_ratio)
        elif 'BeugeRechtenMittelfinger, response, start' in triggers_txt[ii]:
            triggers[1].append(triggers_ts[ii] // params.lfp.sampling_ratio)
        elif 'BeugeRechtenZeigefinger, response, start' in triggers_txt[ii]:
            triggers[2].append(triggers_ts[ii] // params.lfp.sampling_ratio)
        elif 'BeugeRechtenDaumen, response, start' in triggers_txt[ii]:
            triggers[3].append(triggers_ts[ii] // params.lfp.sampling_ratio)
        elif 'OeffneHand, response, start' in triggers_txt[ii]:
            triggers[4].append(triggers_ts[ii] // params.lfp.sampling_ratio)
        elif 'StreckeRechtenMittelfinger, response, start' in triggers_txt[ii]:
            triggers[5].append(triggers_ts[ii] // params.lfp.sampling_ratio)
        elif 'StreckeRechtenZeigefinger, response, start' in triggers_txt[ii]:
            triggers[6].append(triggers_ts[ii] // params.lfp.sampling_ratio)
        elif 'StreckeRechtenDaumen, response, start' in triggers_txt[ii]:
            triggers[7].append(triggers_ts[ii] // params.lfp.sampling_ratio)

    for ii in range(8):
        triggers[ii] = np.array(triggers[ii])

    # triggers = np.array(triggers_yes)
    # triggers_no = np.array(triggers_no)

    # get timestamps for ns2
    # triggers = triggers_yes // params.lfp.sampling_ratio
    # triggers_no_ns2 = triggers_no // params.lfp.sampling_ratio

    return triggers


def get_valid_trials(triggers_no_ns2, triggers_yes_ns2):
    '''mark trials whose data exceed the artifact threshold as invalid'''
    trials_msk = np.ones((10, 2), dtype=bool)
    for ii, tt in enumerate(triggers_no_ns2):
        tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
        if np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr):
            trials_msk[ii, 0] = False
    for ii, tt in enumerate(triggers_yes_ns2):
        tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
        if np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr):
            trials_msk[ii, 1] = False
        print(ii)

    print('\nTrials excluded', np.where(trials_msk[:, 0] == False)[0], np.where(trials_msk[:, 1] == False)[0])

    return trials_msk
def calc_cc():
    '''calculate time-resolved correlation coefficients between channels of each array'''
    cc_step = 5000
    rng = range(0, data1.shape[0], cc_step)
    cc1 = np.zeros((len(rng), 3))
    cc2 = np.zeros((len(rng), 3))
    hist1 = np.zeros((len(rng), 100))
    hist2 = np.zeros((len(rng), 100))

    for ii, idx in enumerate(rng):
        # cc1[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
        # cc1[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
        # cc1[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
        cc1[ii, 0] = np.min(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
        cc1[ii, 1] = np.max(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
        cc1[ii, 2] = np.mean(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
        # hist1[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array1].T)).flatten(), 100)[1][:-1]

        # cc2[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
        # cc2[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
        # cc2[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
        cc2[ii, 0] = np.min(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
        cc2[ii, 1] = np.max(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
        cc2[ii, 2] = np.mean(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
        # hist2[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array2].T)), 100)[1][:-1]
        # print(ii)

    if params.lfp.plot.general:
        plt.figure()
        plt.plot(cc1)
        plt.gca().set_prop_cycle(None)
        plt.plot(cc2, '-.')
        plt.show()

    return None


def compute_psth(triggers, arr_nr=1, sub_band=-1):
    '''compute the psth for each of the 8 trigger classes'''
    psth_len = np.diff(psth_win)[0]
    if arr_nr == 1:
        ch_nr = ch_nr1
        if sub_band == -1:
            data_tmp = data01                   # raw data
        else:
            data_tmp = data1[:, :, sub_band]    # sub_band
    elif arr_nr == 2:
        ch_nr = ch_nr2
        if sub_band == -1:
            data_tmp = data02                   # raw data
        else:
            data_tmp = data2[:, :, sub_band]    # sub_band

    psth = [list() for ii in range(8)]
    for ii in range(8):
        psth[ii] = np.zeros((len(triggers[ii]), psth_len, ch_nr))

    for jj in range(8):
        for ii in range(len(triggers[jj])):
            if (triggers[jj][ii] + psth_win[0] > 0) & (triggers[jj][ii] + psth_win[1] < len(data_tmp)):
                tmp = data_tmp[(triggers[jj][ii] + psth_win[0]):(triggers[jj][ii] + psth_win[1]), :]
                psth[jj][ii, :, :] = tmp

    # psth_yes, psth_no = remove_trials(psth_yes, psth_no)

    return psth
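# The three sub-bands computed further down rely on analytics1.bw_filter, whose
# implementation is not part of this file. As a rough orientation only, a
# zero-phase Butterworth band-pass along the sample axis could look like the
# sketch below; the real bw_filter may differ in signature, return values
# (it is unpacked into three values below) and filter design, so this stand-in
# is purely illustrative and is not called anywhere in this script.
def bw_filter_sketch(x, fc, fs, order):
    '''illustrative band-pass between fc[0] and fc[1] Hz (both > 0) for x of shape (samples, channels)'''
    b, a = signal.butter(order, [fc[0] / (fs / 2.), fc[1] / (fs / 2.)], btype='bandpass')
    return signal.filtfilt(b, a, x, axis=0)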
def remove_channels(data01, data02):
    '''remove channels for which variance lies outside 25-75 percentiles'''
    var1 = np.var(data01, axis=0)
    var2 = np.var(data02, axis=0)

    L1 = np.percentile(var1, 25, axis=0, keepdims=True)
    L2 = np.percentile(var2, 25, axis=0, keepdims=True)
    U1 = np.percentile(var1, 75, axis=0, keepdims=True)
    U2 = np.percentile(var2, 75, axis=0, keepdims=True)

    w = 1                                       # manual parameter that affects the valid range
    valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
    valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]

    # plt.plot(var1, 'C0')
    # plt.plot(var2, 'C1')

    # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
    array1_msk = np.unique(((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0])
    array2_msk = np.unique(((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0])

    array1_msk = np.hstack((array1_msk, params.lfp.array1_exclude)).astype(int)
    array2_msk = np.hstack((array2_msk, params.lfp.array2_exclude)).astype(int)
    array1_msk.sort()
    array2_msk.sort()

    print(f'excluded channels, array1: {array1_msk}')
    print(f'excluded channels, array2: {array2_msk}')

    plot_channels(data01, [], arr_id=1, fs=params.lfp.fs, array_msk=array1_msk, i_stop=1000, step=1, color='C0')
    plot_channels(data02, [], arr_id=2, fs=params.lfp.fs, array_msk=array2_msk, i_stop=1000, step=1, color='C1')

    data01 = np.delete(data01, array1_msk, axis=1)
    data02 = np.delete(data02, array2_msk, axis=1)

    return data01, data02, array1_msk, array2_msk


def remove_trials(psth_yes, psth_no):
    '''remove trials for which variance lies outside 25-75 percentiles'''
    var1 = np.var(psth_yes, axis=1)
    var2 = np.var(psth_no, axis=1)

    L1 = np.percentile(var1, 25, axis=0, keepdims=True)
    L2 = np.percentile(var2, 25, axis=0, keepdims=True)
    U1 = np.percentile(var1, 75, axis=0, keepdims=True)
    U2 = np.percentile(var2, 75, axis=0, keepdims=True)

    w = 10
    valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
    valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]

    # array1_msk = np.unique((np.where(var1 < valid1[0])[0] or np.where(var1 > valid1[1])[0]))
    # array2_msk = np.unique((np.where(var2 < valid2[0])[0] or np.where(var2 > valid2[1])[0]))
    array1_msk = ((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0]
    array2_msk = ((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0]

    print(f'excluded yes-trials: {array1_msk}')
    print(f'excluded no-trials: {array2_msk}')

    psth_yes = np.delete(psth_yes, array1_msk, axis=0)
    psth_no = np.delete(psth_no, array2_msk, axis=0)

    return psth_yes, psth_no


def plot_channels(data, ch_ids, fs, array_msk=[], arr_id=1, i_start=0, i_stop=10000, step=5, color='C0'):
    '''plot all data from array1 or array2 in the time domain'''
    array_msk = np.asarray(array_msk, dtype=int)    # so that .size also works for the list default

    if not params.lfp.zscore:                       # z-score only for visualization
        data = stats.zscore(data)

    if ch_ids == []:
        ch_ids = range(data.shape[1])

    if i_stop == -1:
        i_stop = data.shape[0]

    offset = np.cumsum(4 * np.var(data[:, ch_ids], axis=0)[:, np.newaxis]).T

    plt.figure()
    plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, ch_ids] + offset, color=color, lw=1)

    if array_msk.size > 0:                          # highlight excluded channels
        plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, array_msk] + offset[array_msk],
                 color='C3', lw=1)

    plt.xlabel('Time (sec)')
    plt.yticks(offset[::step], range(0, len(ch_ids), step))
    plt.ylim(0, offset[-1] + 4)
    plt.title(f'raw data from array {arr_id}')
    plt.tight_layout()

    return None
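# plot_spectra below plots the outputs of analytics1.calc_fft (Y01, ff01, ...),
# which is also not defined in this file. Judging only from how it is called
# (calc_fft(data, fs, i_stop=1e5, axis=0) -> (Y, ff)), an amplitude spectrum of
# the first i_stop samples along `axis` is one plausible reading; the sketch
# below is an assumption for orientation and is not called anywhere.
def calc_fft_sketch(x, fs, i_stop=None, axis=0):
    '''illustrative amplitude spectrum of x along `axis`, plus matching frequency vector'''
    if i_stop is not None:
        n = min(int(i_stop), x.shape[axis])
        x = np.take(x, np.arange(n), axis=axis)
    Y = np.abs(np.fft.rfft(x, axis=axis)) / x.shape[axis]
    ff = np.fft.rfftfreq(x.shape[axis], d=1. / fs)
    return Y, ff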
def plot_spectra(ch_ids=[0]):
    '''plot spectra of raw data and sub_bands, single channels from ch_ids and averages'''
    mpl.rcParams['axes.formatter.limits'] = (-2, 2)
    yy = data01.std() * 2
    xx = np.arange(data1.shape[0]) / params.lfp.fs      # plot raw and filtered traces for visual inspection
    xmax = 150

    plt.figure(1, figsize=(10, 10))
    plt.clf()

    # column 1: first array
    plt.subplot(521)                                    # plot raw
    plt.plot(ff1, Y01[:, ch_ids[0]], 'C0', label=f'channel: {array1[ch_ids[0]]}')
    plt.plot(ff1, Y01.mean(1), 'C3', alpha=0.2, label='average')
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('array1 raw')
    # plt.legend()
    plt.gca().set_xticklabels([])
    plt.legend()

    plt.subplot(523)
    plt.plot(ff1, Y1[:, ch_ids[0], 0], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 0].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 0')

    plt.subplot(525)
    plt.plot(ff1, Y1[:, ch_ids[0], 1], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 1].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 1')

    plt.subplot(527)
    plt.plot(ff1, Y1[:, ch_ids[0], 2], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 2].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('sub_band 2')

    plt.subplot(529)
    plt.loglog(ff1, Y01[:, ch_ids[0]], 'C0')
    plt.loglog(ff1, Y01.mean(1), 'C3', alpha=0.2)
    plt.title('array1 raw loglog')
    plt.xlabel('Frequency (Hz)')
    # plt.legend()

    # column 2: second array
    plt.subplot(522)
    plt.plot(ff02, Y02[:, ch_ids[0]], 'C0', label=f'channel: {array2[ch_ids[0]]}')
    plt.plot(ff02, Y02.mean(1), 'C3', alpha=0.2, label='average')
    plt.xlim(xmin=0, xmax=xmax)
    # plt.gca().set_yticklabels([])
    plt.title('array2 raw')
    # plt.legend()
    plt.gca().set_xticklabels([])
    plt.legend()

    plt.subplot(524)
    plt.plot(ff2, Y2[:, ch_ids[0], 0], 'C0', label='array2')
    plt.plot(ff2, Y2[:, :, 0].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 0')

    plt.subplot(526)
    plt.plot(ff2, Y2[:, ch_ids[0], 1], 'C0', label='array2')
    plt.plot(ff2, Y2[:, :, 1].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    # plt.gca().set_yticklabels([])
    plt.title('sub_band 1')

    plt.subplot(528)
    plt.plot(ff2, Y2[:, ch_ids[0], 2], 'C0', label='array2')
    plt.plot(ff2, Y2[:, :, 2].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('sub_band 2')
    # plt.gca().set_yticklabels([])

    plt.subplot(5, 2, 10)
    plt.loglog(ff02, Y02[:, ch_ids[0]], 'C0')
    plt.loglog(ff02, Y02.mean(1), 'C3', alpha=0.2)
    # plt.gca().set_yticklabels([])
    plt.title('array2 raw loglog')
    plt.xlabel('Frequency (Hz)')

    plt.tight_layout()
    plt.draw()
    plt.show()

    return None


def plot_spectra2(arr_nr=1):
    '''plot spectra of raw data for all channels of arr_nr, compare with spectrum of white noise'''
    mpl.rcParams['axes.formatter.limits'] = (-2, 2)

    if arr_nr == 1:
        YY = Y01
        d0, d1 = data01.shape
    elif arr_nr == 2:
        YY = Y02
        d0, d1 = data02.shape

    Y_GWN, _ = analytics1.calc_fft(np.random.randn(d0, d1), params.lfp.fs, i_stop=1e5, axis=0)

    plt.figure(1, figsize=(10, 10))
    plt.clf()

    for ii in range(YY.shape[1]):
        ax = plt.subplot(8, 8, ii + 1)
        plt.loglog(ff1, YY[:, ii], 'C0')
        plt.loglog(ff1, Y_GWN[:, ii], 'C1', alpha=0.5)
        plt.ylim(10e-16, 10e2)
        # if ii < 56:
        if (ii // 8) < (d1 // 8):
            ax.set_xticks([])
        if np.mod(ii, 8) != 0:
            ax.set_yticks([])
    plt.draw()

    return None
# NOTE: this plot_psth is overridden by the second definition further down
# (Python keeps the last definition); it belongs to the earlier yes/no paradigm.
def plot_psth(ff=[], sub_band=0, ch_id=0, trial_id=0, step=10):
    '''plot psth for no and yes responses separately'''
    yy = max(data01.std(), data1[:, ch_id, sub_band].std()) * 2
    xx1 = np.arange(data1.shape[0]) / params.lfp.fs
    xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs
    f_idx = ff < 150

    plt.figure(figsize=(10, 10))
    plt.clf()
    log.info(f'Selected sub-band: {sub_band}')

    plt.subplot(411)
    plt.plot(xx1[::step], data01[::step, ch_id], label='raw ns2')                                           # plot raw
    plt.plot(xx1[::step], data1[::step, ch_id, sub_band], 'C1', alpha=0.5, label=f'sub-band: {sub_band}')   # plot sub_band (low, middle, high)
    for ii in range(8):
        plt.stem(triggers[ii] / params.lfp.fs, triggers[ii] * 0 + yy * 0.8, markerfmt='C3o')                # red
    # plt.stem(triggers_yes_ns2 / params.lfp.fs, triggers_yes_ns2 * 0 + yy * 0.8, markerfmt='C2o', label='yes')  # green
    plt.ylim(-yy, yy)
    plt.xlabel('sec')
    plt.legend(loc=1)

    plt.subplot(423)                                    # plot psth no-questions
    plt.plot(xx2, psth_no.mean(0)[:, ch_id], label='trial av. no')                      # average across trials, plot 1 channel
    plt.plot(xx2, psth_no.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. no')    # average across trials and channels
    plt.xlabel('sec')
    plt.legend()

    plt.subplot(424)                                    # plot psth yes-questions
    plt.plot(xx2, psth_yes.mean(0)[:, ch_id], label='trial av. yes')                    # average across trials, plot 1 channel
    plt.plot(xx2, psth_yes.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. yes')  # average across trials and channels
    plt.xlabel('sec')
    plt.legend()

    plt.subplot(425)                                    # plot spectrogram no-question, single channel and trial
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :] / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :])
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)

    plt.subplot(426)                                    # plot spectrogram yes-question, single channel and trial
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :] / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :])
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)

    plt.subplot(427)                                    # plot spectrogram no-question, averaged
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0) / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0))
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)

    plt.subplot(428)                                    # plot spectrogram yes-question, averaged
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0) / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0))
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)

    plt.tight_layout()
    plt.show()

    return None


def plot_psth(ff=[], sub_band=0, ch_id=0, trial_id=0, step=10):
    '''plot single-trial and trial-averaged spectrograms for all 8 trigger classes'''
    xx1 = np.arange(data1.shape[0]) / params.lfp.fs
    xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs
    f_idx = ff < 150

    plt.figure(figsize=(10, 10))
    plt.clf()

    for ii in range(8):
        plt.subplot(4, 4, ii + 1)
        plt.pcolormesh(tt, ff[f_idx], Sxx1[ii][trial_id, f_idx, ch_id, :])

    if params.lfp.sub_band < 1:
        fct = 10
    if params.lfp.sub_band == 1:
        fct = 10
    elif params.lfp.sub_band == 2:
        fct = 5

    cmax = np.array([Sxx1[ii][:, f_idx, ch_id, :].mean() for ii in range(8)]).max() * fct

    for ii in range(8):
        plt.subplot(4, 4, 8 + ii + 1)
        plt.pcolormesh(tt, ff[f_idx], np.mean(Sxx1[ii][:, f_idx, ch_id, :], axis=0))
        plt.clim(0, cmax)

    plt.tight_layout()
    # plt.show()

    return None


params = aux.load_config()

# params.lfp.array1 = np.delete(params.lfp.array1, params.lfp.array1_exclude)
# params.lfp.array2 = np.delete(params.lfp.array2, params.lfp.array2_exclude)
psth_win = params.lfp.psth_win

# file_name = '/home/vlachos/data_kiap/20190321-135311/20190321-135311-001'
# path = '/home/kiap/Documents/kiap/k01/20190414-123429/'
# path = '/media/vlachos/kiap_backup/Recordings/K01/Recordings/20190415-145905/'
path = '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
# file_name = path + '20190415-145905-004.ns2'
file_name = path + '20190326-160445-001.ns2'
# file_name = '/media/vlachos/kiap_backup/Recordings/K01/bci_sessions/20190321-140130/20190321-140130-003'
# file_name = path + '20190414-123429-001.ns2'

array1 = params.lfp.array1
array2 = params.lfp.array2
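# For orientation: aux.load_config() is expected to provide at least the
# following params.lfp fields (names collected from their usage in this script;
# the actual values live in the external config file and are not reproduced here):
#
#   i_stop, fs, sampling_ratio          -- amount of data to read, ns2 sampling rate,
#                                          nev-to-ns2 timestamp ratio
#   array1, array2                      -- channel indexes of the two arrays
#   array1_exclude, array2_exclude      -- channels to exclude manually
#   artifact_thr                        -- artifact threshold used in get_valid_trials
#   zscore, exclude, car, normalize     -- preprocessing switches
#   psth_win, sub_band                  -- psth window (samples) and sub-band selection
#   filter_fc_lb/_mb/_hb                -- band edges of the low/middle/high sub-bands
#   filter_order_lb/_mb/_hb             -- corresponding filter orders
#   spectra.spgr_len                    -- spectrogram segment length
#   plot.general, plot.filters          -- plotting switches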
if 'data0' not in locals():                     # load data if not already loaded by a previous call
    # READ DATA
    # ----------------------------------
    reader = BlackrockIO(filename=file_name)
    data0 = read_data()                         # (samples, channels)
    data0 = data0.copy().astype(np.float64)

    # IMPORT TRIGGERS FROM NEV FILE
    # ----------------------------------
    triggers = get_triggers(n_triggers=20)
    # trials_msk = get_valid_trials(triggers_no_ns2, triggers_yes_ns2)

# data1[data1 > params.lfp.artifact_thr] = 0
# data1[data1 < -params.lfp.artifact_thr] = 0
# data0 = data0 - np.mean(data0, 1)[:, np.newaxis]
# reader.nev_data['Comments'][0]

# Z-SCORE DATA
# ----------------------------------
if params.lfp.zscore:
    data0 = stats.zscore(data0)

# SEPARATE DATA INTO CHANNELS FROM ARRAY-1 AND ARRAY-2
# ----------------------------------
data01 = data0[:, :len(array1)]                             # array-1 electrodes
data02 = data0[:, len(array1):len(array1) + len(array2)]    # array-2 electrodes

# EXCLUDE CHANNELS IF VARIANCE IS OUTSIDE PERCENTILES
# ----------------------------------------------------------------------
if params.lfp.exclude:
    data01, data02, array1_msk, array2_msk = remove_channels(data01, data02)
    ch_nr1 = len(params.lfp.array1) - len(array1_msk)
    ch_nr2 = len(params.lfp.array2) - len(array2_msk)
else:
    ch_nr1 = len(params.lfp.array1)
    ch_nr2 = len(params.lfp.array2)

# PERFORM COMMON AVERAGE REFERENCING
# ----------------------------------------------------------------------
if params.lfp.car:
    data01 = data01 - np.repeat(np.mean(data01, axis=1)[:, np.newaxis], data01.shape[1], 1)
    data02 = data02 - np.repeat(np.mean(data02, axis=1)[:, np.newaxis], data02.shape[1], 1)

# DIVIDE DATA INTO 3 SUB-BANDS
# ----------------------------------
data1 = np.zeros((data01.shape[0], data01.shape[1], 3))     # (samples, channels, 3)
data2 = np.zeros((data02.shape[0], data02.shape[1], 3))     # (samples, channels, 3)
# Y1 = np.zeros((data01.shape[0], data01.shape[1], 3))

# FILTER IN THE THREE DIFFERENT BANDS
# ----------------------------------
data1[:, :, 0], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
data1[:, :, 1], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
data1[:, :, 2], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)

data2[:, :, 0], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
data2[:, :, 1], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
data2[:, :, 2], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)

# PERFORM STFT USING FFT
# ----------------------------------
Y01, ff01 = analytics1.calc_fft(data01, params.lfp.fs, i_stop=1e5, axis=0)
Y02, ff02 = analytics1.calc_fft(data02, params.lfp.fs, i_stop=1e5, axis=0)
Y1, ff1 = analytics1.calc_fft(data1, params.lfp.fs, i_stop=1e5, axis=0)
Y2, ff2 = analytics1.calc_fft(data2, params.lfp.fs, i_stop=1e5, axis=0)
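# Shapes at this point (as sketched by the comments in this script):
#   data0            (samples, channels)        raw ns2 data, float64
#   data01 / data02  (samples, ch_nr1/ch_nr2)   per-array data after channel exclusion and CAR
#   data1 / data2    (samples, channels, 3)     the three band-pass filtered sub-bands
#   Y01, Y02, Y1, Y2                            spectra returned by calc_fft (i_stop=1e5)
#   ff01, ff02, ff1, ff2                        matching frequency vectors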
# COMPUTE PSTH
# ----------------------------------
# psth shape: (trials, time, channels)
# offset = 600 * params.lfp.fs  # offset in sec to get data for normalization
# triggers_yes_norm = np.hstack((triggers_yes_ns2 + offset, triggers_yes_ns2 + offset + 10))
# triggers_no_norm = np.hstack((triggers_yes_ns2 + offset, triggers_yes_ns2 + offset + 10))

# if np.any(triggers_yes_norm > data01.shape[1]):
#     log.error('triggers_norm outside imported data')

psth = compute_psth(triggers, arr_nr=1, sub_band=params.lfp.sub_band)
# psth_norm1, psth_norm2 = compute_psth(triggers_yes_norm, triggers_no_norm, arr_nr=1, sub_band=params.lfp.sub_band)

# COMPUTE PSTH SPECTROGRAMS
# ----------------------------------
nperseg = params.lfp.spectra.spgr_len
nperseg = min(nperseg, psth[0].shape[1])

# ff, tt, Sxx1 = signal.spectrogram(psth_no.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# ff, tt, Sxx2 = signal.spectrogram(psth_yes.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))

# Sxx1.shape = (trials, ff, channels, time)
# Sxx1_norm.shape = (ff, time, channels)
Sxx1 = [list() for ii in range(8)]
for ii in range(8):
    ff, tt, Sxx1_tmp = signal.spectrogram(psth[ii], params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
    Sxx1[ii] = Sxx1_tmp

# ff, tt, Sxx1_norm = signal.spectrogram(psth_norm1.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# ff, tt, Sxx2_norm = signal.spectrogram(psth_norm2.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))

# Sxx1 = Sxx1.mean(1)
# Sxx2 = Sxx2.mean(1)
# Sxx1_norm = Sxx1_norm.mean(2)
# Sxx2_norm = Sxx2_norm.mean(2)

tt = tt + psth_win[0] / params.lfp.fs
t_idx = tt < 1
# Sxx1_norm = Sxx1_norm[:, np.newaxis].repeat(len(tt), 1)
# Sxx2_norm = Sxx2_norm[:, np.newaxis].repeat(len(tt), 1)

# if params.lfp.normalize:
#     Sxx1 = Sxx1 / Sxx1_norm
#     Sxx2 = Sxx2 / Sxx2_norm

# 11. PLOT RESULTS
# ----------------------------------
if params.lfp.plot.general:
    # plot_spectra()
    plot_psth(ff=ff, sub_band=params.lfp.sub_band, ch_id=0, trial_id=0, step=100)
    pass

plt.ioff()
os.makedirs(path + 'results', exist_ok=True)        # make sure the output folder for the figures exists
for trial_id in range(0, 1):
    # for ch_id in range(ch_nr1 - 1, ch_nr1):
    for ch_id in range(ch_nr1):
        plot_psth(ff=ff, sub_band=params.lfp.sub_band, ch_id=ch_id, trial_id=0, step=100)
        fig2 = plt.gcf()
        fname = path + f'results/trial_{trial_id}_ch_{ch_id}_sband_{params.lfp.sub_band}.png'
        fig2.savefig(fname)
        print(ch_id)