- '''
- description: offline analysis of kiap data
- author: Ioannis Vlachos
- date: 02.04.2019
- Copyright (c) 2019 Ioannis Vlachos.
- All rights reserved.'''
- import re
- import os
- import glob
- import importlib
- import urllib
- import time
- import numpy as np
- import matplotlib.pyplot as plt
- import matplotlib as mpl
- from scipy import signal
- from scipy import stats
- from cachetools import cached, LRUCache, TTLCache
- from neo.io.blackrockio import BlackrockIO
- import aux
- from aux import log
- from analytics import analytics1
- importlib.reload(analytics1)
- # @cached(cache={})
- def read_data():
- '''returns first array1 and then array2 channels'''
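- # NOTE: relies on the module-level `reader` (BlackrockIO) and `params` objects defined
- # further down in this script; call only after those have been created.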
- data = reader.get_analogsignal_chunk(i_stop=eval(str(params.lfp.i_stop)),
- channel_indexes=list(set(params.lfp.array1) | set(params.lfp.array2)))
- return data
- def get_triggers(n_triggers=10, verbose=True, speller='question'):
- '''get triggers from corresponding nev files'''
- try:
- triggers_ts = [tt[0] for tt in reader.nev_data['Comments'][0]]
- triggers_txt = [tt[5].decode('utf-8') for tt in reader.nev_data['Comments'][0]]
- except Exception:
- log.error('Triggers could not be loaded. Does the .nev file exist?')
- return None, None
- if verbose:
- print(reader.nev_data['Comments'])
- triggers_yes = []
- triggers_no = []
- if speller == 'question':
- class1 = 'yes'
- class2 = 'no'
- elif speller == 'feedback':
- class1 = 'up'
- class2 = 'down'
- else:
- raise ValueError(f'Unknown speller type: {speller}')
- for ii in range(len(triggers_txt)):
- if f'{class1}, response, start' in triggers_txt[ii]:
- triggers_yes.append(triggers_ts[ii])
- elif f'{class2}, response, start' in triggers_txt[ii]:
- triggers_no.append(triggers_ts[ii])
- triggers_yes = np.array(triggers_yes)
- triggers_no = np.array(triggers_no)
- # get timestamps for ns2
- triggers_yes_ns2 = triggers_yes // params.lfp.sampling_ratio
- triggers_no_ns2 = triggers_no // params.lfp.sampling_ratio
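- # NEV timestamps are converted to ns2 sample indices by integer division with
- # params.lfp.sampling_ratio (for Blackrock data this is typically the 30 kHz event clock
- # divided by the 1 kHz ns2 rate, i.e. 30, but the value is taken from the config here).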
- return triggers_no_ns2[:n_triggers], triggers_yes_ns2[:n_triggers]
- def get_valid_trials(triggers_no_ns2, triggers_yes_ns2):
- trials_msk = np.ones((max(len(triggers_no_ns2), len(triggers_yes_ns2)), 2), dtype=bool)
- for ii, tt in enumerate(triggers_no_ns2):
- tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
- if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
- trials_msk[ii, 0] = False
- for ii, tt in enumerate(triggers_yes_ns2):
- tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
- if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
- trials_msk[ii, 1] = False
- print('\nExcluded no-trials:', np.where(~trials_msk[:, 0])[0], 'excluded yes-trials:', np.where(~trials_msk[:, 1])[0])
- return trials_msk
- def calc_cc():
- '''calculate time-resolved correlation coefficients between channels of each array'''
-
- cc_step = 5000
- rng = range(0, data1.shape[0], cc_step)
- cc1 = np.zeros((len(rng), 3))
- cc2 = np.zeros((len(rng), 3))
- hist1 = np.zeros((len(rng), 100))
- hist2 = np.zeros((len(rng), 100))
- for ii, idx in enumerate(rng):
- # cc1[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
- # cc1[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
- # cc1[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
- cc1[ii, 0] = np.min(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
- cc1[ii, 1] = np.max(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
- cc1[ii, 2] = np.mean(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
- # hist1[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array1].T)).flatten(), 100)[1][:-1]
- # cc2[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
- # cc2[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
- # cc2[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
- cc2[ii, 0] = np.min(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
- cc2[ii, 1] = np.max(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
- cc2[ii, 2] = np.mean(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
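- # NOTE: np.triu() keeps the zeros of the diagonal and lower triangle, so the min/mean
- # above are pulled towards zero. One alternative (not used here) is to keep only the
- # unique pairwise coefficients, e.g.:
- # cc_vals = np.corrcoef(data01[idx:idx + cc_step, :].T)[np.triu_indices(data01.shape[1], k=1)]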
- # hist2[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array2].T)), 100)[1][:-1]
- # print(ii)
- if params.lfp.plot.general:
- plt.figure()
- plt.plot(cc1)
- plt.gca().set_prop_cycle(None)
- plt.plot(cc2, '-.')
- plt.show()
- return None
- def compute_psth(triggers_yes_ns2, triggers_no_ns2, arr_nr=1, sub_band=-1):
- '''compute the psth for no and yes questions'''
- psth_len = np.diff(psth_win)[0]
- if arr_nr == 1:
- ch_nr = ch_nr1
- if sub_band == -1:
- data_tmp = data01 # raw data
- else:
- data_tmp = data1[:, :, sub_band] # sub_band
- elif arr_nr == 2:
- ch_nr = ch_nr2
- if sub_band == -1:
- data_tmp = data02 # raw data
- else:
- data_tmp = data2[:, :, sub_band] # sub_band
- psth_no = np.zeros((len(triggers_no_ns2), psth_len, ch_nr))
- psth_yes = np.zeros((len(triggers_yes_ns2), psth_len, ch_nr))
- for ii in range(len(triggers_no_ns2)):
- if (triggers_no_ns2[ii] + psth_win[0] > 0) and (triggers_no_ns2[ii] + psth_win[1] < len(data_tmp)):
- tmp = data_tmp[(triggers_no_ns2[ii] + psth_win[0]):(triggers_no_ns2[ii] + psth_win[1]), :]
- # if not (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
- psth_no[ii, :, :] = tmp
- # else:
- # print(f'Excluded no-trial: {ii}')
- for ii in range(len(triggers_yes_ns2)):
- if (triggers_yes_ns2[ii] + psth_win[0] > 0) and (triggers_yes_ns2[ii] + psth_win[1] < len(data_tmp)):
- tmp = data_tmp[(triggers_yes_ns2[ii] + psth_win[0]):(triggers_yes_ns2[ii] + psth_win[1]), :]
- # if not (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
- psth_yes[ii, :, :] = tmp
- # else:
- # print(f'Excluded yes-trial: {ii}')
- psth_yes, psth_no = remove_trials(psth_yes, psth_no)
- return psth_no, psth_yes
- def remove_channels(data01, data02):
- '''remove channels whose variance falls outside [Q1 - w*IQR, Q3 + w*IQR], where Q1/Q3 are the 25th/75th percentiles'''
- var1 = np.var(data01, axis=0)
- var2 = np.var(data02, axis=0)
- L1 = np.percentile(var1, 25, axis=0, keepdims=True)
- L2 = np.percentile(var2, 25, axis=0, keepdims=True)
- U1 = np.percentile(var1, 75, axis=0, keepdims=True)
- U2 = np.percentile(var2, 75, axis=0, keepdims=True)
- w = 1 # manual parameter that affects the valid range
- valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
- valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
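- # Illustrative (hypothetical) numbers: with Q1 = 2.0, Q3 = 6.0 and w = 1, IQR = 4.0,
- # so channels with variance outside [-2.0, 10.0] are flagged below.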
- # plt.plot(var1, 'C0')
- # plt.plot(var2, 'C1')
- # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
- array1_msk = np.unique(((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0])
- array2_msk = np.unique(((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0])
- array1_msk = np.hstack((array1_msk, params.lfp.array1_exclude)).astype(int)
- array2_msk = np.hstack((array2_msk, params.lfp.array2_exclude)).astype(int)
- array1_msk.sort()
- array2_msk.sort()
- print(f'excluded channels, array1: {array1_msk}')
- print(f'excluded channels, array2: {array2_msk}')
- plot_channels(data01, [], arr_id=1, fs=params.lfp.fs, array_msk=array1_msk, i_stop=1000, step=1, color='C0')
- plot_channels(data02, [], arr_id=2, fs=params.lfp.fs, array_msk=array2_msk, i_stop=1000, step=1, color='C1')
- data01 = np.delete(data01, array1_msk, axis=1)
- data02 = np.delete(data02, array2_msk, axis=1)
- return data01, data02, array1_msk, array2_msk
- def remove_trials(psth_yes, psth_no):
- '''remove trials whose variance falls outside [Q1 - w*IQR, Q3 + w*IQR], where Q1/Q3 are the 25th/75th percentiles'''
- var1 = np.var(psth_yes, axis=1)
- var2 = np.var(psth_no, axis=1)
- L1 = np.percentile(var1, 25, axis=0, keepdims=True)
- L2 = np.percentile(var2, 25, axis=0, keepdims=True)
- U1 = np.percentile(var1, 75, axis=0, keepdims=True)
- U2 = np.percentile(var2, 75, axis=0, keepdims=True)
- w = 10
- valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
- valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
- # array1_msk = np.unique((np.where(var1 < valid1[0])[0] or np.where(var1 > valid1[1])[0]))
- # array2_msk = np.unique((np.where(var2 < valid2[0])[0] or np.where(var2 > valid2[1])[0]))
- array1_msk = ((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0]
- array2_msk = ((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0]
- print(f'excluded yes-trials: {array1_msk}')
- print(f'excluded no-trials: {array2_msk}')
- psth_yes = np.delete(psth_yes, array1_msk, axis=0)
- psth_no = np.delete(psth_no, array2_msk, axis=0)
- return psth_yes, psth_no
- def plot_channels(data, ch_ids, fs, array_msk=np.array([]), arr_id=1, i_start=0, i_stop=10000, step=5, color='C0'):
- '''plot all data from array1 or array2 in the time domain'''
- if not params.lfp.zscore: # z-score here for visualization only, if not already applied globally
- data = stats.zscore(data)
- if ch_ids == []:
- ch_ids = range(data.shape[1])
- if i_stop == -1:
- i_stop = data.shape[0]
- offset = np.cumsum(4 * np.var(data[:, ch_ids], axis=0)[:, np.newaxis]).T
- plt.figure()
- plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, ch_ids] + offset, color=color, lw=1)
- if array_msk.size > 0: # highlight excluded channels
- plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, array_msk] + offset[array_msk], color='C3', lw=1)
- plt.xlabel('Time (sec)')
- plt.yticks(offset[::step], range(0, len(ch_ids), step))
- plt.ylim(0, offset[-1] + 4)
- plt.title(f'raw data from array {arr_id}')
- plt.tight_layout()
- return None
- def plot_spectra(ch_ids=[0]):
- '''plot spectra of raw data and sub_bands, single channels from ch_ids and averages'''
- mpl.rcParams['axes.formatter.limits'] = (-2, 2)
- yy = data01.std() * 2
- xx = np.arange(data1.shape[0]) / params.lfp.fs # plot raw and filtered traces for visual inspection
- xmax = 150
- plt.figure(1, figsize=(10, 10))
- plt.clf()
- # column 1: first array
- plt.subplot(521) # plot raw
- plt.plot(ff1, Y01[:, ch_ids[0]], 'C0', label=f'channel: {array1[ch_ids[0]]}')
- plt.plot(ff1, Y01.mean(1), 'C3', alpha=0.2, label='average')
- plt.xlim(xmin=0, xmax=xmax)
- plt.title('array1 raw')
- # plt.legend()
- plt.gca().set_xticklabels([])
- plt.legend()
-
- plt.subplot(523)
- plt.plot(ff1, Y1[:, ch_ids[0], 0], 'C0', label='array1')
- plt.plot(ff1, Y1[:, :, 0].mean(1), 'C3', alpha=0.2)
- # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
- plt.xlim(xmin=0, xmax=xmax)
- plt.gca().set_xticklabels([])
- plt.title('sub_band 0')
-
- plt.subplot(525)
- plt.plot(ff1, Y1[:, ch_ids[0], 1], 'C0', label='array1')
- plt.plot(ff1, Y1[:, :, 1].mean(1), 'C3', alpha=0.2)
- # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
- plt.xlim(xmin=0, xmax=xmax)
- plt.gca().set_xticklabels([])
- plt.title('sub_band 1')
- plt.subplot(527)
- plt.plot(ff1, Y1[:, ch_ids[0], 2], 'C0', label='array1')
- plt.plot(ff1, Y1[:, :, 2].mean(1), 'C3', alpha=0.2)
- # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
- plt.xlim(xmin=0, xmax=xmax)
- plt.title('sub_band 2')
- plt.subplot(529)
- plt.loglog(ff1, Y01[:, ch_ids[0]], 'C0')
- plt.loglog(ff1, Y01.mean(1), 'C3', alpha=0.2)
- plt.title('array1 raw loglog')
- plt.xlabel('Frequency (Hz)')
- # plt.legend()
- # column 2: second array
- plt.subplot(522)
- plt.plot(ff02, Y02[:, ch_ids[0]], 'C0', label=f'channel: {array2[ch_ids[0]]}')
- plt.plot(ff02, Y02.mean(1), 'C3', alpha=0.2, label='average')
- plt.xlim(xmin=0, xmax=xmax)
- # plt.gca().set_yticklabels([])
- plt.title('array2 raw')
- # plt.legend()
- plt.gca().set_xticklabels([])
- plt.legend()
-
- plt.subplot(524)
- plt.plot(ff2, Y2[:, ch_ids[0], 0], 'C0', label='array1')
- plt.plot(ff2, Y2[:, :, 0].mean(1), 'C3', alpha=0.2)
- # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
- plt.xlim(xmin=0, xmax=xmax)
- plt.gca().set_xticklabels([])
- plt.title('sub_band 0')
-
- plt.subplot(526)
- plt.plot(ff2, Y2[:, ch_ids[0], 1], 'C0', label='array1')
- plt.plot(ff2, Y2[:, :, 1].mean(1), 'C3', alpha=0.2)
- # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
- plt.xlim(xmin=0, xmax=xmax)
- plt.gca().set_xticklabels([])
- # plt.gca().set_yticklabels([])
- plt.title('sub_band 1')
- plt.subplot(528)
- plt.plot(ff2, Y2[:, ch_ids[0], 2], 'C0', label='array1')
- plt.plot(ff2, Y2[:, :, 2].mean(1), 'C3', alpha=0.2)
- # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
- plt.xlim(xmin=0, xmax=xmax)
- plt.title('sub_band 2')
- # plt.gca().set_yticklabels([])
- plt.subplot(5, 2, 10)
- plt.loglog(ff02, Y02[:, ch_ids[0]], 'C0')
- plt.loglog(ff02, Y02.mean(1), 'C3', alpha=0.2)
- # plt.gca().set_yticklabels([])
- plt.title('array2 raw loglog')
- plt.xlabel('Frequency (Hz)')
- plt.tight_layout()
- plt.draw()
- plt.show()
- return None
- def plot_spectra2(arr_nr=1):
- '''plot spectra of raw data for all channels of arr_nr, compare with spectrum of white noise'''
- mpl.rcParams['axes.formatter.limits'] = (-2, 2)
- if arr_nr == 1:
- YY = Y01
- ff_arr = ff01
- d0, d1 = data01.shape
- elif arr_nr == 2:
- YY = Y02
- ff_arr = ff02
- d0, d1 = data02.shape
- # Y_GWN, _ = analytics1.calc_fft(np.random.randn(d0, d1), params.lfp.fs, i_stop=1e5, axis=0)
- plt.figure(1, figsize=(10, 10))
- plt.clf()
- for ii in range(YY.shape[1]):
- ax = plt.subplot(8, 8, ii + 1)
- plt.semilogy(ff_arr, YY[:, ii], 'C0')
- # plt.semilogy(ff1, Y_GWN[:, ii], 'C1', alpha=0.5)
- plt.ylim(10e-2, 10e4)
- plt.xlim(0, 30)
- if ii < d1 - 8: # show x tick labels only on the bottom row
- ax.set_xticks([])
- if np.mod(ii, 8) != 0:
- ax.set_yticks([])
- plt.draw()
- return None
- def plot_psth(ff=np.array([]), sub_band=0, ch_id=0, trial_id=0, step=10):
- ''' plot psth for no and yes responses separately'''
- yy = max(data01.std(), data1[:, ch_id, sub_band].std()) * 2
- xx1 = np.arange(data1.shape[0]) / params.lfp.fs
- xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs
- f_idx = ff < 150
- plt.figure(figsize=(10, 10))
- plt.clf()
- log.info(f'Selected sub-band: {sub_band}')
- plt.subplot(411)
- plt.plot(xx1[::step], data01[::step, ch_id], label='raw ns2') # plot raw
- plt.plot(xx1[::step], data1[::step, ch_id, sub_band], 'C1', alpha=0.5, label=f'sub-band: {sub_band}') # plot sub_band (low, middle, high)
- plt.stem(triggers_no_ns2 / params.lfp.fs, triggers_no_ns2 * 0 + yy * 0.8, markerfmt='C3o', label='no') # red
- plt.stem(triggers_yes_ns2 / params.lfp.fs, triggers_yes_ns2 * 0 + yy * 0.8, markerfmt='C2o', label='yes') # green
- plt.ylim(-yy, yy)
- plt.xlabel('sec')
- plt.legend(loc=1)
- plt.subplot(423) # plot psth no-questions
- plt.plot(xx2, psth_no.mean(0)[:, ch_id], label='trial av. no') # average across trials, plot 1 channel
- plt.plot(xx2, psth_no.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. no') # average across trials and channels
- plt.xlabel('sec')
- plt.legend()
- plt.subplot(424) # plot psth yes-questions
- plt.plot(xx2, psth_yes.mean(0)[:, ch_id], label='trial av. yes') # average across trials, plot 1 channel
- plt.plot(xx2, psth_yes.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. yes') # average across trials and channels
- plt.xlabel('sec')
- plt.legend()
- plt.subplot(425) # plot spectrogram no-question, single channel and trial
- # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
- if params.lfp.normalize:
- plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :] / Sxx1_norm[f_idx, :, ch_id])
- else:
- plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :])
- vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
- plt.clim(vmax=vmax)
- plt.xlabel('sec')
- plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
- plt.subplot(426) # plot spectrogram yes-question, single channel and trial
- if params.lfp.normalize:
- plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :] / Sxx2_norm[f_idx, :, ch_id])
- else:
- plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :])
- plt.clim(vmax=vmax)
- plt.xlabel('sec')
- plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
- plt.subplot(427) # plot spectrogram no-question, averaged
- # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
- if params.lfp.normalize:
- plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0) / Sxx1_norm[f_idx, :, ch_id])
- else:
- plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0))
- vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
- plt.clim(vmax=vmax)
- plt.xlabel('sec')
- plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
- plt.subplot(428) # plot spectrogram yes-question, averaged
- if params.lfp.normalize:
- plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0) / Sxx2_norm[f_idx, :, ch_id])
- else:
- plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0))
- plt.clim(vmax=vmax)
- plt.xlabel('sec')
- plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
- plt.tight_layout()
- plt.show()
- return None
- params = aux.load_config(force_reload=True)
- # params.lfp.array1 = np.delete(params.lfp.array1, params.lfp.array1_exclude)
- # params.lfp.array2 = np.delete(params.lfp.array2, params.lfp.array2_exclude)
- psth_win = params.lfp.psth_win
- # file_name = '/home/vlachos/data_kiap/20190321-135311/20190321-135311-001'
- # path = '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
- # path = '/kiap/data/clinical/neural_new/nsx/'
- path = '/media/vlachos/bck_disk1/kiap/recordings/20190705/20190705-143437/'
- # file_name = path + '20190326-160445-001.ns2'
- file_name = path + '20190705-143437-001.ns2'
- array1 = params.lfp.array1
- array2 = params.lfp.array2
- if 'data0' not in locals(): # load data if not already loaded by previous call
- # READ DATA
- # ----------------------------------
- reader = BlackrockIO(filename=file_name)
- data0 = read_data() # (samples, channels)
- data0 = data0.copy().astype(np.float64)
- # IMPORT TRIGGERS FROM NEV FILE
- # ----------------------------------
- triggers_no_ns2, triggers_yes_ns2 = get_triggers(n_triggers=20, speller=params.speller.type)
- trials_msk = get_valid_trials(triggers_no_ns2, triggers_yes_ns2)
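- # NOTE: trials_msk only reports artifact-threshold crossings here; actual trial exclusion
- # further down is handled by remove_trials() based on trial variance.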
- # data1[data1 > params.lfp.artifact_thr] = 0
- # data1[data1 < -params.lfp.artifact_thr] = 0
- # data0 = data0 - np.mean(data0, 1)[:, np.newaxis]
- # reader.nev_data['Comments'][0] # inspect raw comment events interactively if needed
- # Z-SCORE DATA
- # ----------------------------------
- if params.lfp.zscore:
- data0 = stats.zscore(data0)
- # SEPARATE DATA INTO CHANNELS FROM ARRAY-1 AND ARRAY-2
- # ----------------------------------
- data01 = data0[:, :len(array1)] # array-1 electrodes
- data02 = data0[:, len(array1):len(array1) + len(array2)] # array-2 electrodes
- # EXCLUDE CHANNELS IF VARIANCE IS OUTSIDE PERCENTILES
- # ----------------------------------------------------------------------
- if params.lfp.exclude:
- data01, data02, array1_msk, array2_msk = remove_channels(data01, data02)
- ch_nr1 = len(params.lfp.array1) - len(array1_msk)
- ch_nr2 = len(params.lfp.array2) - len(array2_msk)
- else:
- ch_nr1 = len(params.lfp.array1)
- ch_nr2 = len(params.lfp.array2)
- # PERFORM COMMON AVERAGE REFERENCING
- # ----------------------------------------------------------------------
- if params.lfp.car:
- data01 = data01 - data01.mean(axis=1, keepdims=True)
- data02 = data02 - data02.mean(axis=1, keepdims=True)
- # DIVIDE DATA INTO 3 SUB-BANDS
- # ----------------------------------
- data1 = np.zeros((data01.shape[0], data01.shape[1], 3)) # (samples, channels, 3)
- data2 = np.zeros((data02.shape[0], data02.shape[1], 3)) # (samples, channels, 3)
- # Y1 = np.zeros((data01.shape[0], data01.shape[1], 3))
- # FILTER IN THE THREE DIFFERENT BANDS
- # ----------------------------------
- data1[:, :, 0], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
- data1[:, :, 1], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
- data1[:, :, 2], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
- data2[:, :, 0], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
- data2[:, :, 1], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
- data2[:, :, 2], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
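- # Assumption: analytics1.bw_filter takes (channels, samples) input (hence the .T) and
- # returns the band-passed signal in (samples, channels) orientation, matching the
- # data1/data2 slices it is assigned to; this is a property of the custom helper, not of scipy.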
- # PERFORM STFT USING FFT
- # ----------------------------------
- Y01, ff01 = analytics1.calc_fft(data01, params.lfp.fs, i_stop=1e5, axis=0)
- Y02, ff02 = analytics1.calc_fft(data02, params.lfp.fs, i_stop=1e5, axis=0)
- Y1, ff1 = analytics1.calc_fft(data1, params.lfp.fs, i_stop=1e5, axis=0)
- Y2, ff2 = analytics1.calc_fft(data2, params.lfp.fs, i_stop=1e5, axis=0)
- nperseg = 10000
- # ff, tt, S = signal.spectrogram(data2[:, :, 0], params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
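- # Quick sanity check: Welch PSD of the band-passed array-2 data; the frequency
- # resolution is params.lfp.fs / nperseg.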
- ff, Y = signal.welch(data2, fs=params.lfp.fs, axis=0, nperseg=10000)
- # fidx = (ff > 60) & (ff < 140)
- fidx = (ff > 0) & (ff < 20)
- # tidx = tt < 1000
- plt.figure(1)
- plt.clf()
- # plt.pcolormesh(tt, ff[fidx], np.log10(S[fidx, :, :].mean(1)))
- # plt.plot(ff, np.log10(Y[:, :, 2]), 'C0')
- # plt.pcolormesh(ff, range(Y.shape[1]), np.log10(Y[:, :, 2].T))
- # plt.pcolormesh(ff, tt, np.log10(S[:, 4, :].T))
- # plt.colorbar()
- # plt.xlim(0,20)
- plt.plot(ff, np.log10(Y[:, :, 1]), 'C1')
- plt.plot(ff, np.log10(Y[:, :, 2]), 'C2')
- # plt.xlim(0, 100)
- plt.draw()
- plt.show()
- # COMPUTE PSTH
- # ----------------------------------
- # psth shape: (trials, time, channels)
- offset = 600 * params.lfp.fs # 600 s offset (in samples) used to extract baseline data for normalization
- triggers_yes_norm = np.hstack((triggers_yes_ns2 + offset, triggers_yes_ns2 + offset + 10))
- triggers_no_norm = np.hstack((triggers_no_ns2 + offset, triggers_no_ns2 + offset + 10))
- if np.any(triggers_yes_norm > data01.shape[0]) or np.any(triggers_no_norm > data01.shape[0]):
- log.error('triggers_norm outside imported data')
- psth_no, psth_yes = compute_psth(triggers_yes_ns2, triggers_no_ns2, arr_nr=1, sub_band=params.lfp.sub_band)
- psth_norm1, psth_norm2 = compute_psth(triggers_yes_norm, triggers_no_norm, arr_nr=1, sub_band=params.lfp.sub_band)
- # COMPUTE PSTH SPECTROGRAMS
- # ----------------------------------
- nperseg = params.lfp.spectra.spgr_len
- nperseg = min(nperseg, psth_no.shape[1])
- # ff, tt, Sxx1 = signal.spectrogram(psth_no.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # ff, tt, Sxx2 = signal.spectrogram(psth_yes.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # Sxx1.shape = (trials, ff, channels, time)
- # Sxx1_norm.shape = (ff,time, channels)
- ff, tt, Sxx1 = signal.spectrogram(psth_no, params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- ff, tt, Sxx2 = signal.spectrogram(psth_yes, params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- ff, tt, Sxx1_norm = signal.spectrogram(psth_norm1.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- ff, tt, Sxx2_norm = signal.spectrogram(psth_norm2.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # Sxx1 = Sxx1.mean(1)
- # Sxx2 = Sxx2.mean(1)
- Sxx1_norm = Sxx1_norm.mean(2)
- Sxx2_norm = Sxx2_norm.mean(2)
- tt = tt + psth_win[0] / params.lfp.fs
- t_idx = tt < 1
- Sxx1_norm = Sxx1_norm[:, np.newaxis].repeat(len(tt), 1)
- Sxx2_norm = Sxx2_norm[:, np.newaxis].repeat(len(tt), 1)
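- # The baseline spectrograms (taken 600 s after each trigger) are averaged over time and
- # tiled along the PSTH time axis, so each time-frequency bin can be divided by its
- # baseline power when params.lfp.normalize is set (see plot_psth).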
- # if params.lfp.normalize:
- # Sxx1 = Sxx1 / Sxx1_norm
- # Sxx2 = Sxx2 / Sxx2_norm
- # PLOT RESULTS
- # ----------------------------------
- if params.lfp.plot.general:
- # plot_spectra()
- plot_psth(ff=ff, sub_band=params.lfp.sub_band, ch_id=0, trial_id=0, step=100)
- pass
- plt.ioff()
- # for trial_id in range(4, 5):
- # for ch_id in range(ch_nr1 - 1, ch_nr1):
-
- # plot_psth(ff=ff, sub_band=0, ch_id=ch_id, trial_id=0, step=100)
- # fig2 = plt.gcf()
- # fname = path + f'results/trial_{trial_id}_ch_{ch_id}.png'
- # # fig2.savefig(fname)
- # print(ch_id)