'''
description: offline analysis of kiap data
author: Ioannis Vlachos
date: 02.04.2019
Copyright (c) 2019 Ioannis Vlachos.
All rights reserved.'''
import glob
import importlib
import os
import re
import time
import urllib

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from cachetools import cached, LRUCache, TTLCache
from neo.io.blackrockio import BlackrockIO
from scipy import signal
from scipy import stats
# sklearn.decomposition.pca was a private module path (removed in
# scikit-learn 0.24); the public path below exposes the same class
from sklearn.decomposition import PCA

import aux
from aux import log
from analytics import analytics1

importlib.reload(analytics1)  # pick up local edits when re-running interactively
# @cached(cache={})
def read_data():
    '''returns first array1 and then array2 channels

    Reads an analog-signal chunk via the module-level global `reader`
    (BlackrockIO) using limits from the module-level global `params`.

    NOTE(review): the channel order claimed above depends on the ordering of
    `set(...) | set(...)`, which Python does not guarantee — confirm.
    '''
    # NOTE(review): eval() of a config string — acceptable only because the
    # config file is trusted; it allows i_stop entries such as '10*30000'.
    data = reader.get_analogsignal_chunk(i_stop=eval(str(params.lfp.i_stop)),
                                         channel_indexes=list(set(params.lfp.array1) | set(params.lfp.array2)))
    return data
def get_triggers(n_triggers=10, verbose=True):
    '''Extract 'yes'/'no' response trigger timestamps from the NEV comments.

    Timestamps are read from the module-level global `reader`, converted to
    ns2 sample indices via params.lfp.sampling_ratio, and truncated to the
    first `n_triggers` of each condition.

    Parameters
    ----------
    n_triggers : maximum number of triggers to return per condition
    verbose : if True, dump the raw NEV comment records to stdout

    Returns
    -------
    (triggers_no_ns2, triggers_yes_ns2) — note: *no* condition first —
    or (None, None) if the NEV comment data cannot be read.
    '''
    try:
        comments = reader.nev_data['Comments'][0]
        triggers_ts = [tt[0] for tt in comments]
        triggers_txt = [tt[5].decode('utf-8') for tt in comments]
    except Exception:
        # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # still propagate
        log.error('Triggers could not be loaded. Does Nev file exist?')
        return None, None

    if verbose:
        print(reader.nev_data['Comments'])

    triggers_yes = []
    triggers_no = []
    # classify each comment by its marker text
    for ts, txt in zip(triggers_ts, triggers_txt):
        if 'yes, response, start' in txt:
            triggers_yes.append(ts)
        elif 'no, response, start' in txt:
            triggers_no.append(ts)
    triggers_yes = np.array(triggers_yes)
    triggers_no = np.array(triggers_no)

    # get timestamps for ns2: convert raw clock to the lower sampling rate
    triggers_yes_ns2 = triggers_yes // params.lfp.sampling_ratio
    triggers_no_ns2 = triggers_no // params.lfp.sampling_ratio
    return triggers_no_ns2[:n_triggers], triggers_yes_ns2[:n_triggers]
def get_valid_trials(triggers_no_ns2, triggers_yes_ns2):
    '''Flag trials whose peri-trigger window exceeds the artifact threshold.

    Cuts a `psth_win` window around each trigger from the module-level
    global `data1` and marks the trial invalid if any sample's magnitude
    exceeds params.lfp.artifact_thr.

    Parameters
    ----------
    triggers_no_ns2, triggers_yes_ns2 : ns2 sample indices per condition

    Returns
    -------
    Boolean mask of shape (n_trials, 2); column 0 = 'no' trials,
    column 1 = 'yes' trials; False marks an artifact-contaminated trial.
    '''
    # size the mask from the inputs (was hard-coded to 10 trials, which
    # breaks for n_triggers=20 as used below)
    n_trials = max(len(triggers_no_ns2), len(triggers_yes_ns2))
    trials_msk = np.ones((n_trials, 2), dtype=bool)
    for ii, tt in enumerate(triggers_no_ns2):
        tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
        # |x| > thr is equivalent to (x > thr) or (x < -thr)
        if np.any(np.abs(tmp) > params.lfp.artifact_thr):
            trials_msk[ii, 0] = False
    for ii, tt in enumerate(triggers_yes_ns2):
        tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
        if np.any(np.abs(tmp) > params.lfp.artifact_thr):
            trials_msk[ii, 1] = False
    print('\nTrials excluded', np.where(trials_msk[:, 0] == False)[0], np.where(trials_msk[:, 1] == False)[0])
    return trials_msk
def calc_cc():
    '''calculate time resolved correlation coefficients between channels of each array

    Slides a non-overlapping 5000-sample window over the module-level
    globals data01/data02 and records the min/max/mean of the strictly
    upper-triangular correlation matrix per window, then optionally plots
    both traces (array2 dash-dotted, same color cycle).

    Returns None.
    '''
    cc_step = 5000
    # NOTE(review): window count is derived from data1 but the windows index
    # data01/data02 — confirm these have the same number of samples.
    rng = range(0, data1.shape[0], cc_step)
    cc1 = np.zeros((len(rng), 3))
    cc2 = np.zeros((len(rng), 3))
    for ii, idx in enumerate(rng):
        # compute each correlation matrix once per window (was 3x per stat)
        cm1 = np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)
        cm2 = np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)
        # NOTE(review): np.triu leaves zeros in the lower triangle and
        # diagonal, so min/mean include those zeros — confirm intended.
        cc1[ii, 0] = np.min(cm1)
        cc1[ii, 1] = np.max(cm1)
        cc1[ii, 2] = np.mean(cm1)
        cc2[ii, 0] = np.min(cm2)
        cc2[ii, 1] = np.max(cm2)
        cc2[ii, 2] = np.mean(cm2)
    if params.lfp.plot.general:
        plt.figure()
        plt.plot(cc1)
        plt.gca().set_prop_cycle(None)  # restart color cycle so arrays match per-stat colors
        plt.plot(cc2, '-.')
        plt.show()
    return None
def compute_psth(triggers_yes_ns2, triggers_no_ns2):
    '''Cut peri-stimulus windows from data01 for both trigger conditions.

    Windows are `psth_win` samples around each trigger; triggers whose
    window falls outside the recording are skipped, leaving zero rows.
    Outlier trials are then removed via remove_trials().

    Parameters
    ----------
    triggers_yes_ns2, triggers_no_ns2 : ns2 trigger sample indices
        (note the yes-first parameter order vs. the no-first return order)

    Returns
    -------
    (psth_no, psth_yes), each of shape (n_valid_trials, psth_len, ch_nr1).
    '''
    psth_len = np.diff(psth_win)[0]

    def _collect(triggers):
        # Stack one (trials, time, channels) array; out-of-range triggers
        # leave their row all-zero, matching the original behavior.
        # NOTE(review): the boundary check uses len(data2) but the slice is
        # taken from data01 — confirm both have the same length.
        psth = np.zeros((len(triggers), psth_len, ch_nr1))
        for ii, tt in enumerate(triggers):
            if (tt + psth_win[0] > 0) and (tt + psth_win[1] < len(data2)):
                psth[ii, :, :] = data01[(tt + psth_win[0]):(tt + psth_win[1]), :]
        return psth

    psth_no = _collect(triggers_no_ns2)
    psth_yes = _collect(triggers_yes_ns2)
    psth_yes, psth_no = remove_trials(psth_yes, psth_no)
    return psth_no, psth_yes
def remove_trials(psth_yes, psth_no):
    '''Drop trials whose per-channel variance is an extreme outlier.

    For each condition, the 5th/95th percentiles of the trial-by-trial
    variance (over time, per channel) define an interval; a trial lying more
    than 10 interval-widths outside it on any channel is deleted.

    Returns (psth_yes, psth_no) with outlier trials removed along axis 0.
    '''
    margin = 10

    def _outlier_rows(psth):
        # variance over the time axis -> (trials, channels)
        trial_var = np.var(psth, axis=1)
        lo = np.percentile(trial_var, 5, axis=0, keepdims=True)
        hi = np.percentile(trial_var, 95, axis=0, keepdims=True)
        span = hi - lo
        out_of_range = (trial_var < lo - margin * span) | (trial_var > hi + margin * span)
        return out_of_range.nonzero()[0]

    bad_yes = _outlier_rows(psth_yes)
    bad_no = _outlier_rows(psth_no)
    print(f'excluded yes-trials: {bad_yes}')
    print(f'excluded no-trials: {bad_no}')
    psth_yes = np.delete(psth_yes, bad_yes, axis=0)
    psth_no = np.delete(psth_no, bad_no, axis=0)
    return psth_yes, psth_no
def plot_channels(data, ch_ids, fs, i_start=0, i_stop=10000, step=5, color='C0'):
    '''Plot selected channels of `data` stacked with vertical offsets.

    Parameters
    ----------
    data : 2d array, (samples, channels)
    ch_ids : list of channel indexes; [] selects all channels
    fs : sampling rate in Hz (x-axis is seconds)
    i_start, i_stop : sample range to plot; i_stop == -1 plots to the end
    step : label every `step`-th channel on the y-axis
    color : matplotlib color shared by all traces

    Returns None.
    '''
    if ch_ids == []:
        ch_ids = range(data.shape[1])
    if i_stop == -1:
        i_stop = data.shape[0]
    # cumulative, variance-proportional offsets keep the traces separated
    # (cumsum of the 1-d variance vector; the original's [:, np.newaxis]).T
    # was a no-op because cumsum flattens its input)
    offset = np.cumsum(4 * np.var(data[:, ch_ids], axis=0))
    plt.figure()
    plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, ch_ids] + offset, color=color, lw=1)
    plt.xlabel('Time (sec)')
    # stop at len(ch_ids) + 1 so tick and label counts always match; the
    # original range(1, len(ch_ids), step) could be one short and raise
    plt.yticks(offset[::step], range(1, len(ch_ids) + 1, step))
    plt.ylim(0, offset[-1] + 4)
    plt.tight_layout()
    return None
def plot_psth(ff=[], ch_id=0, trial_id=0, step=10):
    '''Quick-look figure: raw trace with trigger markers, trial-averaged
    PSTHs for both conditions, and single-trial spectrograms of one channel.

    Reads module-level globals: data01, data1, psth_no, psth_yes,
    triggers_yes_ns2, triggers_no_ns2, tt, Sxx1, Sxx2 (plus Sxx1_norm /
    Sxx2_norm when params.lfp.normalize is set), params, psth_win.

    Parameters
    ----------
    ff : spectrogram frequency vector (Hz); the `ff=[]` default is only a
         placeholder — callers must pass the real vector ([] < 150 raises)
    ch_id : channel shown in the spectrogram panels
    trial_id : trial shown in the spectrogram panels
    step : plot every `step`-th sample of the raw traces (for speed)
    '''
    yy = data01.std()  # y-scale for the raw-trace panel and marker height
    # plot psth for visual inspection
    xx1 = np.arange(data1.shape[0]) / params.lfp.fs  # raw-data time axis (s)
    xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs  # peri-trigger axis (s)
    f_idx = ff < 150  # restrict spectrogram panels to the low-frequency band
    plt.figure(2, figsize=(10, 10))
    plt.clf()
    plt.subplot(311)
    plt.plot(xx1[::step], data01[::step, 0])
    plt.plot(xx1[::step], data1[::step, :, 2], 'C1', alpha=0.2)  # low band
    plt.stem(triggers_yes_ns2 / params.lfp.fs, triggers_yes_ns2 * 0 + yy * 0.8, markerfmt='C2o')  # green
    plt.stem(triggers_no_ns2 / params.lfp.fs, triggers_no_ns2 * 0 + yy * 0.8, markerfmt='C3o')  # red
    plt.ylim(-yy, yy)
    plt.subplot(323)
    plt.plot(xx2, psth_no.mean(0)[:, params.lfp.plot.ch_ids])  # average across trials, selected channels
    plt.plot(xx2, psth_no.mean(0).mean(1), 'C3', alpha=0.5)  # grand average over channels
    plt.subplot(324)
    plt.plot(xx2, psth_yes.mean(0)[:, params.lfp.plot.ch_ids])
    plt.plot(xx2, psth_yes.mean(0).mean(1), 'C3', alpha=0.5)
    plt.subplot(325)
    if params.lfp.normalize:
        # divide by the normalization spectrogram (presumably a baseline
        # window — see commented-out Sxx*_norm code at the bottom of the file)
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :] / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :])
    # shared color limit so the two condition panels are comparable
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(326)
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :] / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :])
    plt.clim(vmax=vmax)
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.tight_layout()
    plt.show()
    return None
# ----------------------------------------------------------------------
# SCRIPT ENTRY: load config, read raw data, preprocess, quick-look plots
# ----------------------------------------------------------------------
params = aux.load_config()
# drop channels marked as bad in the config
params.lfp.array1 = np.delete(params.lfp.array1, params.lfp.array1_exclude)
params.lfp.array2 = np.delete(params.lfp.array2, params.lfp.array2_exclude)
ch_nr1 = len(params.lfp.array1)
ch_nr2 = len(params.lfp.array2)
psth_win = params.lfp.psth_win

# file_name = '/home/vlachos/data_kiap/20190321-135311/20190321-135311-001'
# path = '/home/kiap/Documents/kiap/k01/20190414-123429/'
path = '/media/vlachos/kiap_backup/Recordings/K01/Recordings/20190415-145905/'
# file_name = '/media/vlachos/kiap_backup/Recordings/K01/bci_sessions/20190321-140130/20190321-140130-003'
# file_name = path + '20190414-123429-001.ns2'
file_name = path + '20190415-145905-004'

# cache guard: skip the slow disk read when re-running interactively
if not 'data0' in locals():
    reader = BlackrockIO(filename=file_name, nsx_to_load=6)
    data0 = read_data()  # (samples, channels)
    data0 = data0.copy().astype(np.float64)
    triggers_no_ns2, triggers_yes_ns2 = get_triggers(n_triggers=20)
    # trials_msk = get_valid_trials(triggers_no_ns2, triggers_yes_ns2)

array1 = params.lfp.array1
array2 = params.lfp.array2

# data1[data1 > params.lfp.artifact_thr] = 0
# data1[data1 < -params.lfp.artifact_thr] = 0
# data0 = data0 - np.mean(data0, 1)[:, np.newaxis]
# reader.nev_data['Comments'][0]

# z-score per channel, then split into the two electrode arrays
data0 = stats.zscore(data0)
data01 = data0[:, :len(array1)]
data02 = data0[:, len(array1):len(array1) + len(array2)]
data01 = signal.decimate(data01, 2, axis=0)  # downsample to 15 kHz
# NOTE(review): this immediately overwrites the decimated data01 above with a
# band-pass of the FULL transposed data0 — looks unintended; confirm which
# array the spectrogram below is meant to use.
data01, _, _ = analytics1.bw_filter(data0.T, [250, 5000], fs=15000)  # bandpass
nperseg = 10000
ff, tt, Sxx1 = signal.spectrogram(data01, 15000, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.5))
f_idx = (ff > 0) & (ff < 5000)

# per-window "rms" feature, then project through PCA for a 2-d scatter
width = 10000
rms = np.empty((0, data01.shape[1]))
for ii in range(0, data01.shape[0], width):
    # rms = np.vstack((rms, np.var(data01[ii: ii + width], axis=0)))
    # NOTE(review): sqrt(mean(x)**2) == |mean(x)|, not RMS; true RMS would be
    # sqrt(mean(x**2)) — confirm intent before relying on this feature.
    rms = np.vstack((rms, np.sqrt(np.mean(data01[ii: ii + width], axis=0)**2)))
    print(ii, ii + width)
res = PCA(n_components=3).fit_transform(rms)
plt.clf()
# plt.pcolormesh(tt, ff[f_idx], np.log10(Sxx1[f_idx, 0]))
# plt.plot(tt, Sxx1[:, 3, :].sum(axis=0))
# plt.plot(rms)
plt.plot(res[:, 0], res[:, 1], '.')
- # data1 = signal.resample_poly(data0, 1, 2, axis=0)
- # y1, ff1 = analytics1.calc_fft(data1, fs=30000, axis=0)
- # y11, ff11 = analytics1.calc_fft(data11, fs=15000, axis=0)
- # f_idx1 = (ff1 > 0) & (ff1 < 10000)
- # f_idx11 = (ff11 > 0) & (ff11 < 10000)
- # plt.clf()
- # # plt.plot(ff1[f_idx1], y1[f_idx1, 0])
- # plt.plot(ff11[f_idx11], y11[f_idx11, 0])
- # COMPUTE PSTH
- # ----------------------------------
- # offset = 600 * params.lfp.fs
- # psth_no, psth_yes = compute_psth(triggers_yes_ns2, triggers_no_ns2)
- # psth_norm1, psth_norm2 = compute_psth(triggers_yes_ns2 + offset, triggers_no_ns2 + offset)
- # # PSTH SPECTROGRAMS
- # # ----------------------------------
- # nperseg = params.lfp.spectra.spgr_len
- # nperseg = min(nperseg, psth_no.shape[1])
- # # ff, tt, Sxx1 = signal.spectrogram(psth_no.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # # ff, tt, Sxx2 = signal.spectrogram(psth_yes.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # ff, tt, Sxx1 = signal.spectrogram(psth_no, params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # ff, tt, Sxx2 = signal.spectrogram(psth_yes, params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # ff, tt, Sxx1_norm = signal.spectrogram(psth_norm1.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # ff, tt, Sxx2_norm = signal.spectrogram(psth_norm2.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
- # # Sxx1 = Sxx1.mean(1)
- # # Sxx2 = Sxx2.mean(1)
- # Sxx1_norm = Sxx1_norm.mean(2)
- # Sxx2_norm = Sxx2_norm.mean(2)
- # tt = tt + psth_win[0] / params.lfp.fs
- # t_idx = tt < 1
- # Sxx1_norm = Sxx1_norm[:, np.newaxis].repeat(len(tt), 1)
- # Sxx2_norm = Sxx2_norm[:, np.newaxis].repeat(len(tt), 1)
- # # if params.lfp.normalize:
- # # Sxx1 = Sxx1 / Sxx1_norm
- # # Sxx2 = Sxx2 / Sxx2_norm
- # # PLOT RESULTS
- # # ----------------------------------
- # if params.lfp.plot.general:
- # # plot_spectra()
- # plot_psth(ff=ff, ch_id=0, trial_id=0, step=100)
- # pass
- # # xxx
- # plt.ioff()
- # for trial_id in range(4, 5):
- # for ch_id in range(ch_nr1):
- # plot_psth(ff=ff, ch_id=ch_id, trial_id=0, step=100)
- # fig2 = plt.gcf()
- # fname = path + f'results/trial_{trial_id}_ch_{ch_id}.png'
- # fig2.savefig(fname)
- # print(ch_id)
|