lfp_analysis_exploration.py 26 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696
  1. '''
  2. description: offline analysis of kiap data
  3. author: Ioannis Vlachos
  4. date: 02.04.2019
  5. Copyright (c) 2019 Ioannis Vlachos.
  6. All rights reserved.'''
  7. import re
  8. import os
  9. import glob
  10. import importlib
  11. import urllib
  12. import time
  13. import numpy as np
  14. import matplotlib.pyplot as plt
  15. import matplotlib as mpl
  16. from scipy import signal
  17. from scipy import stats
  18. from cachetools import cached, LRUCache, TTLCache
  19. from neo.io.blackrockio import BlackrockIO
  20. import aux
  21. from aux import log
  22. from analytics import analytics1
  23. importlib.reload(analytics1)
# @cached(cache={})
def read_data():
    '''returns first array1 and then array2 channels

    Reads an analog-signal chunk from the module-level BlackrockIO ``reader``
    for the union of the array-1 and array-2 channel indices configured in
    ``params.lfp``. Returns a (samples, channels) array.

    NOTE(review): ``eval(str(params.lfp.i_stop))`` executes arbitrary text from
    the config file — acceptable only for a trusted local config; consider
    explicit parsing (int / None) instead.
    NOTE(review): the set union yields channels in sorted index order —
    presumably array1 indices precede array2's; confirm against the config.
    '''
    data = reader.get_analogsignal_chunk(i_stop=eval(str(params.lfp.i_stop)),
                                         channel_indexes=list(set(params.lfp.array1) | set(params.lfp.array2)))
    return data
  30. def get_triggers(n_triggers=10, verbose=True):
  31. '''get triggers from corresponding nev files'''
  32. try:
  33. triggers_ts = [tt[0] for tt in reader.nev_data['Comments'][0]]
  34. triggers_txt = [tt[5].decode('utf-8') for tt in reader.nev_data['Comments'][0]]
  35. except:
  36. log.error('Triggers could not be loaded. Does Nev file exist?')
  37. return None, None
  38. if verbose:
  39. print(reader.nev_data['Comments'])
  40. triggers= [list() for ii in range(8)]
  41. for ii in range(len(triggers_txt)):
  42. if 'SchliesseHand, response, start' in triggers_txt[ii]:
  43. triggers[0].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  44. elif 'BeugeRechtenMittelfinger, response, start' in triggers_txt[ii]:
  45. triggers[1].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  46. elif 'BeugeRechtenZeigefinger, response, start' in triggers_txt[ii]:
  47. triggers[2].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  48. elif 'BeugeRechtenDaumen, response, start' in triggers_txt[ii]:
  49. triggers[3].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  50. elif 'OeffneHand, response, start' in triggers_txt[ii]:
  51. triggers[4].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  52. elif 'StreckeRechtenMittelfinger, response, start' in triggers_txt[ii]:
  53. triggers[5].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  54. elif 'StreckeRechtenZeigefinger, response, start' in triggers_txt[ii]:
  55. triggers[6].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  56. elif 'StreckeRechtenDaumen, response, start' in triggers_txt[ii]:
  57. triggers[7].append(triggers_ts[ii]// params.lfp.sampling_ratio)
  58. for ii in range(8):
  59. triggers[ii] = np.array(triggers[ii])
  60. # triggers = np.array(triggers_yes)
  61. # triggers_no = np.array(triggers_no)
  62. # get timestamps for ns2
  63. # triggers = triggers_yes // params.lfp.sampling_ratio
  64. # triggers_no_ns2 = triggers_no // params.lfp.sampling_ratio
  65. return triggers
def get_valid_trials(triggers_no_ns2, triggers_yes_ns2):
    '''Mask trials whose peri-stimulus window contains amplitude artifacts.

    Returns a (10, 2) boolean array — column 0 for no-trials, column 1 for
    yes-trials; False marks a trial in which any sample of any channel of the
    global ``data1`` exceeds ``params.lfp.artifact_thr`` in magnitude.

    NOTE(review): the mask is hard-coded to 10 trials per condition — confirm
    this matches the actual trigger counts.
    '''
    trials_msk = np.ones((10, 2), dtype=bool)
    for ii, tt in enumerate(triggers_no_ns2):
        # Window is [tt + psth_win[0], tt + psth_win[1]) around the trigger.
        tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
        if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
            trials_msk[ii, 0] = False
    for ii, tt in enumerate(triggers_yes_ns2):
        tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
        if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
            trials_msk[ii, 1] = False
        print(ii)  # debug output; placement reconstructed — presumably per yes-trial
    print('\nTrials excluded', np.where(trials_msk[:, 0] == False)[0], np.where(trials_msk[:, 1] == False)[0])
    return trials_msk
  79. def calc_cc():
  80. '''calculate time resolved correlation coefficients betweech channels of each array'''
  81. cc_step = 5000
  82. rng = range(0, data1.shape[0], cc_step)
  83. cc1 = np.zeros((len(rng), 3))
  84. cc2 = np.zeros((len(rng), 3))
  85. hist1 = np.zeros((len(rng), 100))
  86. hist2 = np.zeros((len(rng), 100))
  87. for ii, idx in enumerate(rng):
  88. # cc1[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
  89. # cc1[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
  90. # cc1[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
  91. cc1[ii, 0] = np.min(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
  92. cc1[ii, 1] = np.max(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
  93. cc1[ii, 2] = np.mean(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
  94. # hist1[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array1].T)).flatten(), 100)[1][:-1]
  95. # cc2[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
  96. # cc2[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
  97. # cc2[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
  98. cc2[ii, 0] = np.min(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
  99. cc2[ii, 1] = np.max(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
  100. cc2[ii, 2] = np.mean(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
  101. # hist2[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array2].T)), 100)[1][:-1]
  102. # print(ii)
  103. if params.lfp.plot.general:
  104. plt.figure()
  105. plt.plot(cc1)
  106. plt.gca().set_prop_cycle(None)
  107. plt.plot(c2, '-.')
  108. plt.show()
  109. return None
  110. def compute_psth(triggers, arr_nr=1, sub_band=-1):
  111. '''compute the psth for no and yes questions'''
  112. psth_len = np.diff(psth_win)[0]
  113. if arr_nr == 1:
  114. ch_nr = ch_nr1
  115. if sub_band == -1:
  116. data_tmp = data01 # raw data
  117. else:
  118. data_tmp = data1[:, :, sub_band] # sub_band
  119. elif arr_nr == 2:
  120. ch_nr = ch_nr2
  121. if sub_band == -1:
  122. data_tmp = data02 # raw data
  123. else:
  124. data_tmp = data2[:, :, sub_band] # sub_band
  125. psth = [list() for ii in range(8)]
  126. for ii in range(8):
  127. psth[ii] = np.zeros((len(triggers[ii]), psth_len, ch_nr))
  128. for jj in range(8):
  129. for ii in range(len(triggers[jj])):
  130. if (triggers[jj][ii] + psth_win[0] > 0) & (triggers[jj][ii] + psth_win[1] < len(data2)):
  131. tmp = data_tmp[(triggers[jj][ii] + psth_win[0]):(triggers[jj][ii] + psth_win[1]), :]
  132. psth[jj][ii, :, :] = tmp
  133. # psth_yes, psth_no = remove_trials(psth_yes, psth_no)
  134. return psth
  135. def remove_channels(data01, data02):
  136. '''remove channels for which variance lies outside 25-75 percentiles'''
  137. var1 = np.var(data01, axis=0)
  138. var2 = np.var(data02, axis=0)
  139. L1 = np.percentile(var1, 25, axis=0, keepdims=True)
  140. L2 = np.percentile(var2, 25, axis=0, keepdims=True)
  141. U1 = np.percentile(var1, 75, axis=0, keepdims=True)
  142. U2 = np.percentile(var2, 75, axis=0, keepdims=True)
  143. w = 1 # manual parameter that affects the valid range
  144. valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
  145. valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
  146. # plt.plot(var1, 'C0')
  147. # plt.plot(var2, 'C1')
  148. # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
  149. # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
  150. array1_msk = np.unique(((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0])
  151. array2_msk = np.unique(((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0])
  152. array1_msk = np.hstack((array1_msk, params.lfp.array1_exclude)).astype(int)
  153. array2_msk = np.hstack((array2_msk, params.lfp.array2_exclude)).astype(int)
  154. array1_msk.sort()
  155. array2_msk.sort()
  156. print(f'excluded channels, array1: {array1_msk}')
  157. print(f'excluded channels, array2: {array2_msk}')
  158. plot_channels(data01, [], arr_id=1, fs=params.lfp.fs, array_msk=array1_msk, i_stop=1000, step=1, color='C0')
  159. plot_channels(data02, [], arr_id=2, fs=params.lfp.fs, array_msk=array2_msk, i_stop=1000, step=1, color='C1')
  160. data01 = np.delete(data01, array1_msk, axis=1)
  161. data02 = np.delete(data02, array2_msk, axis=1)
  162. return data01, data02, array1_msk, array2_msk
  163. def remove_trials(psth_yes, psth_no):
  164. '''remove trials for which variance lies outside 25-75 percentiles'''
  165. var1 = np.var(psth_yes, axis=1)
  166. var2 = np.var(psth_no, axis=1)
  167. L1 = np.percentile(var1, 25, axis=0, keepdims=True)
  168. L2 = np.percentile(var2, 25, axis=0, keepdims=True)
  169. U1 = np.percentile(var1, 75, axis=0, keepdims=True)
  170. U2 = np.percentile(var2, 75, axis=0, keepdims=True)
  171. w = 10
  172. valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
  173. valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
  174. # array1_msk = np.unique((np.where(var1 < valid1[0])[0] or np.where(var1 > valid1[1])[0]))
  175. # array2_msk = np.unique((np.where(var2 < valid2[0])[0] or np.where(var2 > valid2[1])[0]))
  176. array1_msk = ((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0]
  177. array2_msk = ((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0]
  178. print(f'excluded yes-trials: {array1_msk}')
  179. print(f'excluded no-trials: {array2_msk}')
  180. psth_yes = np.delete(psth_yes, array1_msk, axis=0)
  181. psth_no = np.delete(psth_no, array2_msk, axis=0)
  182. return psth_yes, psth_no
  183. def plot_channels(data, ch_ids, fs, array_msk=[], arr_id=1, i_start=0, i_stop=10000, step=5, color='C0'):
  184. '''plot all data from array1 or array2 in the time domain'''
  185. if not params.lfp.zscore: # z-score only for visualization
  186. data = stats.zscore(data)
  187. if ch_ids == []:
  188. ch_ids = range(data.shape[1])
  189. if i_stop == -1:
  190. i_stop = data.shape[0]
  191. offset = np.cumsum(4 * np.var(data[:, ch_ids], axis=0)[:, np.newaxis]).T
  192. plt.figure()
  193. plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, ch_ids] + offset, color=color, lw=1)
  194. if array_msk.size > 0: # highlight excluded channels
  195. plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, array_msk] + offset[array_msk], color='C3', lw=1)
  196. plt.xlabel('Time (sec)')
  197. plt.yticks(offset[::step], range(0, len(ch_ids), step))
  198. plt.ylim(0, offset[-1] + 4)
  199. plt.title(f'raw data from array {arr_id}')
  200. plt.tight_layout()
  201. return None
def plot_spectra(ch_ids=[0]):
    '''plot spectra of raw data and sub_bands, single channels from ch_ids and averages

    Layout: 5x2 grid — column 1 is array 1, column 2 is array 2; rows are the
    raw spectrum, sub-bands 0-2, and the raw spectrum on log-log axes. Each
    panel shows one channel (ch_ids[0]) plus the across-channel average.
    Relies on module-level FFT results (Y01, Y02, Y1, Y2) and frequency
    vectors (ff01, ff02, ff1, ff2).

    NOTE(review): array-1 raw spectra are plotted against ff1 (the sub-band
    frequency vector) while array-2 raw uses ff02, and the final log-log panel
    plots Y02 against ff01 — confirm these vectors have matching lengths.
    NOTE(review): ch_ids=[0] is a mutable default; harmless here (never
    mutated) but worth replacing with a tuple.
    '''
    mpl.rcParams['axes.formatter.limits'] = (-2, 2)
    yy = data01.std() * 2
    xx = np.arange(data1.shape[0]) / params.lfp.fs  # plot raw and filtered traces for visual inspection
    xmax = 150
    plt.figure(1, figsize=(10, 10))
    plt.clf()
    # column 1: first array
    plt.subplot(521)  # plot raw
    plt.plot(ff1, Y01[:, ch_ids[0]], 'C0', label=f'channel: {array1[ch_ids[0]]}')
    plt.plot(ff1, Y01.mean(1), 'C3', alpha=0.2, label='average')
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('array1 raw')
    # plt.legend()
    plt.gca().set_xticklabels([])
    plt.legend()
    plt.subplot(523)
    plt.plot(ff1, Y1[:, ch_ids[0], 0], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 0].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 0')
    plt.subplot(525)
    plt.plot(ff1, Y1[:, ch_ids[0], 1], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 1].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 1')
    plt.subplot(527)
    plt.plot(ff1, Y1[:, ch_ids[0], 2], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 2].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('sub_band 2')
    plt.subplot(529)
    plt.loglog(ff1, Y01[:, ch_ids[0]], 'C0')
    plt.loglog(ff1, Y01.mean(1), 'C3', alpha=0.2)
    plt.title('array1 raw loglog')
    plt.xlabel('Frequency (Hz)')
    # plt.legend()
    # column 2: second array
    plt.subplot(522)
    plt.plot(ff02, Y02[:, ch_ids[0]], 'C0', label=f'channel: {array2[ch_ids[0]]}')
    plt.plot(ff02, Y02.mean(1), 'C3', alpha=0.2, label='average')
    plt.xlim(xmin=0, xmax=xmax)
    # plt.gca().set_yticklabels([])
    plt.title('array2 raw')
    # xxx
    # plt.legend()
    plt.gca().set_xticklabels([])
    plt.legend()
    plt.subplot(524)
    plt.plot(ff2, Y2[:, ch_ids[0], 0], 'C0', label='array1')
    plt.plot(ff2, Y2[:, :, 0].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 0')
    plt.subplot(526)
    plt.plot(ff2, Y2[:, ch_ids[0], 1], 'C0', label='array1')
    plt.plot(ff2, Y2[:, :, 1].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    # plt.gca().set_yticklabels([])
    plt.title('sub_band 1')
    plt.subplot(528)
    plt.plot(ff2, Y2[:, ch_ids[0], 2], 'C0', label='array1')
    plt.plot(ff2, Y2[:, :, 2].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('sub_band 2')
    # plt.gca().set_yticklabels([])
    plt.subplot(5, 2, 10)
    plt.loglog(ff01, Y02[:, ch_ids[0]], 'C0')
    plt.loglog(ff01, Y02.mean(1), 'C3', alpha=0.2)
    # plt.gca().set_yticklabels([])
    plt.title('raw loglog')
    plt.xlabel('Frequency (Hz)')
    plt.tight_layout()
    plt.draw()
    plt.show()
    return None
def plot_spectra2(arr_nr=1):
    '''plot spectra of raw data for all channels of arr_nr, compare with spectrum of white noise

    One log-log subplot per channel on an 8x8 grid; each panel overlays the
    channel's raw spectrum (C0) with the spectrum of Gaussian white noise of
    the same shape (C1) as a flatness reference.

    NOTE(review): ff1 is used as the frequency axis for both arrays and for
    the noise spectrum — confirm the vectors have matching lengths for
    arr_nr == 2.
    '''
    mpl.rcParams['axes.formatter.limits'] = (-2, 2)
    if arr_nr == 1:
        YY = Y01
        d0, d1 = data01.shape
    elif arr_nr == 2:
        YY = Y02
        d0, d1 = data02.shape
    # White-noise reference with the same (samples, channels) shape.
    Y_GWN, _ = analytics1.calc_fft(np.random.randn(d0, d1), params.lfp.fs, i_stop=1e5, axis=0)
    plt.figure(1, figsize=(10, 10))
    plt.clf()
    for ii in range(YY.shape[1]):
        ax = plt.subplot(8, 8, ii + 1)
        plt.loglog(ff1, YY[:, ii], 'C0')
        plt.loglog(ff1, Y_GWN[:, ii], 'C1', alpha=0.5)
        plt.ylim(10e-16, 10e2)
        # if ii<56:
        # Hide x-ticks everywhere except the bottom row of the grid.
        if (ii // 8) < (d1 // 8):
            ax.set_xticks([])
        # Hide y-ticks everywhere except the leftmost column.
        if np.mod(ii, 8) != 0:
            ax.set_yticks([])
    plt.draw()
    return None
def plot_psth(ff=[], sub_band=0, ch_id=0, trial_id=0, step=10):
    ''' plot psth for no and yes responses separately

    NOTE(review): this definition is shadowed by the second `plot_psth`
    defined further down in this file, so it is dead code as the file stands.
    It also references globals (psth_no, psth_yes, Sxx2, Sxx1_norm, Sxx2_norm)
    that the current top-level script no longer creates — it would raise
    NameError if called. Kept for reference.

    Layout: row 1 — raw trace + selected sub-band with trigger stems;
    rows 2-4 — trial-averaged PSTHs and single-trial / trial-averaged
    spectrograms for the no (left) and yes (right) conditions.
    '''
    yy = max(data01.std(), data1[:, ch_id, sub_band].std()) * 2
    xx1 = np.arange(data1.shape[0]) / params.lfp.fs
    xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs
    f_idx = ff < 150
    plt.figure(figsize=(10, 10))
    plt.clf()
    log.info(f'Selected sub-band: {sub_band}')
    plt.subplot(411)
    plt.plot(xx1[::step], data01[::step, ch_id], label='raw ns2')  # plot raw
    plt.plot(xx1[::step], data1[::step, ch_id, sub_band], 'C1', alpha=0.5, label=f'sub-band: {sub_band}')  # plot sub_band (low, middle, high)
    for ii in range(8):
        plt.stem(triggers[ii] / params.lfp.fs, triggers[ii] * 0 + yy * 0.8, markerfmt='C3o')  # red
    # plt.stem(triggers_yes_ns2 / params.lfp.fs, triggers_yes_ns2 * 0 + yy * 0.8, markerfmt='C2o', label='yes')  # green
    plt.ylim(-yy, yy)
    plt.xlabel('sec')
    plt.legend(loc=1)
    plt.subplot(423)  # plot psth no-questions
    plt.plot(xx2, psth_no.mean(0)[:, ch_id], label='trial av. no')  # average accross trials, plot 1 channel
    plt.plot(xx2, psth_no.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. no')  # average accross trials and channels
    plt.xlabel('sec')
    plt.legend()
    plt.subplot(424)  # plot psth yes-questions
    plt.plot(xx2, psth_yes.mean(0)[:, ch_id], label='trial av. yes')  # average accross trials, plot 1 channel
    plt.plot(xx2, psth_yes.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. yes')  # average accross trials and channels
    plt.xlabel('sec')
    plt.legend()
    plt.subplot(425)  # plot spectrogram no-question, single channel and trial
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :] / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :])
    # Shared color limit across the no/yes single-trial panels.
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(426)  # plot spectrogram yes-question, single channel and trial
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :] / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :])
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(427)  # plot spectrogram no-question, averaged
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0) / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0))
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(428)  # plot spectrogram yes-question, averaged
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0) / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0))
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.tight_layout()
    plt.show()
    return None
  379. def plot_psth(ff=[], sub_band=0, ch_id=0, trial_id=0, step=10):
  380. ''' plot psth for no and yes responses separately'''
  381. xx1 = np.arange(data1.shape[0]) / params.lfp.fs
  382. xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs
  383. f_idx = ff < 150
  384. plt.figure(figsize=(10, 10))
  385. plt.clf()
  386. for ii in range(8):
  387. plt.subplot(4,4,ii+1)
  388. plt.pcolormesh(tt, ff[f_idx], Sxx1[ii][trial_id, f_idx, ch_id, :])
  389. if params.lfp.sub_band<1:
  390. fct = 10
  391. if params.lfp.sub_band==1:
  392. fct = 10
  393. elif params.lfp.sub_band==2:
  394. fct = 5
  395. cmax = np.array([Sxx1[ii][:,f_idx, ch_id,:].mean() for ii in range(8)]).max()*fct
  396. for ii in range(8):
  397. plt.subplot(4,4,8+ii+1)
  398. plt.pcolormesh(tt, ff[f_idx], np.mean(Sxx1[ii][:, f_idx, ch_id, :], axis=0))
  399. plt.clim(0,cmax)
  400. plt.tight_layout()
  401. # plt.show()
  402. return None
# ----------------------------------------------------------------------
# Top-level analysis pipeline: load config, read ns2 data + nev triggers,
# clean channels, band-pass filter, FFT, PSTH, spectrograms, and save one
# figure per channel. Designed for interactive re-runs (see the locals()
# guard below).
# ----------------------------------------------------------------------
params = aux.load_config()
# params.lfp.array1 = np.delete(params.lfp.array1, params.lfp.array1_exclude)
# params.lfp.array2 = np.delete(params.lfp.array2, params.lfp.array2_exclude)
psth_win = params.lfp.psth_win  # [start, stop] window in ns2 samples around a trigger
# file_name = '/home/vlachos/data_kiap/20190321-135311/20190321-135311-001'
# path = '/home/kiap/Documents/kiap/k01/20190414-123429/'
# path = '/media/vlachos/kiap_backup/Recordings/K01/Recordings/20190415-145905/'
path = '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
# file_name = path + '20190415-145905-004.ns2'
file_name = path + '20190326-160445-001.ns2'
# file_name = '/media/vlachos/kiap_backup/Recordings/K01/bci_sessions/20190321-140130/20190321-140130-003'
# file_name = path + '20190414-123429-001.ns2'
array1 = params.lfp.array1
array2 = params.lfp.array2
if not 'data0' in locals():  # load data if not already loaded by previous call
    # READ DATA
    # ----------------------------------
    reader = BlackrockIO(filename=file_name)
    data0 = read_data()  # (samples, channels)
    data0 = data0.copy().astype(np.float64)
    # IMPORT TRIGGERS FROM NEV FILE
    # ----------------------------------
    # NOTE(review): the extent of this guard was reconstructed from a
    # flattened source — confirm whether the trigger import belongs inside.
    triggers = get_triggers(n_triggers=20)
# trials_msk = get_valid_trials(triggers_no_ns2, triggers_yes_ns2)
# data1[data1 > params.lfp.artifact_thr] = 0
# data1[data1 < -params.lfp.artifact_thr] = 0
# data0 = data0 - np.mean(data0, 1)[:, np.newaxis]
# reader.nev_data['Comments'][0]
# Z-SCORE DATA
# ----------------------------------
# NOTE(review): on an interactive re-run data0 survives in locals() and would
# be z-scored again if this line re-executes — verify intended behavior.
if params.lfp.zscore:
    data0 = stats.zscore(data0)
# SEPARATE DATA INTO CHANNELS FROM ARRAY-1 AND ARRAY-2
# ----------------------------------
data01 = data0[:, :len(array1)]  # array-1 electrodes
data02 = data0[:, len(array1):len(array1) + len(array2)]  # array-2 electrodes
# EXCLUDE CHANNELS IF VARIANCE IS OUTSIDE PERCENTILES
# ----------------------------------------------------------------------
if params.lfp.exclude:
    data01, data02, array1_msk, array2_msk = remove_channels(data01, data02)
    ch_nr1 = len(params.lfp.array1) - len(array1_msk)
    ch_nr2 = len(params.lfp.array2) - len(array2_msk)
else:
    ch_nr1 = len(params.lfp.array1)
    ch_nr2 = len(params.lfp.array2)
# PERFORM COMMON AVERAGE REFERENCING
# ----------------------------------------------------------------------
if params.lfp.car:
    data01 = data01 - np.repeat(np.mean(data01, axis=1)[:, np.newaxis], data01.shape[1], 1)
    data02 = data02 - np.repeat(np.mean(data02, axis=1)[:, np.newaxis], data02.shape[1], 1)
# DEVIDE DATA INTO 3 SUB-BANDS
# ----------------------------------
data1 = np.zeros((data01.shape[0], data01.shape[1], 3))  # (samples, channels, 3)
data2 = np.zeros((data02.shape[0], data02.shape[1], 3))  # (samples, channels, 3)
# Y1 = np.zeros((data01.shape[0], data01.shape[1], 3))
# FILTER IN THE THREE DIFFERENT BANDS
# ----------------------------------
data1[:, :, 0], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
data1[:, :, 1], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
data1[:, :, 2], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
data2[:, :, 0], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
data2[:, :, 1], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
data2[:, :, 2], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
# PERFORM STFT USING FFT
# ----------------------------------
Y01, ff01 = analytics1.calc_fft(data01, params.lfp.fs, i_stop=1e5, axis=0)
Y02, ff02 = analytics1.calc_fft(data02, params.lfp.fs, i_stop=1e5, axis=0)
Y1, ff1 = analytics1.calc_fft(data1, params.lfp.fs, i_stop=1e5, axis=0)
Y2, ff2 = analytics1.calc_fft(data2, params.lfp.fs, i_stop=1e5, axis=0)
# COMPUTE PSTH
# ----------------------------------
# psth shape: (trials, time, channels)
# offset = 600 * params.lfp.fs  # offset in sec to get data for normalization
# triggers_yes_norm = np.hstack((triggers_yes_ns2 + offset, triggers_yes_ns2 + offset + 10))
# triggers_no_norm = np.hstack((triggers_yes_ns2 + offset, triggers_yes_ns2 + offset + 10))
# if np.any(triggers_yes_norm > data01.shape[1]):
#     log.error('triggers_norm outside imported data')
psth = compute_psth(triggers, arr_nr=1, sub_band=params.lfp.sub_band)
# psth_norm1, psth_norm2 = compute_psth(triggers_yes_norm, triggers_no_norm, arr_nr=1, sub_band=params.lfp.sub_band)
# COMPUTE PSTH SPECTROGRAMS
# ----------------------------------
nperseg = params.lfp.spectra.spgr_len
nperseg = min(nperseg, psth[0].shape[1])  # segment cannot exceed the psth window
# ff, tt, Sxx1 = signal.spectrogram(psth_no.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# ff, tt, Sxx2 = signal.spectrogram(psth_yes.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# Sxx1.shape = (trials, ff, channels, time)
# Sxx1_norm.shape = (ff,time, channels)
Sxx1 = [list() for ii in range(8)]
for ii in range(8):
    ff, tt, Sxx1_tmp = signal.spectrogram(psth[ii], params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
    Sxx1[ii] = Sxx1_tmp
# ff, tt, Sxx1_norm = signal.spectrogram(psth_norm1.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# ff, tt, Sxx2_norm = signal.spectrogram(psth_norm2.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# Sxx1 = Sxx1.mean(1)
# Sxx2 = Sxx2.mean(1)
# Sxx1_norm = Sxx1_norm.mean(2)
# Sxx2_norm = Sxx2_norm.mean(2)
# Shift spectrogram time axis so t=0 is the trigger (psth_win[0] is negative).
tt = tt + psth_win[0] / params.lfp.fs
t_idx = tt < 1
# Sxx1_norm = Sxx1_norm[:, np.newaxis].repeat(len(tt), 1)
# Sxx2_norm = Sxx2_norm[:, np.newaxis].repeat(len(tt), 1)
# if params.lfp.normalize:
#     Sxx1 = Sxx1 / Sxx1_norm
#     Sxx2 = Sxx2 / Sxx2_norm
# 11. PLOT RESULTS
# ----------------------------------
if params.lfp.plot.general:
    # plot_spectra()
    plot_psth(ff=ff, sub_band=params.lfp.sub_band, ch_id=0, trial_id=0, step=100)
    pass
# xxx
# Batch-save one PSTH figure per channel (interactive mode off).
plt.ioff()
for trial_id in range(0, 1):
    # for ch_id in range(ch_nr1 - 1, ch_nr1):
    for ch_id in range(ch_nr1):
        plot_psth(ff=ff, sub_band=params.lfp.sub_band, ch_id=ch_id, trial_id=0, step=100)
        fig2 = plt.gcf()
        fname = path + f'results/trial_{trial_id}_ch_{ch_id}_sband_{params.lfp.sub_band}.png'
        fig2.savefig(fname)
        print(ch_id)