# lfp_analysis1.py
'''
description: offline analysis of kiap data
author: Ioannis Vlachos
date: 02.04.2019
Copyright (c) 2019 Ioannis Vlachos.
All rights reserved.'''
import re
import os
import glob
import importlib
import urllib
import time

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy import signal
from scipy import stats
from cachetools import cached, LRUCache, TTLCache
from neo.io.blackrockio import BlackrockIO

import aux
from aux import log
from analytics import analytics1

importlib.reload(analytics1)  # pick up local edits to analytics1 without restarting the session

# @cached(cache={})
def read_data():
    '''Return the raw analog data; first the array1, then the array2 channels.

    Reads a chunk from the module-level BlackrockIO ``reader`` up to
    ``params.lfp.i_stop`` samples.  Returns an array of shape
    (samples, channels).
    '''
    # NOTE(review): eval() on a config value is dangerous if the config is
    # not fully trusted; params.lfp.i_stop is presumably a number or a
    # string such as '1e5' -- confirm and prefer int(float(...)).
    # NOTE(review): the set union below does not guarantee channel order;
    # presumably array1/array2 indices are disjoint and sorted -- confirm.
    data = reader.get_analogsignal_chunk(i_stop=eval(str(params.lfp.i_stop)),
                                         channel_indexes=list(set(params.lfp.array1) | set(params.lfp.array2)))
    return data
  30. def get_triggers(n_triggers=10, verbose=True, speller='question'):
  31. '''get triggers from corresponding nev files'''
  32. try:
  33. triggers_ts = [tt[0] for tt in reader.nev_data['Comments'][0]]
  34. triggers_txt = [tt[5].decode('utf-8') for tt in reader.nev_data['Comments'][0]]
  35. except:
  36. log.error('Triggers could not be loaded. Does Nev file exist?')
  37. return None, None
  38. if verbose:
  39. print(reader.nev_data['Comments'])
  40. triggers_yes = []
  41. triggers_no = []
  42. if speller == 'question':
  43. class1 = 'yes'
  44. class2 = 'no'
  45. elif speller == 'feedback':
  46. class1 = 'up'
  47. class2 = 'down'
  48. for ii in range(len(triggers_txt)):
  49. if f'{class1}, response, start' in triggers_txt[ii]:
  50. triggers_yes.append(triggers_ts[ii])
  51. elif f'{class2}, response, start' in triggers_txt[ii]:
  52. triggers_no.append(triggers_ts[ii])
  53. triggers_yes = np.array(triggers_yes)
  54. triggers_no = np.array(triggers_no)
  55. # get timestamps for ns2
  56. triggers_yes_ns2 = triggers_yes // params.lfp.sampling_ratio
  57. triggers_no_ns2 = triggers_no // params.lfp.sampling_ratio
  58. return triggers_no_ns2[:n_triggers], triggers_yes_ns2[:n_triggers]
  59. def get_valid_trials(triggers_no_ns2, triggers_yes_ns2):
  60. trials_msk = np.ones((10, 2), dtype=bool)
  61. for ii, tt in enumerate(triggers_no_ns2):
  62. tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
  63. if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
  64. trials_msk[ii, 0] = False
  65. for ii, tt in enumerate(triggers_yes_ns2):
  66. tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
  67. if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
  68. trials_msk[ii, 1] = False
  69. print(ii)
  70. print('\nTrials excluded', np.where(trials_msk[:, 0] == False)[0], np.where(trials_msk[:, 1] == False)[0])
  71. return trials_msk
  72. def calc_cc():
  73. '''calculate time resolved correlation coefficients betweech channels of each array'''
  74. cc_step = 5000
  75. rng = range(0, data1.shape[0], cc_step)
  76. cc1 = np.zeros((len(rng), 3))
  77. cc2 = np.zeros((len(rng), 3))
  78. hist1 = np.zeros((len(rng), 100))
  79. hist2 = np.zeros((len(rng), 100))
  80. for ii, idx in enumerate(rng):
  81. # cc1[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
  82. # cc1[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
  83. # cc1[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1)))
  84. cc1[ii, 0] = np.min(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
  85. cc1[ii, 1] = np.max(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
  86. cc1[ii, 2] = np.mean(np.triu(np.corrcoef(data01[idx:idx + cc_step, :].T), k=1))
  87. # hist1[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array1].T)).flatten(), 100)[1][:-1]
  88. # cc2[ii, 0] = np.min(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
  89. # cc2[ii, 1] = np.max(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
  90. # cc2[ii, 2] = np.mean(np.abs(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1)))
  91. cc2[ii, 0] = np.min(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
  92. cc2[ii, 1] = np.max(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
  93. cc2[ii, 2] = np.mean(np.triu(np.corrcoef(data02[idx:idx + cc_step, :].T), k=1))
  94. # hist2[ii, :] = np.histogram(np.triu(np.corrcoef(data0[idx:idx + cc_step, array2].T)), 100)[1][:-1]
  95. # print(ii)
  96. if params.lfp.plot.general:
  97. plt.figure()
  98. plt.plot(cc1)
  99. plt.gca().set_prop_cycle(None)
  100. plt.plot(c2, '-.')
  101. plt.show()
  102. return None
  103. def compute_psth(triggers_yes_ns2, triggers_no_ns2, arr_nr=1, sub_band=-1):
  104. '''compute the psth for no and yes questions'''
  105. psth_len = np.diff(psth_win)[0]
  106. if arr_nr == 1:
  107. ch_nr = ch_nr1
  108. if sub_band == -1:
  109. data_tmp = data01 # raw data
  110. else:
  111. data_tmp = data1[:, :, sub_band] # sub_band
  112. elif arr_nr == 2:
  113. ch_nr = ch_nr2
  114. if sub_band == -1:
  115. data_tmp = data02 # raw data
  116. else:
  117. data_tmp = data2[:, :, sub_band] # sub_band
  118. psth_no = np.zeros((len(triggers_no_ns2), psth_len, ch_nr))
  119. psth_yes = np.zeros((len(triggers_yes_ns2), psth_len, ch_nr))
  120. for ii in range(len(triggers_no_ns2)):
  121. if (triggers_no_ns2[ii] + psth_win[0] > 0) & (triggers_no_ns2[ii] + psth_win[1] < len(data2)):
  122. tmp = data_tmp[(triggers_no_ns2[ii] + psth_win[0]):(triggers_no_ns2[ii] + psth_win[1]), :]
  123. # if not (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
  124. psth_no[ii, :, :] = tmp
  125. # else:
  126. # print(f'Excluded no-trial: {ii}')
  127. for ii in range(len(triggers_yes_ns2)):
  128. if (triggers_yes_ns2[ii] + psth_win[0] > 0) & (triggers_yes_ns2[ii] + psth_win[1] < len(data2)):
  129. tmp = data_tmp[(triggers_yes_ns2[ii] + psth_win[0]):(triggers_yes_ns2[ii] + psth_win[1]), :]
  130. # if not (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
  131. psth_yes[ii, :, :] = tmp
  132. # else:
  133. # print(f'Excluded yes-trial: {ii}')
  134. psth_yes, psth_no = remove_trials(psth_yes, psth_no)
  135. return psth_no, psth_yes
  136. def remove_channels(data01, data02):
  137. '''remove channels for which variance lies outside 25-75 percentiles'''
  138. var1 = np.var(data01, axis=0)
  139. var2 = np.var(data02, axis=0)
  140. L1 = np.percentile(var1, 25, axis=0, keepdims=True)
  141. L2 = np.percentile(var2, 25, axis=0, keepdims=True)
  142. U1 = np.percentile(var1, 75, axis=0, keepdims=True)
  143. U2 = np.percentile(var2, 75, axis=0, keepdims=True)
  144. w = 1 # manual parameter that affects the valid range
  145. valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
  146. valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
  147. # plt.plot(var1, 'C0')
  148. # plt.plot(var2, 'C1')
  149. # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
  150. # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
  151. array1_msk = np.unique(((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0])
  152. array2_msk = np.unique(((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0])
  153. array1_msk = np.hstack((array1_msk, params.lfp.array1_exclude)).astype(int)
  154. array2_msk = np.hstack((array2_msk, params.lfp.array2_exclude)).astype(int)
  155. array1_msk.sort()
  156. array2_msk.sort()
  157. print(f'excluded channels, array1: {array1_msk}')
  158. print(f'excluded channels, array2: {array2_msk}')
  159. plot_channels(data01, [], arr_id=1, fs=params.lfp.fs, array_msk=array1_msk, i_stop=1000, step=1, color='C0')
  160. plot_channels(data02, [], arr_id=2, fs=params.lfp.fs, array_msk=array2_msk, i_stop=1000, step=1, color='C1')
  161. data01 = np.delete(data01, array1_msk, axis=1)
  162. data02 = np.delete(data02, array2_msk, axis=1)
  163. return data01, data02, array1_msk, array2_msk
  164. def remove_trials(psth_yes, psth_no):
  165. '''remove trials for which variance lies outside 25-75 percentiles'''
  166. var1 = np.var(psth_yes, axis=1)
  167. var2 = np.var(psth_no, axis=1)
  168. L1 = np.percentile(var1, 25, axis=0, keepdims=True)
  169. L2 = np.percentile(var2, 25, axis=0, keepdims=True)
  170. U1 = np.percentile(var1, 75, axis=0, keepdims=True)
  171. U2 = np.percentile(var2, 75, axis=0, keepdims=True)
  172. w = 10
  173. valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
  174. valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
  175. # array1_msk = np.unique((np.where(var1 < valid1[0])[0] or np.where(var1 > valid1[1])[0]))
  176. # array2_msk = np.unique((np.where(var2 < valid2[0])[0] or np.where(var2 > valid2[1])[0]))
  177. array1_msk = ((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0]
  178. array2_msk = ((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0]
  179. print(f'excluded yes-trials: {array1_msk}')
  180. print(f'excluded no-trials: {array2_msk}')
  181. psth_yes = np.delete(psth_yes, array1_msk, axis=0)
  182. psth_no = np.delete(psth_no, array2_msk, axis=0)
  183. return psth_yes, psth_no
  184. def plot_channels(data, ch_ids, fs, array_msk=[], arr_id=1, i_start=0, i_stop=10000, step=5, color='C0'):
  185. '''plot all data from array1 or array2 in the time domain'''
  186. if not params.lfp.zscore: # z-score only for visualization
  187. data = stats.zscore(data)
  188. if ch_ids == []:
  189. ch_ids = range(data.shape[1])
  190. if i_stop == -1:
  191. i_stop = data.shape[0]
  192. offset = np.cumsum(4 * np.var(data[:, ch_ids], axis=0)[:, np.newaxis]).T
  193. plt.figure()
  194. plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, ch_ids] + offset, color=color, lw=1)
  195. if array_msk.size > 0: # highlight excluded channels
  196. plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, array_msk] + offset[array_msk], color='C3', lw=1)
  197. plt.xlabel('Time (sec)')
  198. plt.yticks(offset[::step], range(0, len(ch_ids), step))
  199. plt.ylim(0, offset[-1] + 4)
  200. plt.title(f'raw data from array {arr_id}')
  201. plt.tight_layout()
  202. return None
def plot_spectra(ch_ids=[0]):
    '''Plot spectra of raw data and sub_bands, single channels from ch_ids and averages.

    Column 1 shows array 1, column 2 shows array 2.  Each column contains
    the raw spectrum, the three sub-band spectra, and the raw spectrum on a
    loglog scale; each panel shows channel ch_ids[0] (C0) and the channel
    average (C3).  Reads the module-level spectra Y01/Y02 (raw), Y1/Y2
    (sub-bands) and the frequency axes ff01/ff02, ff1/ff2.
    '''
    mpl.rcParams['axes.formatter.limits'] = (-2, 2)
    yy = data01.std() * 2  # NOTE(review): yy and xx below are unused in this function
    xx = np.arange(data1.shape[0]) / params.lfp.fs  # plot raw and filtered traces for visual inspection
    xmax = 150  # frequency-axis limit (Hz) for the linear plots
    plt.figure(1, figsize=(10, 10))
    plt.clf()
    # column 1: first array
    plt.subplot(521)  # plot raw
    plt.plot(ff1, Y01[:, ch_ids[0]], 'C0', label=f'channel: {array1[ch_ids[0]]}')
    plt.plot(ff1, Y01.mean(1), 'C3', alpha=0.2, label='average')
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('array1 raw')
    # plt.legend()
    plt.gca().set_xticklabels([])
    plt.legend()
    plt.subplot(523)
    plt.plot(ff1, Y1[:, ch_ids[0], 0], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 0].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 0')
    plt.subplot(525)
    plt.plot(ff1, Y1[:, ch_ids[0], 1], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 1].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 1')
    plt.subplot(527)
    plt.plot(ff1, Y1[:, ch_ids[0], 2], 'C0', label='array1')
    plt.plot(ff1, Y1[:, :, 2].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('sub_band 2')
    plt.subplot(529)
    plt.loglog(ff1, Y01[:, ch_ids[0]], 'C0')
    plt.loglog(ff1, Y01.mean(1), 'C3', alpha=0.2)
    plt.title('array1 raw loglog')
    plt.xlabel('Frequency (Hz)')
    # plt.legend()
    # column 2: second array
    plt.subplot(522)
    plt.plot(ff02, Y02[:, ch_ids[0]], 'C0', label=f'channel: {array2[ch_ids[0]]}')
    plt.plot(ff02, Y02.mean(1), 'C3', alpha=0.2, label='average')
    plt.xlim(xmin=0, xmax=xmax)
    # plt.gca().set_yticklabels([])
    plt.title('array2 raw')
    # xxx
    # plt.legend()
    plt.gca().set_xticklabels([])
    plt.legend()
    plt.subplot(524)
    plt.plot(ff2, Y2[:, ch_ids[0], 0], 'C0', label='array1')
    plt.plot(ff2, Y2[:, :, 0].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_lb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    plt.title('sub_band 0')
    plt.subplot(526)
    plt.plot(ff2, Y2[:, ch_ids[0], 1], 'C0', label='array1')
    plt.plot(ff2, Y2[:, :, 1].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_mb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.gca().set_xticklabels([])
    # plt.gca().set_yticklabels([])
    plt.title('sub_band 1')
    plt.subplot(528)
    plt.plot(ff2, Y2[:, ch_ids[0], 2], 'C0', label='array1')
    plt.plot(ff2, Y2[:, :, 2].mean(1), 'C3', alpha=0.2)
    # plt.xlim(xmin=0, xmax=params.lfp.filter_fc_hb[1] * 2)
    plt.xlim(xmin=0, xmax=xmax)
    plt.title('sub_band 2')
    # plt.gca().set_yticklabels([])
    plt.subplot(5, 2, 10)
    # NOTE(review): this panel uses ff01 (array-1 frequency axis) with Y02
    # (array-2 spectra); the other array-2 panels use ff02 -- confirm intended.
    plt.loglog(ff01, Y02[:, ch_ids[0]], 'C0')
    plt.loglog(ff01, Y02.mean(1), 'C3', alpha=0.2)
    # plt.gca().set_yticklabels([])
    plt.title('raw loglog')
    plt.xlabel('Frequency (Hz)')
    plt.tight_layout()
    plt.draw()
    plt.show()
    return None
def plot_spectra2(arr_nr=1):
    '''Plot spectra of raw data for all channels of arr_nr in an 8x8 grid,
    intended for comparison with the spectrum of white noise.

    NOTE(review): YY (the raw FFT spectra of the selected array) is only
    used for the subplot count; the data actually plotted are the
    module-level Welch results ``ff``/``Y`` computed from the array-2
    sub-bands -- confirm whether YY and the matching ffN axis were meant
    to be plotted instead.
    '''
    mpl.rcParams['axes.formatter.limits'] = (-2, 2)
    if arr_nr == 1:
        YY = Y01
        d0, d1 = data01.shape
    elif arr_nr == 2:
        YY = Y02
        d0, d1 = data02.shape
    # Y_GWN, _ = analytics1.calc_fft(np.random.randn(d0, d1), params.lfp.fs, i_stop=1e5, axis=0)
    plt.figure(1, figsize=(10, 10))
    plt.clf()
    for ii in range(YY.shape[1]):
        ax = plt.subplot(8, 8, ii + 1)
        # plt.semilogy(ff, Y[:, ii], 'C0')
        plt.semilogy(ff, Y[:, ii, 0], 'C0')  # sub-band 0 Welch PSD of channel ii
        # plt.semilogy(ff1, Y_GWN[:, ii], 'C1', alpha=0.5)
        plt.ylim(10e-2, 10e4)
        plt.xlim(0, 30)
        # if ii<56:
        if (ii // 8) < (d1 // 8):  # hide x ticks on all but the bottom row
            ax.set_xticks([])
        if np.mod(ii, 8) != 0:  # hide y ticks on all but the leftmost column
            ax.set_yticks([])
    plt.draw()
    return None
def plot_psth(ff=[], sub_band=0, ch_id=0, trial_id=0, step=10):
    '''Plot psth for no and yes responses separately.

    Layout: row 1 -- raw and sub-band trace of channel ch_id with trigger
    times; row 2 -- trial-averaged psth (no left, yes right); row 3 --
    single-trial spectrograms; row 4 -- trial-averaged spectrograms.  If
    params.lfp.normalize is set, spectrograms are divided by the
    normalization spectrograms Sxx1_norm/Sxx2_norm.

    Parameters
    ----------
    ff : frequency axis of the spectrograms (module-level ``ff``).
    sub_band : filtered band to show in the trace plot (0..2).
    ch_id : channel index to plot.
    trial_id : trial index for the single-trial spectrograms.
    step : subsampling step for the trace plot (every step-th sample).

    Reads many module-level variables (data01, data1, psth_no, psth_yes,
    Sxx1, Sxx2, Sxx1_norm, Sxx2_norm, tt, triggers_*_ns2, psth_win, params).
    '''
    yy = max(data01.std(), data1[:, ch_id, sub_band].std()) * 2  # y-limit for the trace plot
    xx1 = np.arange(data1.shape[0]) / params.lfp.fs  # time axis of the full recording (s)
    xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs  # psth time axis (s), 0 = trigger
    f_idx = ff < 150  # show only frequencies below 150 Hz
    plt.figure(figsize=(10, 10))
    plt.clf()
    log.info(f'Selected sub-band: {sub_band}')
    plt.subplot(411)
    plt.plot(xx1[::step], data01[::step, ch_id], label='raw ns2')  # plot raw
    plt.plot(xx1[::step], data1[::step, ch_id, sub_band], 'C1', alpha=0.5, label=f'sub-band: {sub_band}')  # plot sub_band (low, middle, high)
    plt.stem(triggers_no_ns2 / params.lfp.fs, triggers_no_ns2 * 0 + yy * 0.8, markerfmt='C3o', label='no')  # red
    plt.stem(triggers_yes_ns2 / params.lfp.fs, triggers_yes_ns2 * 0 + yy * 0.8, markerfmt='C2o', label='yes')  # green
    plt.ylim(-yy, yy)
    plt.xlabel('sec')
    plt.legend(loc=1)
    plt.subplot(423)  # plot psth no-questions
    plt.plot(xx2, psth_no.mean(0)[:, ch_id], label='trial av. no')  # average across trials, plot 1 channel
    plt.plot(xx2, psth_no.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. no')  # average across trials and channels
    plt.xlabel('sec')
    plt.legend()
    plt.subplot(424)  # plot psth yes-questions
    plt.plot(xx2, psth_yes.mean(0)[:, ch_id], label='trial av. yes')  # average across trials, plot 1 channel
    plt.plot(xx2, psth_yes.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. yes')  # average across trials and channels
    plt.xlabel('sec')
    plt.legend()
    plt.subplot(425)  # plot spectrogram no-question, single channel and trial
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :] / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :])
    # shared color scale across the no/yes single-trial spectrograms
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(426)  # plot spectrogram yes-question, single channel and trial
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :] / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :])
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(427)  # plot spectrogram no-question, averaged
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0) / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0))
    # NOTE(review): vmax for the averaged panels is recomputed from the
    # single trial, not from the averaged spectrograms -- confirm intended.
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(428)  # plot spectrogram yes-question, averaged
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0) / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0))
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.tight_layout()
    plt.show()
    return None
# ----------------------------------------------------------------------
# SCRIPT PART: load configuration and recording paths
# ----------------------------------------------------------------------
params = aux.load_config(force_reload=True)
# params.lfp.array1 = np.delete(params.lfp.array1, params.lfp.array1_exclude)
# params.lfp.array2 = np.delete(params.lfp.array2, params.lfp.array2_exclude)
psth_win = params.lfp.psth_win  # (start, stop) offsets around each trigger, used directly as sample indices
# file_name = '/home/vlachos/data_kiap/20190321-135311/20190321-135311-001'
# path = '/media/kiap/kiap_backup/Recordings/K01/Recordings/20190326-160445/'
# path = '/kiap/data/clinical/neural_new/nsx/'
path = '/media/vlachos/bck_disk1/kiap/recordings/20190705/20190705-143437/'
# file_name = path + '20190326-160445-001.ns2'
file_name = path + '20190705-143437-001.ns2'
array1 = params.lfp.array1  # channel indices of array 1
array2 = params.lfp.array2  # channel indices of array 2
if not 'data0' in locals():  # load data only if not already loaded by a previous run
    # READ DATA
    # ----------------------------------
    reader = BlackrockIO(filename=file_name)
    data0 = read_data()  # (samples, channels)
    data0 = data0.copy().astype(np.float64)  # float copy so preprocessing below can modify it
    # IMPORT TRIGGERS FROM NEV FILE
    # ----------------------------------
    triggers_no_ns2, triggers_yes_ns2 = get_triggers(n_triggers=20, speller=params.speller.type)
    trials_msk = get_valid_trials(triggers_no_ns2, triggers_yes_ns2)
    # data1[data1 > params.lfp.artifact_thr] = 0
    # data1[data1 < -params.lfp.artifact_thr] = 0
    # data0 = data0 - np.mean(data0, 1)[:, np.newaxis]
    reader.nev_data['Comments'][0]  # NOTE(review): no-op expression, result discarded -- debugging leftover?
# Z-SCORE DATA
# ----------------------------------
if params.lfp.zscore:
    data0 = stats.zscore(data0)
# SEPARATE DATA INTO CHANNELS FROM ARRAY-1 AND ARRAY-2
# ----------------------------------
data01 = data0[:, :len(array1)]  # array-1 electrodes
data02 = data0[:, len(array1):len(array1) + len(array2)]  # array-2 electrodes
# EXCLUDE CHANNELS IF VARIANCE IS OUTSIDE PERCENTILES
# ----------------------------------------------------------------------
if params.lfp.exclude:
    data01, data02, array1_msk, array2_msk = remove_channels(data01, data02)
    ch_nr1 = len(params.lfp.array1) - len(array1_msk)  # channels remaining after exclusion
    ch_nr2 = len(params.lfp.array2) - len(array2_msk)
else:
    ch_nr1 = len(params.lfp.array1)
    ch_nr2 = len(params.lfp.array2)
# PERFORM COMMON AVERAGE REFERENCING
# ----------------------------------------------------------------------
if params.lfp.car:
    # subtract the instantaneous mean across channels of the same array
    data01 = data01 - np.repeat(np.mean(data01, axis=1)[:, np.newaxis], data01.shape[1], 1)
    data02 = data02 - np.repeat(np.mean(data02, axis=1)[:, np.newaxis], data02.shape[1], 1)
# DIVIDE DATA INTO 3 SUB-BANDS
# ----------------------------------
data1 = np.zeros((data01.shape[0], data01.shape[1], 3))  # (samples, channels, 3): band-filtered array-1 data
data2 = np.zeros((data02.shape[0], data02.shape[1], 3))  # (samples, channels, 3): band-filtered array-2 data
# Y1 = np.zeros((data01.shape[0], data01.shape[1], 3))
# FILTER IN THE THREE DIFFERENT BANDS
# ----------------------------------
# NOTE(review): bw_filter is fed the transposed (channels, samples) data but
# the result is assigned to (samples, channels) slices -- presumably
# bw_filter transposes back internally; confirm in analytics1.
data1[:, :, 0], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
data1[:, :, 1], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
data1[:, :, 2], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
data2[:, :, 0], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
data2[:, :, 1], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
data2[:, :, 2], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
# PERFORM STFT USING FFT
# ----------------------------------
Y01, ff01 = analytics1.calc_fft(data01, params.lfp.fs, i_stop=1e5, axis=0)  # raw spectra, array 1
Y02, ff02 = analytics1.calc_fft(data02, params.lfp.fs, i_stop=1e5, axis=0)  # raw spectra, array 2
Y1, ff1 = analytics1.calc_fft(data1, params.lfp.fs, i_stop=1e5, axis=0)  # sub-band spectra, array 1
Y2, ff2 = analytics1.calc_fft(data2, params.lfp.fs, i_stop=1e5, axis=0)  # sub-band spectra, array 2
nperseg = 10000
# ff, tt, S = signal.spectrogram(data2[:, :, 0], params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
ff, Y = signal.welch(data2, fs=params.lfp.fs, axis=0, nperseg=10000)  # Welch PSD of the array-2 sub-bands
# QUICK-LOOK PLOT OF THE WELCH SPECTRA
# ----------------------------------
# fidx = (ff > 60) & (ff < 140)
fidx = (ff > 0) & (ff < 20)  # NOTE(review): only used by the commented-out plots below
# tidx = tt < 1000
plt.figure(1)
plt.clf()
# plt.pcolormesh(tt, ff[fidx], np.log10(S[fidx, :, :].mean(1)))
# plt.plot(ff, np.log10(Y[:, :, 2]), 'C0')
# plt.pcolormesh(ff, range(Y.shape[1]), np.log10(Y[:, :, 2].T))
# plt.pcolormesh(ff, tt, np.log10(S[:, 4, :].T))
# plt.colorbar()
# plt.xlim(0,20)
plt.plot(ff, np.log10(Y[:, :, 1]), 'C1')  # middle sub-band
plt.plot(ff, np.log10(Y[:, :, 2]), 'C2')  # high sub-band
# plt.xlim(0, 100)
plt.draw()
plt.show()
  467. # COMPUTE PSTH
  468. # ----------------------------------
  469. # psth shape: (trials, time, channels)
  470. offset = 600 * params.lfp.fs # offset in sec to get data for normalization
  471. triggers_yes_norm = np.hstack((triggers_yes_ns2 + offset, triggers_yes_ns2 + offset + 10))
  472. triggers_no_norm = np.hstack((triggers_yes_ns2 + offset, triggers_yes_ns2 + offset + 10))
  473. if np.any(triggers_yes_norm > data01.shape[1]):
  474. log.error('triggers_norm outside imported data')
  475. psth_no, psth_yes = compute_psth(triggers_yes_ns2, triggers_no_ns2, arr_nr=1, sub_band=params.lfp.sub_band)
  476. psth_norm1, psth_norm2 = compute_psth(triggers_yes_norm, triggers_no_norm, arr_nr=1, sub_band=params.lfp.sub_band)
# COMPUTE PSTH SPECTROGRAMS
# ----------------------------------
nperseg = params.lfp.spectra.spgr_len
nperseg = min(nperseg, psth_no.shape[1])  # a segment cannot be longer than the psth window
# ff, tt, Sxx1 = signal.spectrogram(psth_no.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# ff, tt, Sxx2 = signal.spectrogram(psth_yes.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# Sxx1.shape = (trials, ff, channels, time)
# Sxx1_norm.shape = (ff, time, channels)
ff, tt, Sxx1 = signal.spectrogram(psth_no, params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
ff, tt, Sxx2 = signal.spectrogram(psth_yes, params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
ff, tt, Sxx1_norm = signal.spectrogram(psth_norm1.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
ff, tt, Sxx2_norm = signal.spectrogram(psth_norm2.mean(0), params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
# Sxx1 = Sxx1.mean(1)
# Sxx2 = Sxx2.mean(1)
Sxx1_norm = Sxx1_norm.mean(2)  # average the normalization spectrogram over time
Sxx2_norm = Sxx2_norm.mean(2)
tt = tt + psth_win[0] / params.lfp.fs  # shift the time axis so that 0 is the trigger
t_idx = tt < 1
Sxx1_norm = Sxx1_norm[:, np.newaxis].repeat(len(tt), 1)  # tile over time for element-wise division
Sxx2_norm = Sxx2_norm[:, np.newaxis].repeat(len(tt), 1)
# if params.lfp.normalize:
#     Sxx1 = Sxx1 / Sxx1_norm
#     Sxx2 = Sxx2 / Sxx2_norm
# 11. PLOT RESULTS
# ----------------------------------
if params.lfp.plot.general:
    # plot_spectra()
    plot_psth(ff=ff, sub_band=params.lfp.sub_band, ch_id=0, trial_id=0, step=100)
    pass
# xxx
plt.ioff()  # disable interactive mode so figures stay open at script end
# batch export of per-trial/per-channel psth figures (disabled):
# for trial_id in range(4, 5):
#     for ch_id in range(ch_nr1 - 1, ch_nr1):
#         plot_psth(ff=ff, sub_band=0, ch_id=ch_id, trial_id=0, step=100)
#         fig2 = plt.gcf()
#         fname = path + f'results/trial_{trial_id}_ch_{ch_id}.png'
#         # fig2.savefig(fname)
#         print(ch_id)