# lfp_analytics.py
  1. '''
  2. description: offline analysis of kiap data
  3. author: Ioannis Vlachos
  4. date: 02.04.2019
  5. Copyright (c) 2019 Ioannis Vlachos.
  6. All rights reserved.'''
  7. import re
  8. import os
  9. import glob
  10. import importlib
  11. import urllib
  12. import time
  13. import numpy as np
  14. import matplotlib.pyplot as plt
  15. import matplotlib as mpl
  16. from scipy import signal
  17. from scipy.fftpack import fftshift
  18. from scipy import stats
  19. from cachetools import cached, LRUCache, TTLCache
  20. from neo.io.blackrockio import BlackrockIO
  21. import aux
  22. from aux import log
  23. from analytics import analytics1
  24. importlib.reload(analytics1)
  25. class lfp_analytics:
  26. def __init__(self, params):
  27. self.params =params
  28. return None
  29. # @cached(cache={})
  30. def read_data(self, file_name):
  31. '''reads first array1 and then array2 channels'''
  32. if hasattr(self, 'data0'):
  33. return None
  34. reader = BlackrockIO(filename=file_name)
  35. data0 = reader.get_analogsignal_chunk(i_start=eval(str(self.params.lfp.i_start)), i_stop=eval(str(self.params.lfp.i_stop)),
  36. channel_indexes=list(set(self.params.lfp.array1) | set(self.params.lfp.array2)))
  37. params = self.params
  38. if self.params.lfp.zscore: # z-score
  39. self.data0 = stats.zscore(self.data0)
  40. array1 = params.lfp.array1
  41. array2 = params.lfp.array2
  42. data01 = data0[:, :len(array1)] # array-1 electrodes
  43. data02 = data0[:, len(array1):len(array1) + len(array2)] # array-2 electrodes
  44. self.data0 = data0
  45. self.data01 = data01
  46. self.data02 = data02
  47. self.reader = reader
  48. self.file_name = file_name
  49. self.base_name = os.path.basename(file_name).split('.')[0]
  50. self.results_path = params.file_handling.results +'lfp/'#+ self.base_name.split('-')[0]
  51. os.makedirs(self.results_path, exist_ok=True)
  52. self.ch_nr1 = len(self.params.lfp.array1)
  53. self.ch_nr2 = len(self.params.lfp.array1)
  54. return None
  55. # @cached(cache={})
  56. def preprocess(self):
  57. if hasattr(self, 'data1'):
  58. print('preprocessed data is already in memory')
  59. return None
  60. data01 = self.data01
  61. data02 = self.data02
  62. params = self.params
  63. if params.lfp.exclude: # EXCLUDE CHANNELS IF VARIANCE IS OUTSIDE PERCENTILES
  64. self.remove_channels()
  65. self.ch_nr1 = len(params.lfp.array1) - len(self.array1_msk)
  66. self.ch_nr2 = len(params.lfp.array2) - len(self.array2_msk)
  67. else:
  68. self.ch_nr1 = len(params.lfp.array1)
  69. self.ch_nr2 = len(params.lfp.array2)
  70. self.array1_msk = []
  71. self.array2_msk = []
  72. if params.lfp.car: # PERFORM COMMON AVERAGE REFERENCING
  73. data01 = data01 - np.repeat(np.mean(data01, axis=1)[:, np.newaxis], data01.shape[1], 1)
  74. data02 = data02 - np.repeat(np.mean(data02, axis=1)[:, np.newaxis], data02.shape[1], 1)
  75. # DEVIDE DATA INTO 3 SUB-BANDS
  76. self.data1 = np.zeros((data01.shape[0], data01.shape[1], 3)) # (samples, channels, 3)
  77. self.data2 = np.zeros((data02.shape[0], data02.shape[1], 3)) # (samples, channels, 3)
  78. # FILTER IN THE THREE DIFFERENT BANDS
  79. self.data1[:, :, 0], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
  80. self.data1[:, :, 1], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
  81. self.data1[:, :, 2], _, _ = analytics1.bw_filter(data01.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
  82. self.data2[:, :, 0], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_lb, params.lfp.fs, params.lfp.filter_order_lb, plot=params.lfp.plot.filters)
  83. self.data2[:, :, 1], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_mb, params.lfp.fs, params.lfp.filter_order_mb, plot=params.lfp.plot.filters)
  84. self.data2[:, :, 2], _, _ = analytics1.bw_filter(data02.T, params.lfp.filter_fc_hb, params.lfp.fs, params.lfp.filter_order_hb, plot=params.lfp.plot.filters)
  85. # PERFORM STFT USING FFT
  86. # ----------------------------------
  87. # Y01, ff01 = analytics1.calc_fft(data01, params.lfp.fs, i_stop=1e5, axis=0)
  88. # Y02, ff02 = analytics1.calc_fft(data02, params.lfp.fs, i_stop=1e5, axis=0)
  89. # Y1, ff1 = analytics1.calc_fft(self.data1, params.lfp.fs, i_stop=1e5, axis=0)
  90. # Y2, ff2 = analytics1.calc_fft(self.data2, params.lfp.fs, i_stop=1e5, axis=0)
  91. self.data01 = data01
  92. self.data02 = data02
  93. # self.Y01 = Y01
  94. # self.Y02 = Y02
  95. # self.Y1 = Y1
  96. # self.Y2 = Y2
  97. # self.ff01 = ff01
  98. # self.ff02 = ff02
  99. # self.ff1 = ff1
  100. # self.ff2 = ff2
  101. return None
  102. def get_triggers(self, n_triggers=10, verbose=True, speller='question'):
  103. '''get triggers from corresponding nev files'''
  104. if speller == 'question':
  105. tname1, tname2 = 'yes', 'no'
  106. elif speller == 'feedback':
  107. tname1, tname2 = 'up', 'down'
  108. try:
  109. triggers_ts = [tt[0] for tt in reader.nev_data['Comments'][0]]
  110. triggers_txt = [tt[5].decode('utf-8') for tt in reader.nev_data['Comments'][0]]
  111. except:
  112. log.error('Triggers could not be loaded. Does Nev file exist?')
  113. return None, None
  114. if verbose:
  115. print(reader.nev_data['Comments'])
  116. triggers_01 = []
  117. triggers_02 = []
  118. for ii in range(len(triggers_txt)):
  119. if f'{tname1}, response, start' in triggers_txt[ii]:
  120. triggers_01.append(triggers_ts[ii])
  121. elif f'{tname2}, response, start' in triggers_txt[ii]:
  122. triggers_02.append(triggers_ts[ii])
  123. triggers_01 = np.array(triggers_01)
  124. triggers_02 = np.array(triggers_02)
  125. # get timestamps for ns2
  126. triggers1 = triggers_yes // params.lfp.sampling_ratio # map nev triggers to ns2
  127. triggers2 = triggers_02 // params.lfp.sampling_ratio
  128. self.triggers1 = triggers1[:n_triggers]
  129. self.triggers2 = triggers2[:n_triggers]
  130. return None
  131. def get_triggers_mapping(self, verbose=True):
  132. '''get triggers from corresponding nev files'''
  133. states = self.params.lfp.motor_mapping
  134. try:
  135. triggers_ts = [tt[0] for tt in self.reader.nev_data['Comments'][0]]
  136. triggers_txt = [tt[5].decode('utf-8') for tt in self.reader.nev_data['Comments'][0]]
  137. except:
  138. log.error('Triggers could not be loaded. Does Nev file exist?')
  139. return None, None
  140. if verbose:
  141. print(self.reader.nev_data['Comments'])
  142. triggers = [[] for x in range(len(states))]
  143. for ii in range(len(triggers_txt)):
  144. if triggers_ts[ii] // self.params.lfp.sampling_ratio <= len(self.data02):
  145. if 'Zunge, response, start' in triggers_txt[ii]:
  146. triggers[0].append(triggers_ts[ii])
  147. print(ii, triggers_ts[ii])
  148. elif 'Schliesse_Hand, response, start' in triggers_txt[ii]:
  149. triggers[1].append(triggers_ts[ii])
  150. elif 'Oeffne_Hand, response, start' in triggers_txt[ii]:
  151. triggers[2].append(triggers_ts[ii])
  152. elif 'Bewege_Augen, response, start' in triggers_txt[ii]:
  153. triggers[3].append(triggers_ts[ii])
  154. elif 'Bewege_Kopf, response, start' in triggers_txt[ii]:
  155. triggers[4].append(triggers_ts[ii])
  156. for ii in range(5):
  157. triggers[ii] = np.array(triggers[ii]) // self.params.lfp.sampling_ratio # map nev triggers to ns2
  158. self.triggers_mapping = triggers
  159. return None
    def get_valid_trials(triggers_no_ns2, triggers_yes_ns2):
        # NOTE(review): this method is missing `self` and references the
        # undefined names `data1`, `psth_win` and `params`, none of which are
        # in scope here -- it raises NameError if called as an instance
        # method. Looks like script code pasted into the class; confirm the
        # intended scope before using it.
        '''Mark trials containing samples beyond the artifact threshold.

        Returns a (10, 2) boolean mask: column 0 flags no-trials, column 1
        yes-trials; False marks an excluded (artifact-contaminated) trial.
        '''
        trials_msk = np.ones((10, 2), dtype=bool)
        for ii, tt in enumerate(triggers_no_ns2):
            tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
            if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
                trials_msk[ii, 0] = False
        for ii, tt in enumerate(triggers_yes_ns2):
            tmp = data1[tt + psth_win[0]:tt + psth_win[1], :]
            if (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
                trials_msk[ii, 1] = False
            print(ii)
        print('\nTrials excluded', np.where(trials_msk[:, 0] == False)[0], np.where(trials_msk[:, 1] == False)[0])
        return trials_msk
    def calc_cc_time(self, save_fig=False):
        '''Plot time-resolved correlation coefficients within each array.

        Computes the lower-triangular correlation matrix of all channels over
        the whole recording, plus per-window min/max/mean correlations in
        consecutive 1000-sample windows, and renders both in one figure.
        '''
        cc_step = 1000  # window length in samples
        rng = range(0, self.data0.shape[0], cc_step)
        cc1 = np.zeros((len(rng), 3))  # per-window [min, max, mean], array 1
        cc2 = np.zeros((len(rng), 3))  # per-window [min, max, mean], array 2
        # Whole-recording correlation matrices; k=-1 keeps the strict lower
        # triangle, zeroing the unit diagonal and the duplicate upper half.
        cc01 = np.tril(np.corrcoef(self.data01.T), k=-1)
        cc02 = np.tril(np.corrcoef(self.data02.T), k=-1)
        for ii, idx in enumerate(rng):
            # NOTE(review): min/max/mean are taken over the *full* tril
            # matrix, i.e. including its zero entries (so max >= 0 and the
            # mean is diluted) -- confirm this is intended.
            cc1[ii, 0] = np.min(np.tril(np.corrcoef(self.data01[idx:idx + cc_step, :].T), k=-1))
            cc1[ii, 1] = np.max(np.tril(np.corrcoef(self.data01[idx:idx + cc_step, :].T), k=-1))
            cc1[ii, 2] = np.mean(np.tril(np.corrcoef(self.data01[idx:idx + cc_step, :].T), k=-1))
            cc2[ii, 0] = np.min(np.tril(np.corrcoef(self.data02[idx:idx + cc_step, :].T), k=-1))
            cc2[ii, 1] = np.max(np.tril(np.corrcoef(self.data02[idx:idx + cc_step, :].T), k=-1))
            cc2[ii, 2] = np.mean(np.tril(np.corrcoef(self.data02[idx:idx + cc_step, :].T), k=-1))
        fh = plt.figure(figsize=(7, 10))
        plt.clf()
        plt.subplot(3, 2, 1)  # whole-recording cc matrix, array 1
        plt.pcolormesh(cc01)
        plt.clim(-1, 1)
        plt.title('cc, arr 1')
        plt.subplot(3, 2, 2)  # whole-recording cc matrix, array 2
        plt.pcolormesh(cc02)
        plt.clim(-1, 1)
        plt.title('cc, arr 2')
        plt.subplot(3, 2, 3)  # histogram of pairwise cc, array 1
        plt.hist(cc01.flatten(), color='C0', label=f'{cc01.flatten().mean():.2f}')
        plt.legend(loc=1)
        plt.subplot(3, 2, 4)  # histogram of pairwise cc, array 2
        plt.hist(cc02.flatten(), color='C1', label=f'{cc02.flatten().mean():.2f}')
        plt.legend(loc=1)
        ax = plt.subplot(6, 1, 5)  # windowed min/max/mean over time, array 1
        plt.plot(cc1, 'C0')
        plt.ylim(-1, 1)
        plt.xticks([])
        plt.subplot(6, 1, 6)  # windowed min/max/mean over time, array 2
        plt.plot(cc2, 'C1')
        plt.ylim(-1, 1)
        if save_fig:
            fig_name = f'{self.results_path}{self.base_name.split("-")[1]}_cc.png'
            plt.savefig(fig_name)
            print(f'saving figure {fh.number} in {fig_name}\n')
        return None
  228. def compute_psth(arr_nr=1, sub_band=-1):
  229. '''compute the psth for no and yes questions'''
  230. triggers1 = self.triggers1
  231. triggers2 = self.triggers2
  232. psth_len = np.diff(psth_win)[0]
  233. if arr_nr == 1:
  234. ch_nr = ch_nr1
  235. if sub_band == -1:
  236. data_tmp = data01 # raw data
  237. else:
  238. data_tmp = data1[:, :, sub_band] # sub_band
  239. elif arr_nr == 2:
  240. ch_nr = ch_nr2
  241. if sub_band == -1:
  242. data_tmp = data02 # raw data
  243. else:
  244. data_tmp = data2[:, :, sub_band] # sub_band
  245. psth_no = np.zeros((len(triggers2), psth_len, ch_nr))
  246. psth_yes = np.zeros((len(triggers1), psth_len, ch_nr))
  247. for ii in range(len(triggers2)):
  248. if (triggers2[ii] + psth_win[0] > 0) & (triggers2[ii] + psth_win[1] < len(data2)):
  249. tmp = data_tmp[(triggers2[ii] + psth_win[0]):(triggers2[ii] + psth_win[1]), :]
  250. # if not (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
  251. psth_no[ii, :, :] = tmp
  252. # else:
  253. # print(f'Excluded no-trial: {ii}')
  254. for ii in range(len(triggers1)):
  255. if (triggers1[ii] + psth_win[0] > 0) & (triggers1[ii] + psth_win[1] < len(data2)):
  256. tmp = data_tmp[(triggers1[ii] + psth_win[0]):(triggers1[ii] + psth_win[1]), :]
  257. # if not (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
  258. psth_yes[ii, :, :] = tmp
  259. # else:
  260. # print(f'Excluded yes-trial: {ii}')
  261. psth_yes, psth_no = remove_trials(psth_yes, psth_no)
  262. return psth_no, psth_yes
  263. def compute_psth_mapping(self, arr_nr=1, ch_nr=np.arange(32), sub_band=-1):
  264. '''compute the psth for no and yes questions and up and down feedback'''
  265. triggers = self.triggers_mapping
  266. states_nr = len(triggers)
  267. psth_win = self.params.lfp.psth_win
  268. psth_len = np.diff(psth_win)[0]
  269. if arr_nr == 1:
  270. ch_nr = self.ch_nr1
  271. if sub_band == -1:
  272. data_tmp = self.data01 # raw data
  273. else:
  274. data_tmp = self.data1[:, :, sub_band] # sub_band
  275. elif arr_nr == 2:
  276. ch_nr = self.ch_nr2
  277. if sub_band == -1:
  278. data_tmp = self.data02 # raw data
  279. else:
  280. data_tmp = self.data2[:, :, sub_band] # sub_band
  281. psth = []
  282. for jj in range(states_nr):
  283. psth.append(np.zeros((len(triggers[jj]), psth_len, ch_nr)))
  284. for jj in range(states_nr):
  285. for ii in range(len(triggers[jj])):
  286. if (triggers[jj][ii] + psth_win[0] > 0) & (triggers[jj][ii] + psth_win[1] < len(self.data02)):
  287. tmp = data_tmp[(triggers[jj][ii] + psth_win[0]):(triggers[jj][ii] + psth_win[1]), :]
  288. # if not (np.any(tmp > params.lfp.artifact_thr) or np.any(tmp < -params.lfp.artifact_thr)):
  289. psth[jj][ii, :, :] = tmp
  290. # else:
  291. # print(f'Excluded no-trial: {ii}')
  292. # psth_yes, psth_no = remove_trials(psth_yes, psth_no)
  293. if arr_nr == 1:
  294. self.psth_mapping1 = psth
  295. elif arr_nr == 2:
  296. self.psth_mapping2 = psth
  297. return None
  298. def remove_channels(self):
  299. '''remove channels for which variance lies outside 25-75 percentiles'''
  300. var1 = np.var(self.data01, axis=0)
  301. var2 = np.var(self.data02, axis=0)
  302. L1 = np.percentile(var1, 25, axis=0, keepdims=True)
  303. L2 = np.percentile(var2, 25, axis=0, keepdims=True)
  304. U1 = np.percentile(var1, 75, axis=0, keepdims=True)
  305. U2 = np.percentile(var2, 75, axis=0, keepdims=True)
  306. w = 3 # manual parameter that affects the valid range
  307. valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
  308. valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
  309. # plt.plot(var1, 'C0')
  310. # plt.plot(var2, 'C1')
  311. # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
  312. # array1_msk = (np.where(var1 < valid1[0]) and np.where(var1 > valid1[1]))[0]
  313. array1_msk = np.unique(((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0])
  314. array2_msk = np.unique(((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0])
  315. array1_msk = np.hstack((array1_msk, self.params.lfp.array1_exclude)).astype(int)
  316. array2_msk = np.hstack((array2_msk, self.params.lfp.array2_exclude)).astype(int)
  317. array1_msk.sort()
  318. array2_msk.sort()
  319. print(f'excluded channels, array1: {array1_msk}')
  320. print(f'excluded channels, array2: {array2_msk}')
  321. # plot_channels(self.data01, [], arr_id=1, fs=self.params.lfp.fs, array_msk=array1_msk, i_stop=1000, step=1, color='C0')
  322. # plot_channels(self.data02, [], arr_id=2, fs=self.params.lfp.fs, array_msk=array2_msk, i_stop=1000, step=1, color='C1')
  323. self.data01 = np.delete(self.data01, array1_msk, axis=1)
  324. self.data02 = np.delete(self.data02, array2_msk, axis=1)
  325. self.array1_msk = array1_msk
  326. self.array2_msk = array2_msk
  327. self.ch_nr1 = len(self.params.lfp.array1) - len(array1_msk)
  328. self.ch_nr2 = len(self.params.lfp.array2) - len(array2_msk)
  329. return None
  330. def remove_trials(psth_yes, psth_no):
  331. '''remove trials for which variance lies outside 25-75 percentiles'''
  332. var1 = np.var(psth_yes, axis=1)
  333. var2 = np.var(psth_no, axis=1)
  334. L1 = np.percentile(var1, 25, axis=0, keepdims=True)
  335. L2 = np.percentile(var2, 25, axis=0, keepdims=True)
  336. U1 = np.percentile(var1, 75, axis=0, keepdims=True)
  337. U2 = np.percentile(var2, 75, axis=0, keepdims=True)
  338. w = 10
  339. valid1 = [L1 - w * (U1 - L1), U1 + w * (U1 - L1)]
  340. valid2 = [L2 - w * (U2 - L2), U2 + w * (U2 - L2)]
  341. # array1_msk = np.unique((np.where(var1 < valid1[0])[0] or np.where(var1 > valid1[1])[0]))
  342. # array2_msk = np.unique((np.where(var2 < valid2[0])[0] or np.where(var2 > valid2[1])[0]))
  343. array1_msk = ((var1 < valid1[0]) | (var1 > valid1[1])).nonzero()[0]
  344. array2_msk = ((var2 < valid2[0]) | (var2 > valid2[1])).nonzero()[0]
  345. print(f'excluded yes-trials: {array1_msk}')
  346. print(f'excluded no-trials: {array2_msk}')
  347. psth_yes = np.delete(psth_yes, array1_msk, axis=0)
  348. psth_no = np.delete(psth_no, array2_msk, axis=0)
  349. return psth_yes, psth_no
  350. def plot_channels(self, ch_ids, arr_id=1, i_start=0, i_stop=10000, step=5, color='C0', save_fig=False):
  351. '''plot all data from array1 or array2 in the time domain'''
  352. i_start = int(i_start)
  353. i_stop = int(i_stop)
  354. fs = self.params.lfp.fs
  355. if arr_id == 1:
  356. data = self.data01
  357. array_msk = self.array1_msk
  358. elif arr_id == 2:
  359. data = self.data02
  360. array_msk = self.array2_msk
  361. if not self.params.lfp.zscore: # z-score only for visualization
  362. data = stats.zscore(data)
  363. if ch_ids == []:
  364. ch_ids = range(data.shape[1])
  365. if i_stop == -1:
  366. i_stop = data.shape[0]
  367. offset = np.cumsum(4 * np.var(data[:, ch_ids], axis=0)[:, np.newaxis]).T
  368. fh = plt.figure(figsize=(19, 10))
  369. plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, ch_ids] + offset, color=color, lw=1, alpha=0.8)
  370. if array_msk != []: # highlight excluded channels
  371. plt.plot(np.arange(i_start, i_stop) / fs, data[i_start:i_stop, array_msk] + offset[array_msk], color='C3', lw=1)
  372. plt.xlabel('Time (sec)')
  373. plt.yticks(offset[::step], range(0, len(ch_ids), step))
  374. plt.ylim(0, offset[-1] + 4)
  375. plt.title(f'raw data from array {arr_id}, n_ch={data.shape[1]}')
  376. plt.tight_layout()
  377. if save_fig:
  378. fig_name = f'{self.results_path}{self.base_name.split("-")[1]}_channels_time.png'
  379. plt.savefig(fig_name)
  380. print(f'saving figure {fh.number} in {fig_name}\n')
  381. return None
    def plot_spectra(ch_ids=[0]):
        # NOTE(review): missing `self`; relies on the undefined names data01,
        # data1, params, ff1, ff01, ff02, ff2, Y01, Y02, Y1, Y2, array1,
        # array2 -- raises NameError as an instance method. Looks like script
        # code pasted into the class; needs porting to instance state
        # (cf. calc_spectra / plot_spectra_bands).
        '''Plot spectra of raw data and sub-bands: one example channel from
        ch_ids plus the channel average; column 1 = array1, column 2 = array2.
        '''
        mpl.rcParams['axes.formatter.limits'] = (-2, 2)
        yy = data01.std() * 2  # NOTE(review): unused
        xx = np.arange(data1.shape[0]) / params.lfp.fs  # NOTE(review): unused
        xmax = 150  # upper frequency limit (Hz) for the linear-axis panels
        plt.figure(1, figsize=(10, 10))
        plt.clf()
        # column 1: first array
        plt.subplot(521)  # raw spectrum, array 1
        plt.plot(ff1, Y01[:, ch_ids[0]], 'C0', label=f'channel: {array1[ch_ids[0]]}')
        plt.plot(ff1, Y01.mean(1), 'C3', alpha=0.2, label='average')
        plt.xlim(xmin=0, xmax=xmax)
        plt.title('array1 raw')
        plt.gca().set_xticklabels([])
        plt.legend()
        plt.subplot(523)  # sub-band 0, array 1
        plt.plot(ff1, Y1[:, ch_ids[0], 0], 'C0', label='array1')
        plt.plot(ff1, Y1[:, :, 0].mean(1), 'C3', alpha=0.2)
        plt.xlim(xmin=0, xmax=xmax)
        plt.gca().set_xticklabels([])
        plt.title('sub_band 0')
        plt.subplot(525)  # sub-band 1, array 1
        plt.plot(ff1, Y1[:, ch_ids[0], 1], 'C0', label='array1')
        plt.plot(ff1, Y1[:, :, 1].mean(1), 'C3', alpha=0.2)
        plt.xlim(xmin=0, xmax=xmax)
        plt.gca().set_xticklabels([])
        plt.title('sub_band 1')
        plt.subplot(527)  # sub-band 2, array 1
        plt.plot(ff1, Y1[:, ch_ids[0], 2], 'C0', label='array1')
        plt.plot(ff1, Y1[:, :, 2].mean(1), 'C3', alpha=0.2)
        plt.xlim(xmin=0, xmax=xmax)
        plt.title('sub_band 2')
        plt.subplot(529)  # raw spectrum on log-log axes, array 1
        plt.loglog(ff1, Y01[:, ch_ids[0]], 'C0')
        plt.loglog(ff1, Y01.mean(1), 'C3', alpha=0.2)
        plt.title('array1 raw loglog')
        plt.xlabel('Frequency (Hz)')
        # column 2: second array
        plt.subplot(522)  # raw spectrum, array 2
        plt.plot(ff02, Y02[:, ch_ids[0]], 'C0', label=f'channel: {array2[ch_ids[0]]}')
        plt.plot(ff02, Y02.mean(1), 'C3', alpha=0.2, label='average')
        plt.xlim(xmin=0, xmax=xmax)
        plt.title('array2 raw')
        plt.gca().set_xticklabels([])
        plt.legend()
        plt.subplot(524)  # sub-band 0, array 2
        plt.plot(ff2, Y2[:, ch_ids[0], 0], 'C0', label='array1')
        plt.plot(ff2, Y2[:, :, 0].mean(1), 'C3', alpha=0.2)
        plt.xlim(xmin=0, xmax=xmax)
        plt.gca().set_xticklabels([])
        plt.title('sub_band 0')
        plt.subplot(526)  # sub-band 1, array 2
        plt.plot(ff2, Y2[:, ch_ids[0], 1], 'C0', label='array1')
        plt.plot(ff2, Y2[:, :, 1].mean(1), 'C3', alpha=0.2)
        plt.xlim(xmin=0, xmax=xmax)
        plt.gca().set_xticklabels([])
        plt.title('sub_band 1')
        plt.subplot(528)  # sub-band 2, array 2
        plt.plot(ff2, Y2[:, ch_ids[0], 2], 'C0', label='array1')
        plt.plot(ff2, Y2[:, :, 2].mean(1), 'C3', alpha=0.2)
        plt.xlim(xmin=0, xmax=xmax)
        plt.title('sub_band 2')
        plt.subplot(5, 2, 10)  # raw spectrum on log-log axes, array 2
        # NOTE(review): this panel plots Y02 against ff01 -- confirm intended
        plt.loglog(ff01, Y02[:, ch_ids[0]], 'C0')
        plt.loglog(ff01, Y02.mean(1), 'C3', alpha=0.2)
        plt.title('raw loglog')
        plt.xlabel('Frequency (Hz)')
        plt.tight_layout()
        plt.draw()
        plt.show()
        return None
  468. def calc_spectra(self, arr_nr=1, axis=0, nperseg=10000, force=False, detrend='constant', onesided=True):
  469. if arr_nr == 1 and (not hasattr(self, 'Y1') or force):
  470. ff01, Y01 = signal.welch(self.data01, fs=self.params.lfp.fs, axis=0, nperseg=nperseg, detrend=detrend, return_onesided=onesided, scaling='spectrum')
  471. Y01 = fftshift(Y01, axes=axis)
  472. ff01 = fftshift(ff01)
  473. ff, Y = signal.welch(self.data1, fs=self.params.lfp.fs, axis=0, nperseg=nperseg, detrend=detrend, return_onesided=onesided, scaling='spectrum')
  474. Y = fftshift(Y, axes=axis)
  475. ff = fftshift(ff)
  476. self.Y01 = Y01
  477. self.ff01 = ff01
  478. self.Y1 = Y
  479. self.ff1 = ff
  480. elif arr_nr == 2 and (not hasattr(self, 'Y2') or force):
  481. ff02, Y02 = signal.welch(self.data02, fs=self.params.lfp.fs, axis=0, nperseg=nperseg, detrend=detrend, return_onesided=onesided, scaling='spectrum')
  482. Y02 = fftshift(Y02, axes=axis)
  483. ff02 = fftshift(ff02)
  484. ff, Y = signal.welch(self.data2, fs=self.params.lfp.fs, axis=0, nperseg=nperseg, detrend=detrend, return_onesided=onesided, scaling='spectrum')
  485. Y = fftshift(Y, axes=axis)
  486. ff = fftshift(ff)
  487. self.Y02 = Y02
  488. self.ff02 = ff02
  489. self.Y2 = Y
  490. self.ff2 = ff
  491. return None
  492. def plot_spectra_original(self, arr_nr=1, ch_ids=[]):
  493. '''plot spectra of raw data for all channels of arr_nr, compare with spectrum of white noise'''
  494. mpl.rcParams['axes.formatter.limits'] = (-2, 2)
  495. if arr_nr == 1:
  496. Y = self.Y01
  497. ff = self.ff01
  498. d0, d1 = self.data01.shape
  499. elif arr_nr == 2:
  500. Y = self.Y02
  501. ff = self.ff02
  502. d0, d1 = self.data02.shape
  503. # Y_GWN, _ = analytics1.calc_fft(np.random.randn(d0, d1), params.lfp.fs, i_stop=1e5, axis=0)
  504. plt.figure(figsize=(10, 10))
  505. plt.clf()
  506. if ch_ids == []:
  507. ch_ids = np.arange(Y.shape[1])
  508. p1 = int(np.round(np.sqrt(len(ch_ids))))
  509. for ii in range(Y.shape[1]):
  510. ax = plt.subplot(p1, p2, ii + 1)
  511. # plt.semilogy(ff, Y[:, ii], 'C0')
  512. plt.semilogy(ff, Y[:, ii], 'C0')
  513. # plt.semilogy(ff1, Y_GWN[:, ii], 'C1', alpha=0.5)
  514. # plt.ylim(10e-2, 10e4)
  515. # plt.xlim(0, 30)
  516. # if ii<56:
  517. if (ii // 8) < (d1 // 8):
  518. ax.set_xticks([])
  519. if np.mod(ii, 8) != 0:
  520. ax.set_yticks([])
  521. plt.draw()
  522. return None
  523. def plot_spectra_bands(self, arr_nr=1, band_id=0, ch_ids=[], log=False, erase=True, xmin=0, xmax=30, ymin=0, ymax=10, save_fig=False):
  524. '''plot spectra of raw data for all channels of arr_nr, compare with spectrum of white noise'''
  525. mpl.rcParams['axes.formatter.limits'] = (-2, 2)
  526. if arr_nr == 1:
  527. Y = self.Y1
  528. ff = self.ff1
  529. d0, d1 = self.data01.shape
  530. elif arr_nr == 2:
  531. Y = self.Y2
  532. ff = self.ff2
  533. d0, d1 = self.data02.shape
  534. # Y_GWN, _ = analytics1.calc_fft(np.random.randn(d0, d1), params.lfp.fs, i_stop=1e5, axis=0)
  535. if erase:
  536. fh = plt.figure(figsize=(19, 10))
  537. plt.clf()
  538. if ch_ids == []:
  539. ch_ids = np.arange(Y.shape[1])
  540. p1 = int(np.round(np.sqrt(len(ch_ids))))
  541. fidx = (ff > xmin) & (ff < xmax)
  542. for ii, ch_id in enumerate(ch_ids):
  543. ax = plt.subplot(p1, p1 + 1, ii + 1)
  544. # plt.semilogy(ff, Y[:, ch_id], 'C0')
  545. if log:
  546. plt.semilogy(ff[fidx], Y[fidx, ch_id, band_id], lw=1, label=f'{ch_id}')
  547. if ii == 0:
  548. plt.semilogy(ff[fidx], Y[fidx, :, band_id].mean(axis=1), 'k', alpha=0.7, lw=1, label=f'{ch_id}')
  549. else:
  550. plt.plot(ff[fidx], Y[fidx, ch_id, band_id], lw=1, label=f'{ch_id}')
  551. if ii == 0:
  552. plt.plot(ff[fidx], Y[fidx, :, band_id].mean(axis=1), 'k', alpha=0.7, lw=1, label=f'{ch_id}')
  553. # plt.semilogy(ff1, Y_GWN[:, ch_id], 'C1', alpha=0.5)
  554. plt.xlim(xmin, xmax)
  555. plt.ylim(ymin, ymax)
  556. # if ch_id<56:
  557. # if (ch_id // p1) < (d1 // p1):
  558. if ch_id < Y.shape[1] - 1:
  559. ax.set_xticks([])
  560. else:
  561. plt.xlabel('Hz')
  562. # if np.mod(ch_id, p1) != 0:
  563. if ch_id > 0:
  564. ax.set_yticks([])
  565. plt.legend(loc=1, prop={'size': 6})
  566. plt.tight_layout()
  567. if save_fig:
  568. fig_name = f'{self.results_path}{self.base_name.split("-")[1]}_spectra.png'
  569. plt.savefig(fig_name)
  570. print(f'saving figure {fh.number} in {fig_name}\n')
  571. plt.draw()
  572. return None
  573. def calc_stft(self, arr_nr=1, nperseg=10000):
  574. if arr_nr == 1:
  575. data = self.data01
  576. elif arr_nr == 2:
  577. data = self.data02
  578. ff, tt, S = signal.spectrogram(data[:, :], self.params.lfp.fs, axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
  579. self.ff_stft = ff
  580. self.tt_stft = tt
  581. if arr_nr == 1:
  582. self.S1 = S
  583. elif arr_nr == 2:
  584. self.S2 = S
  585. return None
  586. def calc_stft_psth(self, arr_nr=1, nperseg=1000):
  587. if arr_nr == 1:
  588. psth = self.psth_mapping1
  589. _,_, Smean = signal.spectrogram(self.data01[70000:250000:, ], self.params.lfp.fs,
  590. axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
  591. elif arr_nr == 2:
  592. psth = self.psth_mapping2
  593. _,_, Smean = signal.spectrogram(self.data02[70000:250000:, ], self.params.lfp.fs,
  594. axis=0, nperseg=nperseg, noverlap=int(nperseg * 0.9))
  595. Smean = Smean.mean(axis=2) # mean stft for normalization
  596. S_tot = []
  597. for cond in range(len(psth)):
  598. ff, tt, S = signal.spectrogram(psth[cond][:, :], self.params.lfp.fs, axis=1, nperseg=nperseg, noverlap=int(nperseg * 0.9))
  599. self.ff_stft_mapping = ff
  600. self.tt_stft_mapping = tt
  601. S_tot.append(S)
  602. Smean = np.repeat(Smean[:, :, np.newaxis], tt.size, axis=2) # adjust dims to psth_mapping array
  603. if arr_nr == 1:
  604. self.psth_S1_mapping = S_tot
  605. self.psth_S1_mean = Smean
  606. elif arr_nr == 2:
  607. self.psth_S2_mapping = S_tot
  608. self.psth_S2_mean = Smean
  609. return None
  610. def plot_stft(self, arr_nr=1, fmin=0, fmax=200, save_fig=True):
  611. if arr_nr == 1:
  612. S = self.S1
  613. elif arr_nr == 2:
  614. S = self.S2
  615. fh = plt.figure()
  616. fidx = (self.ff_stft > fmin) & (self.ff_stft < fmax)
  617. plt.pcolormesh(self.tt_stft, self.ff_stft[fidx], np.log10(S[fidx, :, :].mean(axis=1)))
  618. plt.xlabel('Time (sec)')
  619. plt.ylabel('Hz')
  620. plt.title(f'stft, arr: {arr_nr}, average {S.shape[1]} channels')
  621. if save_fig:
  622. fig_name = f'{self.results_path}{self.base_name.split("-")[1]}_arr_{arr_nr}_stft.png'
  623. plt.savefig(fig_name)
  624. print(f'saving figure {fh.number} in {fig_name}\n')
  625. return None
def plot_psth(ff=[], sub_band=0, ch_id=0, trial_id=0, step=10):
    ''' plot psth for no and yes responses separately

    NOTE(review): defined without a ``self`` parameter even though it sits
    inside a class body, and it reads module-level names (data01, data1,
    params, psth_win, triggers_no_ns2, triggers_yes_ns2, psth_no, psth_yes,
    tt, Sxx1, Sxx2, Sxx1_norm, Sxx2_norm, log) that are not defined in this
    scope -- presumably leftovers from a script version; confirm before
    calling as a method.
    NOTE(review): the mutable default ``ff=[]`` is never mutated, but
    ``ff < 150`` below only works if the caller passes a numpy array.

    Layout: row 1 raw trace + sub-band + yes/no triggers; row 2
    trial-averaged PSTHs (no | yes); rows 3-4 single-trial and
    trial-averaged spectrograms (no | yes), optionally divided by the
    normalization spectrograms when params.lfp.normalize is set.
    '''
    # symmetric y-limit derived from the raw / sub-band standard deviations
    yy = max(data01.std(), data1[:, ch_id, sub_band].std()) * 2
    xx1 = np.arange(data1.shape[0]) / params.lfp.fs  # time axis of the full recording (sec)
    xx2 = np.arange(psth_win[0], psth_win[1]) / params.lfp.fs  # time axis of the psth window (sec)
    f_idx = ff < 150  # restrict spectrogram panels to frequencies below 150 Hz
    plt.figure(figsize=(10, 10))
    plt.clf()
    log.info(f'Selected sub-band: {sub_band}')
    plt.subplot(411)
    plt.plot(xx1[::step], data01[::step, ch_id], label='raw ns2')  # plot raw
    plt.plot(xx1[::step], data1[::step, ch_id, sub_band], 'C1', alpha=0.5, label=f'sub-band: {sub_band}')  # plot sub_band (low, middle, high)
    plt.stem(triggers_no_ns2 / params.lfp.fs, triggers_no_ns2 * 0 + yy * 0.8, markerfmt='C3o', label='no')  # red
    plt.stem(triggers_yes_ns2 / params.lfp.fs, triggers_yes_ns2 * 0 + yy * 0.8, markerfmt='C2o', label='yes')  # green
    plt.ylim(-yy, yy)
    plt.xlabel('sec')
    plt.legend(loc=1)
    plt.subplot(423)  # plot psth no-questions
    plt.plot(xx2, psth_no.mean(0)[:, ch_id], label='trial av. no')  # average accross trials, plot 1 channel
    plt.plot(xx2, psth_no.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. no')  # average accross trials and channels
    plt.xlabel('sec')
    plt.legend()
    plt.subplot(424)  # plot psth yes-questions
    plt.plot(xx2, psth_yes.mean(0)[:, ch_id], label='trial av. yes')  # average accross trials, plot 1 channel
    plt.plot(xx2, psth_yes.mean(0).mean(1), 'C3', alpha=0.4, label='trial-ch av. yes')  # average accross trials and channels
    plt.xlabel('sec')
    plt.legend()
    plt.subplot(425)  # plot spectrogram no-question, single channel and trial
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :] / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[trial_id, f_idx, ch_id, :])
    # shared color scale across the no/yes single-trial panels
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(426)  # plot spectrogram yes-question, single channel and trial
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :] / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[trial_id, f_idx, ch_id, :])
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(427)  # plot spectrogram no-question, averaged
    # plt.pcolormesh(tt, ff[f_idx], Sxx1[f_idx, ch_id, :])
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0) / Sxx1_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx1[:, f_idx, ch_id, :].mean(0))
    # NOTE(review): vmax for the averaged panels is recomputed from the
    # single trial, not from the averages -- confirm this is intended.
    vmax = max(Sxx1[trial_id, f_idx, ch_id, :].max(), Sxx2[trial_id, f_idx, ch_id, :].max())
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.subplot(428)  # plot spectrogram yes-question, averaged
    if params.lfp.normalize:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0) / Sxx2_norm[f_idx, :, ch_id])
    else:
        plt.pcolormesh(tt, ff[f_idx], Sxx2[:, f_idx, ch_id, :].mean(0))
    plt.clim(vmax=vmax)
    plt.xlabel('sec')
    plt.colorbar(orientation='horizontal', fraction=0.05, aspect=50)
    plt.tight_layout()
    plt.show()
    return None
  692. def plot_psth_time(self, ch_ids=[], save_fig=False):
  693. states = self.params.lfp.motor_mapping
  694. for ch_id in ch_ids:
  695. fh = plt.figure(1)
  696. plt.clf()
  697. for ii in range(5):
  698. plt.subplot(2, 3, ii + 1)
  699. plt.plot(np.mean(self.psth_mapping[ii][:, :, ch_id], axis=0), 'C0', alpha=0.5)
  700. plt.plot(np.median(self.psth_mapping[ii][:, :, ch_id], axis=0), 'k', alpha=0.5)
  701. plt.ylim(-200, 200)
  702. plt.vlines(1000, -500, 500, alpha=0.7)
  703. plt.title(f'{states[ii]}, n={self.psth_mapping[ii].shape[0]}')
  704. if ii < 3:
  705. plt.xticks([])
  706. plt.tight_layout()
  707. if save_fig:
  708. fig_name = f'{self.results_path}psth/{self.base_name.split("-")[1]}_psth_time_ch_{ch_id}_.png'
  709. plt.savefig(fig_name)
  710. print(f'saving figure {fh.number} in {fig_name}\n')
  711. def plot_psth_stft(self, arr_nr=[], ch_ids=[], fmin=0, fmax=200, save_fig=False):
  712. states = self.params.lfp.motor_mapping
  713. if arr_nr == 1:
  714. psth = self.psth_S1_mapping
  715. S_mean = self.psth_S1_mean
  716. elif arr_nr == 2:
  717. psth = self.psth_S2_mapping
  718. S_mean = self.psth_S2_mean
  719. for ch_id in ch_ids:
  720. fh = plt.figure(1)
  721. plt.clf()
  722. ff = self.ff_stft_mapping
  723. tt = self.tt_stft_mapping
  724. fidx = (ff > fmin) & (ff < fmax)
  725. for cond in range(5):
  726. plt.subplot(2, 3, cond + 1)
  727. plt.pcolormesh(tt, ff[fidx], np.median(psth[cond][:, fidx, ch_id, :], axis=0) / S_mean[fidx, ch_id, :])
  728. # plt.plot(np.mean(self.psth_mapping[cond][:, fidx, ch_id, :], axis=0), 'C0', alpha=0.5)
  729. plt.title(f'{states[cond]}, n={psth[cond].shape[0]}')
  730. if cond < 3:
  731. plt.xticks([])
  732. plt.tight_layout()
  733. if save_fig:
  734. fig_name = f'{self.results_path}psth/stft/{self.base_name.split("-")[1]}_psth_stft_arr_{arr_nr}_ch_{ch_id}_.png'
  735. plt.savefig(fig_name)
  736. print(f'saving figure {fh.number} in {fig_name}\n')
  737. def plot_pdf(self, f_id=0, ch_ids=range(32), xmax=50, ymax=1000):
  738. data = self.data[f_id, 0]
  739. plt.figure()
  740. plt.clf()
  741. for ch_id in range(len(ch_ids)):
  742. ax = plt.subplot(8, 8, ch_id + 1)
  743. hist, bin_edges = np.histogram(data[:, ch_id], range(-1, 50))
  744. plt.bar(bin_edges[:-1], hist, width=1, label=f'{ch_ids[ch_id]}')
  745. plt.xlim(0, xmax)
  746. plt.ylim(0, ymax)
  747. plt.legend(loc=1, prop={'size': 6})
  748. if ch_id < 56:
  749. ax.set_xticks([])
  750. else:
  751. ax.set_xlabel('sp/sec')
  752. if np.mod(ch_id, 8) > 0:
  753. ax.set_yticks([])
  754. return None
  755. def plot_spectra(self, f_id=0, recompute=True, fs=20, v_thr=5, ymin=0, ymax=7, arr_id=0):
  756. data = self.data[f_id, 0]
  757. if arr_id == 0:
  758. ch_ids = range(32, 96)
  759. else:
  760. ch_ids = list(range(32)) + list(range(96, 128))
  761. if not hasattr(self, 'Y') or recompute:
  762. # Y1, ff1 = analytics1.calc_fft(data, fs=fs, i_stop=-1, axis=0)
  763. ff, Y = signal.welch(data, fs=fs, axis=0, nperseg=1000)
  764. # fidx1 = (ff1 > 0.1) & (ff1 < 5)
  765. self.Y = Y
  766. self.ff = ff
  767. fidx = (self.ff > 0.1) & (self.ff < 5)
  768. v_mean = data.mean(axis=0)
  769. v_ids = np.argwhere(v_mean > v_thr) # get ids of channels with mean firing rate higher than v_thr
  770. plt.figure()
  771. plt.clf()
  772. for ii, ch_id in enumerate(ch_ids):
  773. ax = plt.subplot(8, 8, ii + 1)
  774. # plt.plot(ff1[fidx1], Y1[fidx1, ch_id], label=f'{ch_id}')
  775. if ch_id in v_ids:
  776. plt.plot(self.ff[fidx], self.Y[fidx, ch_id], 'C0', lw=2, label=f'{ch_id}')
  777. else:
  778. plt.plot(self.ff[fidx], self.Y[fidx, ch_id], 'k', lw=1, alpha=0.5, label=f'{ch_id}')
  779. plt.legend(loc=1, prop={'size': 6})
  780. plt.ylim(ymin, ymax)
  781. if ch_id < 56:
  782. ax.set_xticks([])
  783. else:
  784. ax.set_xlabel('Hz')
  785. if np.mod(ch_id, 8) > 0:
  786. ax.set_yticks([])
  787. return None