blackrockrawio.py 81 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925
  1. # -*- coding: utf-8 -*-
  2. """
  3. Module for reading data from files in the Blackrock in raw format.
  4. This work is based on:
  5. * Chris Rodgers - first version
  6. * Michael Denker, Lyuba Zehl - second version
  7. * Samuel Garcia - third version
  8. * Lyuba Zehl, Michael Denker - fourth version
  9. * Samuel Garcia - fifth version
  10. This IO supports reading only.
  11. This IO is able to read:
  12. * the nev file which contains spikes
  13. * ns1, ns2, .., ns6 files that contain signals at different sampling rates
  14. This IO can handle the following Blackrock file specifications:
  15. * 2.1
  16. * 2.2
  17. * 2.3
  18. The neural data channels are 1 - 128.
  19. The analog inputs are 129 - 144. (129 - 137 AC coupled, 138 - 144 DC coupled)
  20. spike- and event-data; 30000 Hz in NEV file.
  21. "ns1": "analog data: 500 Hz",
  22. "ns2": "analog data: 1000 Hz",
  23. "ns3": "analog data: 2000 Hz",
  24. "ns4": "analog data: 10000 Hz",
  25. "ns5": "analog data: 30000 Hz",
  26. "ns6": "analog data: 30000 Hz (no digital filter)"
  27. The possible file extensions of the Cerebus system and their content:
  28. ns1: contains analog data; sampled at 500 Hz (+ digital filters)
  29. ns2: contains analog data; sampled at 1000 Hz (+ digital filters)
  30. ns3: contains analog data; sampled at 2000 Hz (+ digital filters)
  31. ns4: contains analog data; sampled at 10000 Hz (+ digital filters)
  32. ns5: contains analog data; sampled at 30000 Hz (+ digital filters)
  33. ns6: contains analog data; sampled at 30000 Hz (no digital filters)
  34. nev: contains spike- and event-data; sampled at 30000 Hz
  35. sif: contains institution and patient info (XML)
  36. ccf: contains Cerebus configurations
  37. TODO:
  38. * videosync events (file spec 2.3)
  39. * tracking events (file spec 2.3)
  40. * buttontrigger events (file spec 2.3)
  41. * config events (file spec 2.3)
  42. * check left sweep settings of Blackrock
  43. * check nsx offsets (file spec 2.1)
  44. * add info of nev ext header (NSASEXEX) to non-neural events
  45. (file spec 2.1 and 2.2)
  46. * read sif file information
  47. * read ccf file information
  48. * fix reading of periodic sampling events (non-neural event type)
  49. (file spec 2.1 and 2.2)
  50. """
  51. from __future__ import absolute_import, division, print_function
  52. import datetime
  53. import os
  54. import re
  55. import warnings
  56. import numpy as np
  57. import quantities as pq
  58. from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
  59. _event_channel_dtype)
  60. class BlackrockRawIO(BaseRawIO):
  61. """
  62. Class for reading data in from a file set recorded by the Blackrock
  63. (Cerebus) recording system.
  64. Upon initialization, the class is linked to the available set of Blackrock
  65. files.
  66. Note: This routine will handle files according to specification 2.1, 2.2,
  67. and 2.3. Recording pauses that may occur in file specifications 2.2 and
  68. 2.3 are automatically extracted and the data set is split into different
  69. segments.
  70. The Blackrock data format consists not of a single file, but a set of
  71. different files. This constructor associates itself with a set of files
  72. that constitute a common data set. By default, all files belonging to
  73. the file set have the same base name, but different extensions.
  74. However, by using the override parameters, individual filenames can
  75. be set.
  76. Args:
  77. filename (string):
  78. File name (without extension) of the set of Blackrock files to
  79. associate with. Any .nsX or .nev, .sif, or .ccf extensions are
  80. ignored when parsing this parameter.
  81. nsx_override (string):
  82. File name of the .nsX files (without extension). If None,
  83. filename is used.
  84. Default: None.
  85. nev_override (string):
  86. File name of the .nev file (without extension). If None,
  87. filename is used.
  88. Default: None.
  89. nsx_to_load (int):
  90. ID of nsX file from which to load data, e.g., if set to
  91. 5 only data from the ns5 file are loaded. If None, the Io
  92. will take the maximum if file is present.
  93. Contrary to previsous version of the IO:
  94. * nsx_to_load is not a list
  95. * must be set at the init before parse_header()
  96. Examples:
  97. >>> reader = BlackrockRawIO(filename='FileSpec2.3001', nsx_to_load=5)
  98. >>> reader.parse_header()
  99. Inspect a set of file consisting of files FileSpec2.3001.ns5 and
  100. FileSpec2.3001.nev
  101. >>> print(reader)
  102. Display all informations about signal channels, units, segment size....
  103. """
  104. extensions = ['ns' + str(_) for _ in range(1, 7)]
  105. extensions.extend(['nev', ]) # 'sif', 'ccf' not yet supported
  106. rawmode = 'multi-file'
  107. def __init__(self, filename=None, nsx_override=None, nev_override=None,
  108. nsx_to_load=None, verbose=False):
  109. """
  110. Initialize the BlackrockIO class.
  111. """
  112. BaseRawIO.__init__(self)
  113. self.filename = filename
  114. # remove extension from base _filenames
  115. for ext in self.extensions:
  116. if self.filename.endswith(os.path.extsep + ext):
  117. self.filename = self.filename.replace(os.path.extsep + ext, '')
  118. self.nsx_to_load = nsx_to_load
  119. # remove extensions from overrides
  120. self._filenames = {}
  121. if nsx_override:
  122. self._filenames['nsx'] = re.sub(
  123. os.path.extsep + 'ns[1,2,3,4,5,6]$', '', nsx_override)
  124. else:
  125. self._filenames['nsx'] = self.filename
  126. if nev_override:
  127. self._filenames['nev'] = re.sub(
  128. os.path.extsep + 'nev$', '', nev_override)
  129. else:
  130. self._filenames['nev'] = self.filename
  131. # check which files are available
  132. self._avail_files = dict.fromkeys(self.extensions, False)
  133. self._avail_nsx = []
  134. for ext in self.extensions:
  135. if ext.startswith('ns'):
  136. file2check = ''.join(
  137. [self._filenames['nsx'], os.path.extsep, ext])
  138. else:
  139. file2check = ''.join(
  140. [self._filenames[ext], os.path.extsep, ext])
  141. if os.path.exists(file2check):
  142. self._avail_files[ext] = True
  143. if ext.startswith('ns'):
  144. self._avail_nsx.append(int(ext[-1]))
  145. if not self._avail_files['nev'] and not self._avail_nsx:
  146. raise IOError("No Blackrock files found in specified path")
  147. # These dictionaries are used internally to map the file specification
  148. # revision of the nsx and nev files to one of the reading routines
  149. # NSX
  150. self.__nsx_header_reader = {
  151. '2.1': self.__read_nsx_header_variant_a,
  152. '2.2': self.__read_nsx_header_variant_b,
  153. '2.3': self.__read_nsx_header_variant_b}
  154. self.__nsx_dataheader_reader = {
  155. '2.1': self.__read_nsx_dataheader_variant_a,
  156. '2.2': self.__read_nsx_dataheader_variant_b,
  157. '2.3': self.__read_nsx_dataheader_variant_b}
  158. self.__nsx_data_reader = {
  159. '2.1': self.__read_nsx_data_variant_a,
  160. '2.2': self.__read_nsx_data_variant_b,
  161. '2.3': self.__read_nsx_data_variant_b}
  162. self.__nsx_params = {
  163. '2.1': self.__get_nsx_param_variant_a,
  164. '2.2': self.__get_nsx_param_variant_b,
  165. '2.3': self.__get_nsx_param_variant_b}
  166. # NEV
  167. self.__nev_header_reader = {
  168. '2.1': self.__read_nev_header_variant_a,
  169. '2.2': self.__read_nev_header_variant_b,
  170. '2.3': self.__read_nev_header_variant_c}
  171. self.__nev_data_reader = {
  172. '2.1': self.__read_nev_data_variant_a,
  173. '2.2': self.__read_nev_data_variant_a,
  174. '2.3': self.__read_nev_data_variant_b}
  175. self.__waveform_size = {
  176. '2.1': self.__get_waveform_size_variant_a,
  177. '2.2': self.__get_waveform_size_variant_a,
  178. '2.3': self.__get_waveform_size_variant_b}
  179. self.__channel_labels = {
  180. '2.1': self.__get_channel_labels_variant_a,
  181. '2.2': self.__get_channel_labels_variant_b,
  182. '2.3': self.__get_channel_labels_variant_b}
  183. self.__nonneural_evdicts = {
  184. '2.1': self.__get_nonneural_evdicts_variant_a,
  185. '2.2': self.__get_nonneural_evdicts_variant_a,
  186. '2.3': self.__get_nonneural_evdicts_variant_b}
  187. self.__comment_evdict = {
  188. '2.1': self.__get_comment_evdict_variant_a,
  189. '2.2': self.__get_comment_evdict_variant_a,
  190. '2.3': self.__get_comment_evdict_variant_a}
  191. def _parse_header(self):
  192. main_sampling_rate = 30000.
  193. event_channels = []
  194. unit_channels = []
  195. sig_channels = []
  196. # Step1 NEV file
  197. if self._avail_files['nev']:
  198. # Load file spec and headers of available
  199. # read nev file specification
  200. self.__nev_spec = self.__extract_nev_file_spec()
  201. # read nev headers
  202. self.__nev_basic_header, self.__nev_ext_header = \
  203. self.__nev_header_reader[self.__nev_spec]()
  204. self.nev_data = self.__nev_data_reader[self.__nev_spec]()
  205. spikes, spike_segment_ids = self.nev_data['Spikes']
  206. # scan all channel to get number of Unit
  207. unit_channels = []
  208. self.internal_unit_ids = [] # pair of chan['packet_id'], spikes['unit_class_nb']
  209. for i in range(len(self.__nev_ext_header[b'NEUEVWAV'])):
  210. channel_id = self.__nev_ext_header[b'NEUEVWAV']['electrode_id'][i]
  211. chan_mask = (spikes['packet_id'] == channel_id)
  212. chan_spikes = spikes[chan_mask]
  213. all_unit_id = np.unique(chan_spikes['unit_class_nb'])
  214. for u, unit_id in enumerate(all_unit_id):
  215. self.internal_unit_ids.append((channel_id, unit_id))
  216. name = "ch{}#{}".format(channel_id, unit_id)
  217. _id = "Unit {}".format(1000 * channel_id + unit_id)
  218. wf_gain = self.__nev_params('digitization_factor')[channel_id] / 1000.
  219. wf_offset = 0.
  220. wf_units = 'uV'
  221. # TODO: Double check if this is the correct assumption (10 samples)
  222. # default value: threshold crossing after 10 samples of waveform
  223. wf_left_sweep = 10
  224. wf_sampling_rate = main_sampling_rate
  225. unit_channels.append((name, _id, wf_units, wf_gain,
  226. wf_offset, wf_left_sweep, wf_sampling_rate))
  227. # scan events
  228. # NonNeural: serial and digital input
  229. events_data, event_segment_ids = self.nev_data['NonNeural']
  230. ev_dict = self.__nonneural_evdicts[self.__nev_spec](events_data)
  231. if 'Comments' in self.nev_data:
  232. comments_data, comments_segment_ids = self.nev_data['Comments']
  233. ev_dict.update(self.__comment_evdict[self.__nev_spec](comments_data))
  234. for ev_name in ev_dict:
  235. event_channels.append((ev_name, '', 'event'))
  236. # TODO: TrackingEvents
  237. # TODO: ButtonTrigger
  238. # TODO: VideoSync
  239. # Step2 NSX file
  240. # Load file spec and headers of available nsx files
  241. self.__nsx_spec = {}
  242. self.__nsx_basic_header = {}
  243. self.__nsx_ext_header = {}
  244. self.__nsx_data_header = {}
  245. for nsx_nb in self._avail_nsx:
  246. spec = self.__nsx_spec[nsx_nb] = self.__extract_nsx_file_spec(nsx_nb)
  247. # read nsx headers
  248. self.__nsx_basic_header[nsx_nb], self.__nsx_ext_header[nsx_nb] = \
  249. self.__nsx_header_reader[spec](nsx_nb)
  250. # Read nsx data header(s)
  251. # for nsxdef get_analogsignal_shape(self, block_index, seg_index):
  252. self.__nsx_data_header[nsx_nb] = self.__nsx_dataheader_reader[spec](nsx_nb)
  253. # We can load only one for one class instance
  254. if self.nsx_to_load is None and len(self._avail_nsx) > 0:
  255. self.nsx_to_load = max(self._avail_nsx)
  256. if self.nsx_to_load is not None and \
  257. self.__nsx_spec[self.nsx_to_load] == '2.1' and \
  258. not self._avail_files['nev']:
  259. pass
  260. # Because rescaling to volts requires information from nev file (dig_factor)
  261. # Remove if raw loading becomes possible
  262. # raise IOError("For loading Blackrock file version 2.1 .nev files are required!")
  263. # This requires nsX to be parsed already
  264. # Needs to be called when no nsX are available as well in order to warn the user
  265. if self._avail_files['nev']:
  266. self.__match_nsx_and_nev_segment_ids(self.nsx_to_load)
  267. if self.nsx_to_load is not None:
  268. spec = self.__nsx_spec[self.nsx_to_load]
  269. self.nsx_data = self.__nsx_data_reader[spec](self.nsx_to_load)
  270. self._nb_segment = len(self.nsx_data)
  271. sig_sampling_rate = float(
  272. main_sampling_rate / self.__nsx_basic_header[self.nsx_to_load]['period'])
  273. if spec in ['2.2', '2.3']:
  274. ext_header = self.__nsx_ext_header[self.nsx_to_load]
  275. elif spec == '2.1':
  276. ext_header = []
  277. keys = ['labels', 'units', 'min_analog_val',
  278. 'max_analog_val', 'min_digital_val', 'max_digital_val']
  279. params = self.__nsx_params[spec](self.nsx_to_load)
  280. for i in range(len(params['labels'])):
  281. d = {}
  282. for key in keys:
  283. d[key] = params[key][i]
  284. ext_header.append(d)
  285. for i, chan in enumerate(ext_header):
  286. if spec in ['2.2', '2.3']:
  287. ch_name = chan['electrode_label'].decode()
  288. ch_id = chan['electrode_id']
  289. units = chan['units'].decode()
  290. elif spec == '2.1':
  291. ch_name = chan['labels']
  292. ch_id = self.__nsx_ext_header[self.nsx_to_load][i]['electrode_id']
  293. units = chan['units']
  294. sig_dtype = 'int16'
  295. # max_analog_val/min_analog_val/max_digital_val/min_analog_val are int16!!!!!
  296. # dangarous situation so cast to float everyone
  297. if np.isnan(float(chan['min_analog_val'])):
  298. gain = 1
  299. offset = 0
  300. else:
  301. gain = (float(chan['max_analog_val']) - float(chan['min_analog_val'])) / \
  302. (float(chan['max_digital_val']) - float(chan['min_digital_val']))
  303. offset = -float(chan['min_digital_val']) \
  304. * gain + float(chan['min_analog_val'])
  305. group_id = 0
  306. sig_channels.append((ch_name, ch_id, sig_sampling_rate, sig_dtype,
  307. units, gain, offset, group_id,))
  308. self.__delete_empty_segments()
  309. # t_start/t_stop for segment are given by nsx limits or nev limits
  310. self._sigs_t_starts = []
  311. self._seg_t_starts, self._seg_t_stops = [], []
  312. for data_bl in range(self._nb_segment):
  313. length = self.nsx_data[data_bl].shape[0]
  314. if self.__nsx_data_header[self.nsx_to_load] is None:
  315. t_start = 0.
  316. else:
  317. t_start = self.__nsx_data_header[self.nsx_to_load][data_bl]['timestamp'] / \
  318. self.__nsx_basic_header[self.nsx_to_load]['timestamp_resolution']
  319. self._sigs_t_starts.append(t_start)
  320. t_stop = t_start + length / sig_sampling_rate
  321. if self._avail_files['nev']:
  322. max_nev_time = 0
  323. for k, (data, ev_ids) in self.nev_data.items():
  324. segment_mask = ev_ids == data_bl
  325. if data[segment_mask].size > 0:
  326. t = data[segment_mask][-1]['timestamp'] / self.__nev_basic_header[
  327. 'timestamp_resolution']
  328. max_nev_time = max(max_nev_time, t)
  329. if max_nev_time > t_stop:
  330. t_stop = max_nev_time
  331. min_nev_time = max_nev_time
  332. for k, (data, ev_ids) in self.nev_data.items():
  333. segment_mask = ev_ids == data_bl
  334. if data[segment_mask].size > 0:
  335. t = data[segment_mask][0]['timestamp'] / self.__nev_basic_header[
  336. 'timestamp_resolution']
  337. min_nev_time = min(min_nev_time, t)
  338. if min_nev_time < t_start:
  339. t_start = min_nev_time
  340. self._seg_t_starts.append(t_start)
  341. self._seg_t_stops.append(float(t_stop))
  342. else:
  343. # When only nev is available, only segments that are documented in nev can be detected
  344. max_nev_times = {}
  345. min_nev_times = {}
  346. # Find maximal and minimal time for each nev segment
  347. for k, (data, ev_ids) in self.nev_data.items():
  348. for i in np.unique(ev_ids):
  349. mask = [ev_ids == i]
  350. curr_data = data[mask]
  351. if curr_data.size > 0:
  352. if max(curr_data['timestamp']) >= max_nev_times.get(i, 0):
  353. max_nev_times[i] = max(curr_data['timestamp'])
  354. if min(curr_data['timestamp']) <= min_nev_times.get(i,
  355. max_nev_times[i]):
  356. min_nev_times[i] = min(curr_data['timestamp'])
  357. # Calculate t_start and t_stop for each segment in seconds
  358. resolution = self.__nev_basic_header['timestamp_resolution']
  359. self._seg_t_starts = [v / float(resolution) for k, v in sorted(min_nev_times.items())]
  360. self._seg_t_stops = [v / float(resolution) for k, v in sorted(max_nev_times.items())]
  361. self._nb_segment = len(self._seg_t_starts)
  362. self._sigs_t_starts = [None] * self._nb_segment
  363. # finalize header
  364. unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
  365. event_channels = np.array(event_channels, dtype=_event_channel_dtype)
  366. sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
  367. self.header = {}
  368. self.header['nb_block'] = 1
  369. self.header['nb_segment'] = [self._nb_segment]
  370. self.header['signal_channels'] = sig_channels
  371. self.header['unit_channels'] = unit_channels
  372. self.header['event_channels'] = event_channels
  373. rec_datetime = self.__nev_params('rec_datetime') if self._avail_files['nev'] else None
  374. # Put annotations at some places for compatibility
  375. # with previous BlackrockIO version
  376. self._generate_minimal_annotations()
  377. block_ann = self.raw_annotations['blocks'][0]
  378. block_ann['description'] = 'Block of data from Blackrock file set.'
  379. block_ann['file_origin'] = self.filename
  380. block_ann['name'] = "Blackrock Data Block"
  381. block_ann['rec_datetime'] = rec_datetime
  382. block_ann['avail_file_set'] = [k for k, v in self._avail_files.items() if v]
  383. block_ann['avail_nsx'] = self._avail_nsx
  384. block_ann['avail_nev'] = self._avail_files['nev']
  385. # 'sif' and 'ccf' files not yet supported
  386. # block_ann['avail_sif'] = self._avail_files['sif']
  387. # block_ann['avail_ccf'] = self._avail_files['ccf']
  388. block_ann['rec_pauses'] = False
  389. for c in range(unit_channels.size):
  390. unit_ann = self.raw_annotations['unit_channels'][c]
  391. channel_id, unit_id = self.internal_unit_ids[c]
  392. unit_ann['channel_id'] = self.internal_unit_ids[c][0]
  393. unit_ann['unit_id'] = self.internal_unit_ids[c][1]
  394. unit_ann['unit_tag'] = {0: 'unclassified', 255: 'noise'}.get(unit_id, str(unit_id))
  395. unit_ann['description'] = 'Unit channel_id: {}, unit_id: {}, unit_tag: {}'.format(
  396. channel_id, unit_id, unit_ann['unit_tag'])
  397. flt_type = {0: 'None', 1: 'Butterworth'}
  398. for c in range(sig_channels.size):
  399. chidx_ann = self.raw_annotations['signal_channels'][c]
  400. if self._avail_files['nev']:
  401. neuevwav = self.__nev_ext_header[b'NEUEVWAV']
  402. if sig_channels[c]['id'] in neuevwav['electrode_id']:
  403. get_idx = list(neuevwav['electrode_id']).index(sig_channels[c]['id'])
  404. chidx_ann['connector_ID'] = neuevwav['physical_connector'][get_idx]
  405. chidx_ann['connector_pinID'] = neuevwav['connector_pin'][get_idx]
  406. chidx_ann['nev_dig_factor'] = neuevwav['digitization_factor'][get_idx]
  407. chidx_ann['nev_energy_threshold'] = neuevwav['energy_threshold'][
  408. get_idx] * pq.uV
  409. chidx_ann['nev_hi_threshold'] = neuevwav['hi_threshold'][get_idx] * pq.uV
  410. chidx_ann['nev_lo_threshold'] = neuevwav['lo_threshold'][get_idx] * pq.uV
  411. chidx_ann['nb_sorted_units'] = neuevwav['nb_sorted_units'][get_idx]
  412. chidx_ann['waveform_size'] = self.__waveform_size[self.__nev_spec](
  413. )[sig_channels[c]['id']] * self.__nev_params('waveform_time_unit')
  414. if self.__nev_spec in ['2.2', '2.3']:
  415. neuevflt = self.__nev_ext_header[b'NEUEVFLT']
  416. get_idx = list(
  417. neuevflt['electrode_id']).index(
  418. sig_channels[c]['id'])
  419. # filter type codes (extracted from blackrock manual)
  420. chidx_ann['nev_hi_freq_corner'] = neuevflt['hi_freq_corner'][
  421. get_idx] / 1000. * pq.Hz
  422. chidx_ann['nev_hi_freq_order'] = neuevflt['hi_freq_order'][get_idx]
  423. chidx_ann['nev_hi_freq_type'] = flt_type[neuevflt['hi_freq_type'][
  424. get_idx]]
  425. chidx_ann['nev_lo_freq_corner'] = neuevflt['lo_freq_corner'][
  426. get_idx] / 1000. * pq.Hz
  427. chidx_ann['nev_lo_freq_order'] = neuevflt['lo_freq_order'][get_idx]
  428. chidx_ann['nev_lo_freq_type'] = flt_type[neuevflt['lo_freq_type'][
  429. get_idx]]
  430. if self.__nsx_spec[self.nsx_to_load] in ['2.2', '2.3'] and self.__nsx_ext_header:
  431. # It does not matter which nsX file to ask for this info
  432. k = list(self.__nsx_ext_header.keys())[0]
  433. if sig_channels[c]['id'] in self.__nsx_ext_header[k]['electrode_id']:
  434. get_idx = list(
  435. self.__nsx_ext_header[k]['electrode_id']).index(
  436. sig_channels[c]['id'])
  437. chidx_ann['connector_ID'] = self.__nsx_ext_header[k]['physical_connector'][
  438. get_idx]
  439. chidx_ann['connector_pinID'] = self.__nsx_ext_header[k]['connector_pin'][
  440. get_idx]
  441. chidx_ann['nsx_hi_freq_corner'] = \
  442. self.__nsx_ext_header[k]['hi_freq_corner'][get_idx] / 1000. * pq.Hz
  443. chidx_ann['nsx_lo_freq_corner'] = \
  444. self.__nsx_ext_header[k]['lo_freq_corner'][get_idx] / 1000. * pq.Hz
  445. chidx_ann['nsx_hi_freq_order'] = self.__nsx_ext_header[k][
  446. 'hi_freq_order'][get_idx]
  447. chidx_ann['nsx_lo_freq_order'] = self.__nsx_ext_header[k][
  448. 'lo_freq_order'][get_idx]
  449. chidx_ann['nsx_hi_freq_type'] = flt_type[
  450. self.__nsx_ext_header[k]['hi_freq_type'][get_idx]]
  451. chidx_ann['nsx_lo_freq_type'] = flt_type[
  452. self.__nsx_ext_header[k]['hi_freq_type'][get_idx]]
  453. for seg_index in range(self._nb_segment):
  454. seg_ann = block_ann['segments'][seg_index]
  455. seg_ann['file_origin'] = self.filename
  456. seg_ann['name'] = "Segment {}".format(seg_index)
  457. seg_ann['description'] = "Segment containing data from t_start to t_stop"
  458. if seg_index == 0:
  459. # if more than 1 segment means pause
  460. # so datetime is valide only for seg_index=0
  461. seg_ann['rec_datetime'] = rec_datetime
  462. for c in range(sig_channels.size):
  463. anasig_an = seg_ann['signals'][c]
  464. desc = "AnalogSignal {} from channel_id: {}, label: {}, nsx: {}".format(
  465. c, sig_channels['id'][c], sig_channels['name'][c], self.nsx_to_load)
  466. anasig_an['description'] = desc
  467. anasig_an['file_origin'] = self._filenames['nsx'] + '.ns' + str(self.nsx_to_load)
  468. anasig_an['nsx'] = self.nsx_to_load
  469. chidx_ann = self.raw_annotations['signal_channels'][c]
  470. chidx_ann['description'] = 'Container for Units and AnalogSignals of ' \
  471. 'one recording channel across segments.'
  472. for c in range(unit_channels.size):
  473. channel_id, unit_id = self.internal_unit_ids[c]
  474. st_ann = seg_ann['units'][c]
  475. unit_ann = self.raw_annotations['unit_channels'][c]
  476. st_ann.update(unit_ann)
  477. st_ann['description'] = 'SpikeTrain channel_id: {}, unit_id: {}'.format(
  478. channel_id, unit_id)
  479. st_ann['file_origin'] = self._filenames['nev'] + '.nev'
  480. if self._avail_files['nev']:
  481. ev_dict = self.__nonneural_evdicts[self.__nev_spec](events_data)
  482. if 'Comments' in self.nev_data:
  483. ev_dict.update(self.__comment_evdict[self.__nev_spec](comments_data))
  484. color_codes = ["#{:08X}".format(code) for code in comments_data['color']]
  485. color_codes = np.array(color_codes, dtype='S9')
  486. for c in range(event_channels.size):
  487. # Next line makes ev_ann a reference to seg_ann['events'][c]
  488. ev_ann = seg_ann['events'][c]
  489. name = event_channels['name'][c]
  490. ev_ann['description'] = ev_dict[name]['desc']
  491. ev_ann['file_origin'] = self._filenames['nev'] + '.nev'
  492. if name == 'comments':
  493. ev_ann['color_codes'] = color_codes
  494. def _source_name(self):
  495. return self.filename
  496. def _segment_t_start(self, block_index, seg_index):
  497. return self._seg_t_starts[seg_index]
  498. def _segment_t_stop(self, block_index, seg_index):
  499. return self._seg_t_stops[seg_index]
  500. def _get_signal_size(self, block_index, seg_index, channel_indexes):
  501. memmap_data = self.nsx_data[seg_index]
  502. return memmap_data.shape[0]
  503. def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
  504. return self._sigs_t_starts[seg_index]
  505. def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
  506. assert block_index == 0
  507. memmap_data = self.nsx_data[seg_index]
  508. if channel_indexes is None:
  509. channel_indexes = slice(None)
  510. sig_chunk = memmap_data[i_start:i_stop, channel_indexes]
  511. return sig_chunk
  512. def _spike_count(self, block_index, seg_index, unit_index):
  513. channel_id, unit_id = self.internal_unit_ids[unit_index]
  514. all_spikes = self.nev_data['Spikes'][0]
  515. mask = (all_spikes['packet_id'] == channel_id) & (all_spikes['unit_class_nb'] == unit_id)
  516. if self._nb_segment == 1:
  517. # very fast
  518. nb = int(np.sum(mask))
  519. else:
  520. # must clip in time time range
  521. timestamp = all_spikes[mask]['timestamp']
  522. sl = self._get_timestamp_slice(timestamp, seg_index, None, None)
  523. timestamp = timestamp[sl]
  524. nb = timestamp.size
  525. return nb
  526. def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
  527. channel_id, unit_id = self.internal_unit_ids[unit_index]
  528. all_spikes, event_segment_ids = self.nev_data['Spikes']
  529. # select by channel_id and unit_id
  530. mask = ((all_spikes['packet_id'] == channel_id) & (all_spikes['unit_class_nb'] == unit_id)
  531. & (event_segment_ids == seg_index))
  532. unit_spikes = all_spikes[mask]
  533. timestamp = unit_spikes['timestamp']
  534. sl = self._get_timestamp_slice(timestamp, seg_index, t_start, t_stop)
  535. timestamp = timestamp[sl]
  536. return timestamp
  537. def _get_timestamp_slice(self, timestamp, seg_index, t_start, t_stop):
  538. if self._nb_segment > 1:
  539. # we must clip event in seg time limits
  540. if t_start is None:
  541. t_start = self._seg_t_starts[seg_index]
  542. if t_stop is None:
  543. t_stop = self._seg_t_stops[seg_index]
  544. if t_start is None:
  545. ind_start = None
  546. else:
  547. ts = np.math.ceil(t_start * self.__nev_basic_header['timestamp_resolution'])
  548. ind_start = np.searchsorted(timestamp, ts)
  549. if t_stop is None:
  550. ind_stop = None
  551. else:
  552. ts = int(t_stop * self.__nev_basic_header['timestamp_resolution'])
  553. ind_stop = np.searchsorted(timestamp, ts) # +1
  554. sl = slice(ind_start, ind_stop)
  555. return sl
  556. def _rescale_spike_timestamp(self, spike_timestamps, dtype):
  557. spike_times = spike_timestamps.astype(dtype)
  558. spike_times /= self.__nev_basic_header['timestamp_resolution']
  559. return spike_times
  560. def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
  561. channel_id, unit_id = self.internal_unit_ids[unit_index]
  562. all_spikes, event_segment_ids = self.nev_data['Spikes']
  563. mask = ((all_spikes['packet_id'] == channel_id) & (all_spikes['unit_class_nb'] == unit_id)
  564. & (event_segment_ids == seg_index))
  565. unit_spikes = all_spikes[mask]
  566. wf_dtype = self.__nev_params('waveform_dtypes')[channel_id]
  567. wf_size = self.__nev_params('waveform_size')[channel_id]
  568. waveforms = unit_spikes['waveform'].flatten().view(wf_dtype)
  569. waveforms = waveforms.reshape(int(unit_spikes.size), 1, int(wf_size))
  570. timestamp = unit_spikes['timestamp']
  571. sl = self._get_timestamp_slice(timestamp, seg_index, t_start, t_stop)
  572. waveforms = waveforms[sl]
  573. return waveforms
  574. def _event_count(self, block_index, seg_index, event_channel_index):
  575. name = self.header['event_channels']['name'][event_channel_index]
  576. if name == 'comments':
  577. events_data, event_segment_ids = self.nev_data['Comments']
  578. ev_dict = self.__comment_evdict[self.__nev_spec](events_data)[name]
  579. else:
  580. events_data, event_segment_ids = self.nev_data['NonNeural']
  581. ev_dict = self.__nonneural_evdicts[self.__nev_spec](events_data)[name]
  582. mask = ev_dict['mask'] & (event_segment_ids == seg_index)
  583. if self._nb_segment == 1:
  584. # very fast
  585. nb = int(np.sum(mask))
  586. else:
  587. # must clip in time time range
  588. timestamp = events_data[ev_dict['mask']]['timestamp']
  589. sl = self._get_timestamp_slice(timestamp, seg_index, None, None)
  590. timestamp = timestamp[sl]
  591. nb = timestamp.size
  592. return nb
  593. def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
  594. name = self.header['event_channels']['name'][event_channel_index]
  595. if name == 'comments':
  596. events_data, event_segment_ids = self.nev_data['Comments']
  597. ev_dict = self.__comment_evdict[self.__nev_spec](events_data)[name]
  598. # If immediate decoding is desired:
  599. encoding = {0: 'latin_1', 1: 'utf_16', 255: 'latin_1'}
  600. labels = [data[ev_dict['field']].decode(
  601. encoding[data['char_set']]) for data in events_data]
  602. # Only ASCII can be allowed due to using numpy
  603. # labels.astype('S') forces to use bytes in BaseFromRaw 401, in read_segment
  604. # This is not recommended
  605. # TODO: Maybe switch to astype('U')
  606. labels = np.array([data.encode('ASCII', errors='ignore') for data in labels])
  607. else:
  608. events_data, event_segment_ids = self.nev_data['NonNeural']
  609. ev_dict = self.__nonneural_evdicts[self.__nev_spec](events_data)[name]
  610. labels = events_data[ev_dict['field']]
  611. mask = ev_dict['mask'] & (event_segment_ids == seg_index)
  612. timestamp = events_data[mask]['timestamp']
  613. labels = labels[mask]
  614. # time clip
  615. sl = self._get_timestamp_slice(timestamp, seg_index, t_start, t_stop)
  616. timestamp = timestamp[sl]
  617. labels = labels[sl]
  618. durations = None
  619. return timestamp, durations, labels
  620. def _rescale_event_timestamp(self, event_timestamps, dtype):
  621. ev_times = event_timestamps.astype(dtype)
  622. ev_times /= self.__nev_basic_header['timestamp_resolution']
  623. return ev_times
  624. ###################################################
  625. ###################################################
  626. # Above here code from Lyuba Zehl, Michael Denker
  627. # coming from previous BlackrockIO
  628. def __extract_nsx_file_spec(self, nsx_nb):
  629. """
  630. Extract file specification from an .nsx file.
  631. """
  632. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  633. # Header structure of files specification 2.2 and higher. For files 2.1
  634. # and lower, the entries ver_major and ver_minor are not supported.
  635. dt0 = [
  636. ('file_id', 'S8'),
  637. ('ver_major', 'uint8'),
  638. ('ver_minor', 'uint8')]
  639. nsx_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  640. if nsx_file_id['file_id'].decode() == 'NEURALSG':
  641. spec = '2.1'
  642. elif nsx_file_id['file_id'].decode() == 'NEURALCD':
  643. spec = '{0}.{1}'.format(
  644. nsx_file_id['ver_major'], nsx_file_id['ver_minor'])
  645. else:
  646. raise IOError('Unsupported NSX file type.')
  647. return spec
  648. def __extract_nev_file_spec(self):
  649. """
  650. Extract file specification from an .nev file
  651. """
  652. filename = '.'.join([self._filenames['nev'], 'nev'])
  653. # Header structure of files specification 2.2 and higher. For files 2.1
  654. # and lower, the entries ver_major and ver_minor are not supported.
  655. dt0 = [
  656. ('file_id', 'S8'),
  657. ('ver_major', 'uint8'),
  658. ('ver_minor', 'uint8')]
  659. nev_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  660. if nev_file_id['file_id'].decode() == 'NEURALEV':
  661. spec = '{0}.{1}'.format(
  662. nev_file_id['ver_major'], nev_file_id['ver_minor'])
  663. else:
  664. raise IOError('NEV file type {0} is not supported'.format(
  665. nev_file_id['file_id']))
  666. return spec
  667. def __read_nsx_header_variant_a(self, nsx_nb):
  668. """
  669. Extract nsx header information from a 2.1 .nsx file
  670. """
  671. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  672. # basic header (file_id: NEURALCD)
  673. dt0 = [
  674. ('file_id', 'S8'),
  675. # label of sampling groun (e.g. "1kS/s" or "LFP Low")
  676. ('label', 'S16'),
  677. # number of 1/30000 seconds between data points
  678. # (e.g., if sampling rate "1 kS/s", period equals "30")
  679. ('period', 'uint32'),
  680. ('channel_count', 'uint32')]
  681. nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  682. # "extended" header (last field of file_id: NEURALCD)
  683. # (to facilitate compatibility with higher file specs)
  684. offset_dt0 = np.dtype(dt0).itemsize
  685. shape = nsx_basic_header['channel_count']
  686. # originally called channel_id in Blackrock user manual
  687. # (to facilitate compatibility with higher file specs)
  688. dt1 = [('electrode_id', 'uint32')]
  689. nsx_ext_header = np.memmap(
  690. filename, shape=shape, offset=offset_dt0, dtype=dt1, mode='r')
  691. return nsx_basic_header, nsx_ext_header
  692. def __read_nsx_header_variant_b(self, nsx_nb):
  693. """
  694. Extract nsx header information from a 2.2 or 2.3 .nsx file
  695. """
  696. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  697. # basic header (file_id: NEURALCD)
  698. dt0 = [
  699. ('file_id', 'S8'),
  700. # file specification split into major and minor version number
  701. ('ver_major', 'uint8'),
  702. ('ver_minor', 'uint8'),
  703. # bytes of basic & extended header
  704. ('bytes_in_headers', 'uint32'),
  705. # label of the sampling group (e.g., "1 kS/s" or "LFP low")
  706. ('label', 'S16'),
  707. ('comment', 'S256'),
  708. ('period', 'uint32'),
  709. ('timestamp_resolution', 'uint32'),
  710. # time origin: 2byte uint16 values for ...
  711. ('year', 'uint16'),
  712. ('month', 'uint16'),
  713. ('weekday', 'uint16'),
  714. ('day', 'uint16'),
  715. ('hour', 'uint16'),
  716. ('minute', 'uint16'),
  717. ('second', 'uint16'),
  718. ('millisecond', 'uint16'),
  719. # number of channel_count match number of extended headers
  720. ('channel_count', 'uint32')]
  721. nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  722. # extended header (type: CC)
  723. offset_dt0 = np.dtype(dt0).itemsize
  724. shape = nsx_basic_header['channel_count']
  725. dt1 = [
  726. ('type', 'S2'),
  727. ('electrode_id', 'uint16'),
  728. ('electrode_label', 'S16'),
  729. # used front-end amplifier bank (e.g., A, B, C, D)
  730. ('physical_connector', 'uint8'),
  731. # used connector pin (e.g., 1-37 on bank A, B, C or D)
  732. ('connector_pin', 'uint8'),
  733. # digital and analog value ranges of the signal
  734. ('min_digital_val', 'int16'),
  735. ('max_digital_val', 'int16'),
  736. ('min_analog_val', 'int16'),
  737. ('max_analog_val', 'int16'),
  738. # units of the analog range values ("mV" or "uV")
  739. ('units', 'S16'),
  740. # filter settings used to create nsx from source signal
  741. ('hi_freq_corner', 'uint32'),
  742. ('hi_freq_order', 'uint32'),
  743. ('hi_freq_type', 'uint16'), # 0=None, 1=Butterworth
  744. ('lo_freq_corner', 'uint32'),
  745. ('lo_freq_order', 'uint32'),
  746. ('lo_freq_type', 'uint16')] # 0=None, 1=Butterworth
  747. nsx_ext_header = np.memmap(
  748. filename, shape=shape, offset=offset_dt0, dtype=dt1, mode='r')
  749. return nsx_basic_header, nsx_ext_header
  750. def __read_nsx_dataheader(self, nsx_nb, offset):
  751. """
  752. Reads data header following the given offset of an nsx file.
  753. """
  754. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  755. # dtypes data header
  756. dt2 = [
  757. ('header', 'uint8'),
  758. ('timestamp', 'uint32'),
  759. ('nb_data_points', 'uint32')]
  760. return np.memmap(filename, dtype=dt2, shape=1, offset=offset, mode='r')[0]
  761. def __read_nsx_dataheader_variant_a(
  762. self, nsx_nb, filesize=None, offset=None):
  763. """
  764. Reads None for the nsx data header of file spec 2.1. Introduced to
  765. facilitate compatibility with higher file spec.
  766. """
  767. return None
  768. def __read_nsx_dataheader_variant_b(
  769. self, nsx_nb, filesize=None, offset=None, ):
  770. """
  771. Reads the nsx data header for each data block following the offset of
  772. file spec 2.2 and 2.3.
  773. """
  774. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  775. filesize = self.__get_file_size(filename)
  776. data_header = {}
  777. index = 0
  778. if offset is None:
  779. offset = self.__nsx_basic_header[nsx_nb]['bytes_in_headers']
  780. while offset < filesize:
  781. dh = self.__read_nsx_dataheader(nsx_nb, offset)
  782. data_header[index] = {
  783. 'header': dh['header'],
  784. 'timestamp': dh['timestamp'],
  785. 'nb_data_points': dh['nb_data_points'],
  786. 'offset_to_data_block': offset + dh.dtype.itemsize}
  787. # data size = number of data points * (2bytes * number of channels)
  788. # use of `int` avoids overflow problem
  789. data_size = int(dh['nb_data_points']) *\
  790. int(self.__nsx_basic_header[nsx_nb]['channel_count']) * 2
  791. # define new offset (to possible next data block)
  792. offset = data_header[index]['offset_to_data_block'] + data_size
  793. index += 1
  794. return data_header
  795. def __read_nsx_data_variant_a(self, nsx_nb):
  796. """
  797. Extract nsx data from a 2.1 .nsx file
  798. """
  799. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  800. # get shape of data
  801. shape = (
  802. self.__nsx_params['2.1'](nsx_nb)['nb_data_points'],
  803. self.__nsx_basic_header[nsx_nb]['channel_count'])
  804. offset = self.__nsx_params['2.1'](nsx_nb)['bytes_in_headers']
  805. # read nsx data
  806. # store as dict for compatibility with higher file specs
  807. data = {0: np.memmap(
  808. filename, dtype='int16', shape=shape, offset=offset, mode='r')}
  809. return data
  810. def __read_nsx_data_variant_b(self, nsx_nb):
  811. """
  812. Extract nsx data (blocks) from a 2.2 or 2.3 .nsx file. Blocks can arise
  813. if the recording was paused by the user.
  814. """
  815. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  816. data = {}
  817. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  818. # get shape and offset of data
  819. shape = (
  820. self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points'],
  821. self.__nsx_basic_header[nsx_nb]['channel_count'])
  822. offset = \
  823. self.__nsx_data_header[nsx_nb][data_bl]['offset_to_data_block']
  824. # read data
  825. data[data_bl] = np.memmap(
  826. filename, dtype='int16', shape=shape, offset=offset, mode='r')
  827. return data
  828. def __read_nev_header(self, ext_header_variants):
  829. """
  830. Extract nev header information from a of specific .nsx header variant
  831. """
  832. filename = '.'.join([self._filenames['nev'], 'nev'])
  833. # basic header
  834. dt0 = [
  835. # Set to "NEURALEV"
  836. ('file_type_id', 'S8'),
  837. ('ver_major', 'uint8'),
  838. ('ver_minor', 'uint8'),
  839. # Flags
  840. ('additionnal_flags', 'uint16'),
  841. # File index of first data sample
  842. ('bytes_in_headers', 'uint32'),
  843. # Number of bytes per data packet (sample)
  844. ('bytes_in_data_packets', 'uint32'),
  845. # Time resolution of time stamps in Hz
  846. ('timestamp_resolution', 'uint32'),
  847. # Sampling frequency of waveforms in Hz
  848. ('sample_resolution', 'uint32'),
  849. ('year', 'uint16'),
  850. ('month', 'uint16'),
  851. ('weekday', 'uint16'),
  852. ('day', 'uint16'),
  853. ('hour', 'uint16'),
  854. ('minute', 'uint16'),
  855. ('second', 'uint16'),
  856. ('millisecond', 'uint16'),
  857. ('application_to_create_file', 'S32'),
  858. ('comment_field', 'S256'),
  859. # Number of extended headers
  860. ('nb_ext_headers', 'uint32')]
  861. nev_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  862. # extended header
  863. # this consist in N block with code 8bytes + 24 data bytes
  864. # the data bytes depend on the code and need to be converted
  865. # cafilename_nsx, segse by case
  866. shape = nev_basic_header['nb_ext_headers']
  867. offset_dt0 = np.dtype(dt0).itemsize
  868. # This is the common structure of the beginning of extended headers
  869. dt1 = [
  870. ('packet_id', 'S8'),
  871. ('info_field', 'S24')]
  872. raw_ext_header = np.memmap(
  873. filename, offset=offset_dt0, dtype=dt1, shape=shape, mode='r')
  874. nev_ext_header = {}
  875. for packet_id in ext_header_variants.keys():
  876. mask = (raw_ext_header['packet_id'] == packet_id)
  877. dt2 = self.__nev_ext_header_types()[packet_id][
  878. ext_header_variants[packet_id]]
  879. nev_ext_header[packet_id] = raw_ext_header.view(dt2)[mask]
  880. return nev_basic_header, nev_ext_header
  881. def __read_nev_header_variant_a(self):
  882. """
  883. Extract nev header information from a 2.1 .nev file
  884. """
  885. ext_header_variants = {
  886. b'NEUEVWAV': 'a',
  887. b'ARRAYNME': 'a',
  888. b'ECOMMENT': 'a',
  889. b'CCOMMENT': 'a',
  890. b'MAPFILE': 'a',
  891. b'NSASEXEV': 'a'}
  892. return self.__read_nev_header(ext_header_variants)
  893. def __read_nev_header_variant_b(self):
  894. """
  895. Extract nev header information from a 2.2 .nev file
  896. """
  897. ext_header_variants = {
  898. b'NEUEVWAV': 'b',
  899. b'ARRAYNME': 'a',
  900. b'ECOMMENT': 'a',
  901. b'CCOMMENT': 'a',
  902. b'MAPFILE': 'a',
  903. b'NEUEVLBL': 'a',
  904. b'NEUEVFLT': 'a',
  905. b'DIGLABEL': 'a',
  906. b'NSASEXEV': 'a'}
  907. return self.__read_nev_header(ext_header_variants)
  908. def __read_nev_header_variant_c(self):
  909. """
  910. Extract nev header information from a 2.3 .nev file
  911. """
  912. ext_header_variants = {
  913. b'NEUEVWAV': 'b',
  914. b'ARRAYNME': 'a',
  915. b'ECOMMENT': 'a',
  916. b'CCOMMENT': 'a',
  917. b'MAPFILE': 'a',
  918. b'NEUEVLBL': 'a',
  919. b'NEUEVFLT': 'a',
  920. b'DIGLABEL': 'a',
  921. b'VIDEOSYN': 'a',
  922. b'TRACKOBJ': 'a'}
  923. return self.__read_nev_header(ext_header_variants)
  924. def __read_nev_data(self, nev_data_masks, nev_data_types):
  925. """
  926. Extract nev data from a 2.1 or 2.2 .nev file
  927. """
  928. filename = '.'.join([self._filenames['nev'], 'nev'])
  929. data_size = self.__nev_basic_header['bytes_in_data_packets']
  930. header_size = self.__nev_basic_header['bytes_in_headers']
  931. # read all raw data packets and markers
  932. dt0 = [
  933. ('timestamp', 'uint32'),
  934. ('packet_id', 'uint16'),
  935. ('value', 'S{0}'.format(data_size - 6))]
  936. raw_data = np.memmap(filename, offset=header_size, dtype=dt0, mode='r')
  937. masks = self.__nev_data_masks(raw_data['packet_id'])
  938. types = self.__nev_data_types(data_size)
  939. event_segment_ids = self.__get_event_segment_ids(raw_data, masks, nev_data_masks)
  940. data = {}
  941. for k, v in nev_data_masks.items():
  942. mask = masks[k][v]
  943. data[k] = (raw_data.view(types[k][nev_data_types[k]])[mask], event_segment_ids[mask])
  944. return data
  945. def __get_reset_event_mask(self, raw_event_data, masks, nev_data_masks):
  946. """
  947. Extract mask for reset comment events in 2.3 .nev file
  948. """
  949. restart_mask = np.logical_and(masks['Comments'][nev_data_masks['Comments']],
  950. raw_event_data['value']
  951. == b'\x00\x00\x00\x00\x00\x00critical load restart')
  952. # TODO: Fix hardcoded number of bytes
  953. return restart_mask
  954. def __get_event_segment_ids(self, raw_event_data, masks, nev_data_masks):
  955. """
  956. Construct array of corresponding segment ids for each event for nev version 2.3
  957. """
  958. if self.__nev_spec in ['2.1', '2.2']:
  959. # No pause or reset mechanism present for file version 2.1 and 2.2
  960. return np.zeros(len(raw_event_data), dtype=int)
  961. elif self.__nev_spec == '2.3':
  962. reset_ev_mask = self.__get_reset_event_mask(raw_event_data, masks, nev_data_masks)
  963. reset_ev_ids = np.where(reset_ev_mask)[0]
  964. # consistency check for monotone increasing time stamps
  965. # explicitely converting to int to allow for negative diff values
  966. jump_ids = \
  967. np.where(np.diff(np.asarray(raw_event_data['timestamp'], dtype=int)) < 0)[0] + 1
  968. overlap = np.in1d(jump_ids, reset_ev_ids)
  969. if not all(overlap):
  970. # additional resets occurred without a reset event being stored
  971. additional_ids = jump_ids[np.invert(overlap)]
  972. warnings.warn('Detected {} undocumented segments within '
  973. 'nev data after timestamps {}.'
  974. ''.format(len(additional_ids), additional_ids))
  975. reset_ev_ids = sorted(np.unique(np.concatenate((reset_ev_ids, jump_ids))))
  976. event_segment_ids = np.zeros(len(raw_event_data), dtype=int)
  977. for reset_event_id in reset_ev_ids:
  978. event_segment_ids[reset_event_id:] += 1
  979. self._nb_segment_nev = len(reset_ev_ids) + 1
  980. return event_segment_ids
  981. def __match_nsx_and_nev_segment_ids(self, nsx_nb):
  982. """
  983. Ensure matching ids of segments detected in nsx and nev file for version 2.3
  984. """
  985. # NSX required for matching, if not available, warn the user
  986. if not self._avail_nsx:
  987. warnings.warn("No nsX available so it cannot be checked whether "
  988. "the segments in nev are all correct. Most importantly, "
  989. "recording pauses will not be detected", UserWarning)
  990. return
  991. # Only needs to be done for nev version 2.3
  992. if self.__nev_spec == '2.3':
  993. nsx_offset = self.__nsx_data_header[nsx_nb][0]['timestamp']
  994. # Multiples of 1/30.000s that pass between two nsX samples
  995. nsx_period = self.__nsx_basic_header[self.nsx_to_load]['period']
  996. # NSX segments needed as dict and list
  997. nonempty_nsx_segments = {}
  998. list_nonempty_nsx_segments = []
  999. # Counts how many segments CAN be created from nev
  1000. nb_possible_nev_segments = self._nb_segment_nev
  1001. # Nonempty segments are those containing at least 2 samples
  1002. # These have to be able to be mapped to nev
  1003. for k, v in sorted(self.__nsx_data_header[nsx_nb].items()):
  1004. if v['nb_data_points'] > 1:
  1005. nonempty_nsx_segments[k] = v
  1006. list_nonempty_nsx_segments.append(v)
  1007. # Account for paused segments
  1008. # This increases nev event segment ids if from the nsx an additional segment is found
  1009. # If one new segment, i.e. that could not be determined from the nev was found,
  1010. # all following ids need to be increased to account for the additional segment before
  1011. for k, (data, ev_ids) in self.nev_data.items():
  1012. # Check all nonempty nsX segments
  1013. for i, seg in enumerate(list_nonempty_nsx_segments[:]):
  1014. # Last timestamp in this nsX segment
  1015. # Not subtracting nsX offset from end because spike extraction might continue
  1016. end_of_current_nsx_seg = seg['timestamp'] + \
  1017. seg['nb_data_points'] * self.__nsx_basic_header[nsx_nb]['period']
  1018. mask_after_seg = [(ev_ids == i)
  1019. & (data['timestamp'] > end_of_current_nsx_seg + nsx_period)]
  1020. # Show warning if spikes do not fit any segment (+- 1 sampling 'tick')
  1021. # Spike should belong to segment before
  1022. mask_outside = [(ev_ids == i)
  1023. & (data['timestamp'] < int(seg['timestamp'])
  1024. - nsx_offset - nsx_period)]
  1025. if len(data[mask_outside]) > 0:
  1026. warnings.warn("Spikes outside any segment. Detected on segment #{}".
  1027. format(i))
  1028. ev_ids[mask_outside] -= 1
  1029. # If some nev data are outside of this nsX segment, increase their segment ids
  1030. # and the ids of all following segments. They are checked for the next nsX
  1031. # segment then. If they do not fit any of them,
  1032. # a warning will be shown, indicating how far outside the segment spikes are
  1033. # If they fit the next segment, more segments are possible in nev,
  1034. # because a new one has been discovered
  1035. if len(data[mask_after_seg]) > 0:
  1036. # Warning if spikes are after last segment
  1037. if i == len(list_nonempty_nsx_segments) - 1:
  1038. timestamp_resolution = self.__nsx_params[self.__nsx_spec[
  1039. self.nsx_to_load]]('timestamp_resolution', self.nsx_to_load)
  1040. time_after_seg = (data[mask_after_seg]['timestamp'][-1]
  1041. - end_of_current_nsx_seg) / timestamp_resolution
  1042. warnings.warn("Spikes {}s after last segment.".format(time_after_seg))
  1043. # Break out of loop because it's the last iteration
  1044. # and the spikes should stay connected to last segment
  1045. break
  1046. # If reset and no segment detected in nev, then these segments cannot be
  1047. # distinguished in nev, which is a big problem
  1048. # XXX 96 is an arbitrary number based on observations in available files
  1049. elif list_nonempty_nsx_segments[i + 1]['timestamp'] - nsx_offset <= 96:
  1050. # If not all definitely belong to the next segment,
  1051. # then it cannot be distinguished where some belong
  1052. if len(data[ev_ids == i]) != len(data[mask_after_seg]):
  1053. raise ValueError("Some segments in nsX cannot be detected in nev")
  1054. # Actual processing if no problem has occurred
  1055. nb_possible_nev_segments += 1
  1056. ev_ids[ev_ids > i] += 1
  1057. ev_ids[mask_after_seg] += 1
  1058. # consistency check: same number of segments for nsx and nev data
  1059. assert nb_possible_nev_segments == len(nonempty_nsx_segments), \
  1060. ('Inconsistent ns{0} and nev file. {1} segments present in .nev file, but {2} in '
  1061. 'ns{0} file.'.format(self.nsx_to_load, nb_possible_nev_segments,
  1062. len(nonempty_nsx_segments)))
  1063. new_nev_segment_id_mapping = dict(zip(range(nb_possible_nev_segments),
  1064. sorted(list(nonempty_nsx_segments))))
  1065. # replacing event ids by matched event ids in place
  1066. for k, (data, ev_ids) in self.nev_data.items():
  1067. if len(ev_ids):
  1068. ev_ids[:] = np.vectorize(new_nev_segment_id_mapping.__getitem__)(ev_ids)
  1069. def __read_nev_data_variant_a(self):
  1070. """
  1071. Extract nev data from a 2.1 & 2.2 .nev file
  1072. """
  1073. nev_data_masks = {
  1074. 'NonNeural': 'a',
  1075. 'Spikes': 'a'}
  1076. nev_data_types = {
  1077. 'NonNeural': 'a',
  1078. 'Spikes': 'a'}
  1079. return self.__read_nev_data(nev_data_masks, nev_data_types)
  1080. def __read_nev_data_variant_b(self):
  1081. """
  1082. Extract nev data from a 2.3 .nev file
  1083. """
  1084. nev_data_masks = {
  1085. 'NonNeural': 'a',
  1086. 'Spikes': 'b',
  1087. 'Comments': 'a',
  1088. 'VideoSync': 'a',
  1089. 'TrackingEvents': 'a',
  1090. 'ButtonTrigger': 'a',
  1091. 'ConfigEvent': 'a'}
  1092. nev_data_types = {
  1093. 'NonNeural': 'b',
  1094. 'Spikes': 'a',
  1095. 'Comments': 'a',
  1096. 'VideoSync': 'a',
  1097. 'TrackingEvents': 'a',
  1098. 'ButtonTrigger': 'a',
  1099. 'ConfigEvent': 'a'}
  1100. return self.__read_nev_data(nev_data_masks, nev_data_types)
  1101. def __nev_ext_header_types(self):
  1102. """
  1103. Defines extended header types for different .nev file specifications.
  1104. """
  1105. nev_ext_header_types = {
  1106. b'NEUEVWAV': {
  1107. # Version>=2.1
  1108. 'a': [
  1109. ('packet_id', 'S8'),
  1110. ('electrode_id', 'uint16'),
  1111. ('physical_connector', 'uint8'),
  1112. ('connector_pin', 'uint8'),
  1113. ('digitization_factor', 'uint16'),
  1114. ('energy_threshold', 'uint16'),
  1115. ('hi_threshold', 'int16'),
  1116. ('lo_threshold', 'int16'),
  1117. ('nb_sorted_units', 'uint8'),
  1118. # number of bytes per waveform sample
  1119. ('bytes_per_waveform', 'uint8'),
  1120. ('unused', 'S10')],
  1121. # Version>=2.3
  1122. 'b': [
  1123. ('packet_id', 'S8'),
  1124. ('electrode_id', 'uint16'),
  1125. ('physical_connector', 'uint8'),
  1126. ('connector_pin', 'uint8'),
  1127. ('digitization_factor', 'uint16'),
  1128. ('energy_threshold', 'uint16'),
  1129. ('hi_threshold', 'int16'),
  1130. ('lo_threshold', 'int16'),
  1131. ('nb_sorted_units', 'uint8'),
  1132. # number of bytes per waveform sample
  1133. ('bytes_per_waveform', 'uint8'),
  1134. # number of samples for each waveform
  1135. ('spike_width', 'uint16'),
  1136. ('unused', 'S8')]},
  1137. b'ARRAYNME': {
  1138. 'a': [
  1139. ('packet_id', 'S8'),
  1140. ('electrode_array_name', 'S24')]},
  1141. b'ECOMMENT': {
  1142. 'a': [
  1143. ('packet_id', 'S8'),
  1144. ('extra_comment', 'S24')]},
  1145. b'CCOMMENT': {
  1146. 'a': [
  1147. ('packet_id', 'S8'),
  1148. ('continued_comment', 'S24')]},
  1149. b'MAPFILE': {
  1150. 'a': [
  1151. ('packet_id', 'S8'),
  1152. ('mapFile', 'S24')]},
  1153. b'NEUEVLBL': {
  1154. 'a': [
  1155. ('packet_id', 'S8'),
  1156. ('electrode_id', 'uint16'),
  1157. # label of this electrode
  1158. ('label', 'S16'),
  1159. ('unused', 'S6')]},
  1160. b'NEUEVFLT': {
  1161. 'a': [
  1162. ('packet_id', 'S8'),
  1163. ('electrode_id', 'uint16'),
  1164. ('hi_freq_corner', 'uint32'),
  1165. ('hi_freq_order', 'uint32'),
  1166. # 0=None 1=Butterworth
  1167. ('hi_freq_type', 'uint16'),
  1168. ('lo_freq_corner', 'uint32'),
  1169. ('lo_freq_order', 'uint32'),
  1170. # 0=None 1=Butterworth
  1171. ('lo_freq_type', 'uint16'),
  1172. ('unused', 'S2')]},
  1173. b'DIGLABEL': {
  1174. 'a': [
  1175. ('packet_id', 'S8'),
  1176. # Read name of digital
  1177. ('label', 'S16'),
  1178. # 0=serial, 1=parallel
  1179. ('mode', 'uint8'),
  1180. ('unused', 'S7')]},
  1181. b'NSASEXEV': {
  1182. 'a': [
  1183. ('packet_id', 'S8'),
  1184. # Read frequency of periodic packet generation
  1185. ('frequency', 'uint16'),
  1186. # Read if digital input triggers events
  1187. ('digital_input_config', 'uint8'),
  1188. # Read if analog input triggers events
  1189. ('analog_channel_1_config', 'uint8'),
  1190. ('analog_channel_1_edge_detec_val', 'uint16'),
  1191. ('analog_channel_2_config', 'uint8'),
  1192. ('analog_channel_2_edge_detec_val', 'uint16'),
  1193. ('analog_channel_3_config', 'uint8'),
  1194. ('analog_channel_3_edge_detec_val', 'uint16'),
  1195. ('analog_channel_4_config', 'uint8'),
  1196. ('analog_channel_4_edge_detec_val', 'uint16'),
  1197. ('analog_channel_5_config', 'uint8'),
  1198. ('analog_channel_5_edge_detec_val', 'uint16'),
  1199. ('unused', 'S6')]},
  1200. b'VIDEOSYN': {
  1201. 'a': [
  1202. ('packet_id', 'S8'),
  1203. ('video_source_id', 'uint16'),
  1204. ('video_source', 'S16'),
  1205. ('frame_rate', 'float32'),
  1206. ('unused', 'S2')]},
  1207. b'TRACKOBJ': {
  1208. 'a': [
  1209. ('packet_id', 'S8'),
  1210. ('trackable_type', 'uint16'),
  1211. ('trackable_id', 'uint16'),
  1212. ('point_count', 'uint16'),
  1213. ('video_source', 'S16'),
  1214. ('unused', 'S2')]}}
  1215. return nev_ext_header_types
  1216. def __nev_data_masks(self, packet_ids):
  1217. """
  1218. Defines data masks for different .nev file specifications depending on
  1219. the given packet identifiers.
  1220. """
  1221. __nev_data_masks = {
  1222. 'NonNeural': {
  1223. 'a': (packet_ids == 0)},
  1224. 'Spikes': {
  1225. # Version 2.1 & 2.2
  1226. 'a': (0 < packet_ids) & (packet_ids <= 255),
  1227. # Version>=2.3
  1228. 'b': (0 < packet_ids) & (packet_ids <= 2048)},
  1229. 'Comments': {
  1230. 'a': (packet_ids == 0xFFFF)},
  1231. 'VideoSync': {
  1232. 'a': (packet_ids == 0xFFFE)},
  1233. 'TrackingEvents': {
  1234. 'a': (packet_ids == 0xFFFD)},
  1235. 'ButtonTrigger': {
  1236. 'a': (packet_ids == 0xFFFC)},
  1237. 'ConfigEvent': {
  1238. 'a': (packet_ids == 0xFFFB)}}
  1239. return __nev_data_masks
  1240. def __nev_data_types(self, data_size):
  1241. """
  1242. Defines data types for different .nev file specifications depending on
  1243. the given packet identifiers.
  1244. """
  1245. __nev_data_types = {
  1246. 'NonNeural': {
  1247. # Version 2.1 & 2.2
  1248. 'a': [
  1249. ('timestamp', 'uint32'),
  1250. ('packet_id', 'uint16'),
  1251. ('packet_insertion_reason', 'uint8'),
  1252. ('reserved', 'uint8'),
  1253. ('digital_input', 'uint16'),
  1254. ('analog_input_channel_1', 'int16'),
  1255. ('analog_input_channel_2', 'int16'),
  1256. ('analog_input_channel_3', 'int16'),
  1257. ('analog_input_channel_4', 'int16'),
  1258. ('analog_input_channel_5', 'int16'),
  1259. ('unused', 'S{0}'.format(data_size - 20))],
  1260. # Version>=2.3
  1261. 'b': [
  1262. ('timestamp', 'uint32'),
  1263. ('packet_id', 'uint16'),
  1264. ('packet_insertion_reason', 'uint8'),
  1265. ('reserved', 'uint8'),
  1266. ('digital_input', 'uint16'),
  1267. ('unused', 'S{0}'.format(data_size - 10))]},
  1268. 'Spikes': {
  1269. 'a': [
  1270. ('timestamp', 'uint32'),
  1271. ('packet_id', 'uint16'),
  1272. ('unit_class_nb', 'uint8'),
  1273. ('reserved', 'uint8'),
  1274. ('waveform', 'S{0}'.format(data_size - 8))]},
  1275. 'Comments': {
  1276. 'a': [
  1277. ('timestamp', 'uint32'),
  1278. ('packet_id', 'uint16'),
  1279. ('char_set', 'uint8'),
  1280. ('flag', 'uint8'),
  1281. ('color', 'uint32'),
  1282. ('comment', 'S{0}'.format(data_size - 12))]},
  1283. 'VideoSync': {
  1284. 'a': [
  1285. ('timestamp', 'uint32'),
  1286. ('packet_id', 'uint16'),
  1287. ('video_file_nb', 'uint16'),
  1288. ('video_frame_nb', 'uint32'),
  1289. ('video_elapsed_time', 'uint32'),
  1290. ('video_source_id', 'uint32'),
  1291. ('unused', 'int8', (data_size - 20,))]},
  1292. 'TrackingEvents': {
  1293. 'a': [
  1294. ('timestamp', 'uint32'),
  1295. ('packet_id', 'uint16'),
  1296. ('parent_id', 'uint16'),
  1297. ('node_id', 'uint16'),
  1298. ('node_count', 'uint16'),
  1299. ('point_count', 'uint16'),
  1300. ('tracking_points', 'uint16', ((data_size - 14) // 2,))]},
  1301. 'ButtonTrigger': {
  1302. 'a': [
  1303. ('timestamp', 'uint32'),
  1304. ('packet_id', 'uint16'),
  1305. ('trigger_type', 'uint16'),
  1306. ('unused', 'int8', (data_size - 8,))]},
  1307. 'ConfigEvent': {
  1308. 'a': [
  1309. ('timestamp', 'uint32'),
  1310. ('packet_id', 'uint16'),
  1311. ('config_change_type', 'uint16'),
  1312. ('config_changed', 'S{0}'.format(data_size - 8))]}}
  1313. return __nev_data_types
  1314. def __nev_params(self, param_name):
  1315. """
  1316. Returns wanted nev parameter.
  1317. """
  1318. nev_parameters = {
  1319. 'bytes_in_data_packets':
  1320. self.__nev_basic_header['bytes_in_data_packets'],
  1321. 'rec_datetime': datetime.datetime(
  1322. year=self.__nev_basic_header['year'],
  1323. month=self.__nev_basic_header['month'],
  1324. day=self.__nev_basic_header['day'],
  1325. hour=self.__nev_basic_header['hour'],
  1326. minute=self.__nev_basic_header['minute'],
  1327. second=self.__nev_basic_header['second'],
  1328. microsecond=self.__nev_basic_header['millisecond']),
  1329. 'max_res': self.__nev_basic_header['timestamp_resolution'],
  1330. 'channel_ids': self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  1331. 'channel_labels': self.__channel_labels[self.__nev_spec](),
  1332. 'event_unit': pq.CompoundUnit("1.0/{0} * s".format(
  1333. self.__nev_basic_header['timestamp_resolution'])),
  1334. 'nb_units': dict(zip(
  1335. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  1336. self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units'])),
  1337. 'digitization_factor': dict(zip(
  1338. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  1339. self.__nev_ext_header[b'NEUEVWAV']['digitization_factor'])),
  1340. 'data_size': self.__nev_basic_header['bytes_in_data_packets'],
  1341. 'waveform_size': self.__waveform_size[self.__nev_spec](),
  1342. 'waveform_dtypes': self.__get_waveforms_dtype(),
  1343. 'waveform_sampling_rate':
  1344. self.__nev_basic_header['sample_resolution'] * pq.Hz,
  1345. 'waveform_time_unit': pq.CompoundUnit("1.0/{0} * s".format(
  1346. self.__nev_basic_header['sample_resolution'])),
  1347. 'waveform_unit': pq.uV}
  1348. return nev_parameters[param_name]
  1349. def __get_file_size(self, filename):
  1350. """
  1351. Returns the file size in bytes for the given file.
  1352. """
  1353. filebuf = open(filename, 'rb')
  1354. filebuf.seek(0, os.SEEK_END)
  1355. file_size = filebuf.tell()
  1356. filebuf.close()
  1357. return file_size
  1358. def __get_min_time(self):
  1359. """
  1360. Returns the smallest time that can be determined from the recording for
  1361. use as the lower bound n in an interval [n,m).
  1362. """
  1363. tp = []
  1364. if self._avail_files['nev']:
  1365. tp.extend(self.__get_nev_rec_times()[0])
  1366. for nsx_i in self._avail_nsx:
  1367. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[0])
  1368. return min(tp)
  1369. def __get_max_time(self):
  1370. """
  1371. Returns the largest time that can be determined from the recording for
  1372. use as the upper bound m in an interval [n,m).
  1373. """
  1374. tp = []
  1375. if self._avail_files['nev']:
  1376. tp.extend(self.__get_nev_rec_times()[1])
  1377. for nsx_i in self._avail_nsx:
  1378. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[1])
  1379. return max(tp)
  1380. def __get_nev_rec_times(self):
  1381. """
  1382. Extracts minimum and maximum time points from a nev file.
  1383. """
  1384. filename = '.'.join([self._filenames['nev'], 'nev'])
  1385. dt = [('timestamp', 'uint32')]
  1386. offset = \
  1387. self.__get_file_size(filename) - \
  1388. self.__nev_params('bytes_in_data_packets')
  1389. last_data_packet = np.memmap(filename, offset=offset, dtype=dt, mode='r')[0]
  1390. n_starts = [0 * self.__nev_params('event_unit')]
  1391. n_stops = [
  1392. last_data_packet['timestamp'] * self.__nev_params('event_unit')]
  1393. return n_starts, n_stops
  1394. def __get_waveforms_dtype(self):
  1395. """
  1396. Extracts the actual waveform dtype set for each channel.
  1397. """
  1398. # Blackrock code giving the approiate dtype
  1399. conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
  1400. # get all electrode ids from nev ext header
  1401. all_el_ids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1402. # get the dtype of waveform (this is stupidly complicated)
  1403. if self.__is_set(
  1404. np.array(self.__nev_basic_header['additionnal_flags']), 0):
  1405. dtype_waveforms = dict((k, 'int16') for k in all_el_ids)
  1406. else:
  1407. # extract bytes per waveform
  1408. waveform_bytes = \
  1409. self.__nev_ext_header[b'NEUEVWAV']['bytes_per_waveform']
  1410. # extract dtype for waveforms fro each electrode
  1411. dtype_waveforms = dict(zip(all_el_ids, conv[waveform_bytes]))
  1412. return dtype_waveforms
  1413. def __get_channel_labels_variant_a(self):
  1414. """
  1415. Returns labels for all channels for file spec 2.1
  1416. """
  1417. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1418. labels = []
  1419. for elid in elids:
  1420. if elid < 129:
  1421. labels.append('chan%i' % elid)
  1422. else:
  1423. labels.append('ainp%i' % (elid - 129 + 1))
  1424. return dict(zip(elids, labels))
  1425. def __get_channel_labels_variant_b(self):
  1426. """
  1427. Returns labels for all channels for file spec 2.2 and 2.3
  1428. """
  1429. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1430. labels = self.__nev_ext_header[b'NEUEVLBL']['label']
  1431. return dict(zip(elids, labels)) if len(labels) > 0 else None
  1432. def __get_waveform_size_variant_a(self):
  1433. """
  1434. Returns wavform sizes for all channels for file spec 2.1 and 2.2
  1435. """
  1436. wf_dtypes = self.__get_waveforms_dtype()
  1437. nb_bytes_wf = self.__nev_basic_header['bytes_in_data_packets'] - 8
  1438. wf_sizes = dict([
  1439. (ch, int(nb_bytes_wf / np.dtype(dt).itemsize)) for ch, dt in
  1440. wf_dtypes.items()])
  1441. return wf_sizes
  1442. def __get_waveform_size_variant_b(self):
  1443. """
  1444. Returns wavform sizes for all channels for file spec 2.3
  1445. """
  1446. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1447. spike_widths = self.__nev_ext_header[b'NEUEVWAV']['spike_width']
  1448. return dict(zip(elids, spike_widths))
  1449. def __get_left_sweep_waveforms(self):
  1450. """
  1451. Returns left sweep of waveforms for each channel. Left sweep is defined
  1452. as the time from the beginning of the waveform to the trigger time of
  1453. the corresponding spike.
  1454. """
  1455. # TODO: Double check if this is the actual setting for Blackrock
  1456. wf_t_unit = self.__nev_params('waveform_time_unit')
  1457. all_ch = self.__nev_params('channel_ids')
  1458. # TODO: Double check if this is the correct assumption (10 samples)
  1459. # default value: threshold crossing after 10 samples of waveform
  1460. wf_left_sweep = dict([(ch, 10 * wf_t_unit) for ch in all_ch])
  1461. # non-default: threshold crossing at center of waveform
  1462. # wf_size = self.__nev_params('waveform_size')
  1463. # wf_left_sweep = dict(
  1464. # [(ch, (wf_size[ch] / 2) * wf_t_unit) for ch in all_ch])
  1465. return wf_left_sweep
  1466. def __get_nsx_param_variant_a(self, nsx_nb):
  1467. """
  1468. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1469. 2.1.
  1470. """
  1471. # Here, min/max_analog_val and min/max_digital_val are not available in
  1472. # the nsx, so that we must estimate these parameters from the
  1473. # digitization factor of the nev (information by Kian Torab, Blackrock
  1474. # Microsystems). Here dig_factor=max_analog_val/max_digital_val. We set
  1475. # max_digital_val to 1000, and max_analog_val=dig_factor. dig_factor is
  1476. # given in nV by definition, so the units turn out to be uV.
  1477. labels = []
  1478. dig_factor = []
  1479. for elid in self.__nsx_ext_header[nsx_nb]['electrode_id']:
  1480. if self._avail_files['nev']:
  1481. # This is a workaround for the DigitalFactor overflow in NEV
  1482. # files recorded with buggy Cerebus system.
  1483. # Fix taken from: NMPK toolbox by Blackrock,
  1484. # file openNEV, line 464,
  1485. # git rev. d0a25eac902704a3a29fa5dfd3aed0744f4733ed
  1486. df = self.__nev_params('digitization_factor')[elid]
  1487. if df == 21516:
  1488. df = 152592.547
  1489. dig_factor.append(df)
  1490. else:
  1491. dig_factor.append(float('nan'))
  1492. if elid < 129:
  1493. labels.append('chan%i' % elid)
  1494. else:
  1495. labels.append('ainp%i' % (elid - 129 + 1))
  1496. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  1497. bytes_in_headers = self.__nsx_basic_header[nsx_nb].dtype.itemsize + \
  1498. self.__nsx_ext_header[nsx_nb].dtype.itemsize * \
  1499. self.__nsx_basic_header[nsx_nb]['channel_count']
  1500. if np.isnan(dig_factor[0]):
  1501. units = ''
  1502. warnings.warn("Cannot rescale to voltage, raw data will be returned.", UserWarning)
  1503. else:
  1504. units = 'uV'
  1505. nsx_parameters = {
  1506. 'nb_data_points': int(
  1507. (self.__get_file_size(filename) - bytes_in_headers)
  1508. / (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1),
  1509. 'labels': labels,
  1510. 'units': np.array([units] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1511. 'min_analog_val': -1 * np.array(dig_factor),
  1512. 'max_analog_val': np.array(dig_factor),
  1513. 'min_digital_val': np.array(
  1514. [-1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1515. 'max_digital_val': np.array([1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1516. 'timestamp_resolution': 30000,
  1517. 'bytes_in_headers': bytes_in_headers,
  1518. 'sampling_rate': 30000 / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1519. 'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
  1520. 30000 / self.__nsx_basic_header[nsx_nb]['period']))}
  1521. # Returns complete dictionary because then it does not need to be called so often
  1522. return nsx_parameters
  1523. def __get_nsx_param_variant_b(self, param_name, nsx_nb):
  1524. """
  1525. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1526. 2.2 and 2.3.
  1527. """
  1528. nsx_parameters = {
  1529. 'labels':
  1530. self.__nsx_ext_header[nsx_nb]['electrode_label'],
  1531. 'units':
  1532. self.__nsx_ext_header[nsx_nb]['units'],
  1533. 'min_analog_val':
  1534. self.__nsx_ext_header[nsx_nb]['min_analog_val'],
  1535. 'max_analog_val':
  1536. self.__nsx_ext_header[nsx_nb]['max_analog_val'],
  1537. 'min_digital_val':
  1538. self.__nsx_ext_header[nsx_nb]['min_digital_val'],
  1539. 'max_digital_val':
  1540. self.__nsx_ext_header[nsx_nb]['max_digital_val'],
  1541. 'timestamp_resolution':
  1542. self.__nsx_basic_header[nsx_nb]['timestamp_resolution'],
  1543. 'bytes_in_headers':
  1544. self.__nsx_basic_header[nsx_nb]['bytes_in_headers'],
  1545. 'sampling_rate':
  1546. self.__nsx_basic_header[nsx_nb]['timestamp_resolution']
  1547. / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1548. 'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
  1549. self.__nsx_basic_header[nsx_nb]['timestamp_resolution']
  1550. / self.__nsx_basic_header[nsx_nb]['period']))}
  1551. return nsx_parameters[param_name]
  1552. def __get_nonneural_evdicts_variant_a(self, data):
  1553. """
  1554. Defines event types and the necessary parameters to extract them from
  1555. a 2.1 and 2.2 nev file.
  1556. """
  1557. # TODO: add annotations of nev ext header (NSASEXEX) to event types
  1558. # digital events
  1559. event_types = {
  1560. 'digital_input_port': {
  1561. 'name': 'digital_input_port',
  1562. 'field': 'digital_input',
  1563. 'mask': self.__is_set(data['packet_insertion_reason'], 0)
  1564. & ~self.__is_set(data['packet_insertion_reason'], 7),
  1565. 'desc': "Events of the digital input port"},
  1566. 'serial_input_port': {
  1567. 'name': 'serial_input_port',
  1568. 'field': 'digital_input',
  1569. 'mask':
  1570. self.__is_set(data['packet_insertion_reason'], 0)
  1571. & self.__is_set(data['packet_insertion_reason'], 7),
  1572. 'desc': "Events of the serial input port"}}
  1573. # analog input events via threshold crossings
  1574. for ch in range(5):
  1575. event_types.update({
  1576. 'analog_input_channel_{0}'.format(ch + 1): {
  1577. 'name': 'analog_input_channel_{0}'.format(ch + 1),
  1578. 'field': 'analog_input_channel_{0}'.format(ch + 1),
  1579. 'mask': self.__is_set(
  1580. data['packet_insertion_reason'], ch + 1),
  1581. 'desc': "Values of analog input channel {0} in mV "
  1582. "(+/- 5000)".format(ch + 1)}})
  1583. # TODO: define field and desc
  1584. event_types.update({
  1585. 'periodic_sampling_events': {
  1586. 'name': 'periodic_sampling_events',
  1587. 'field': 'digital_input',
  1588. 'mask': self.__is_set(data['packet_insertion_reason'], 6),
  1589. 'desc': 'Periodic sampling event of a certain frequency'}})
  1590. return event_types
  1591. def __delete_empty_segments(self):
  1592. """
  1593. If there are empty segments (e.g. due to a reset or clock synchronization across
  1594. two systems), these can be discarded.
  1595. If this is done, all the data and data_headers need to be remapped to become a range
  1596. starting from 0 again. Nev data are mapped accordingly to stay with their corresponding
  1597. segment in the nsX data.
  1598. """
  1599. nb_empty_segments = 0
  1600. for data_bl in range(self._nb_segment):
  1601. length = self.nsx_data[data_bl].shape[0]
  1602. # Discard empty segments
  1603. if length < 2:
  1604. nb_empty_segments += 1
  1605. self.nsx_data.pop(data_bl)
  1606. self.__nsx_data_header[self.nsx_to_load].pop(data_bl)
  1607. continue
  1608. # Keys need to be increasing from 0 to maximum in steps of 1
  1609. # To ensure this after removing empty segments, some keys need to be changed
  1610. for i in range(self._nb_segment):
  1611. # If this key does not exist, reduce all following keys by 1,
  1612. # so the keys become a range starting from 0
  1613. if i not in self.nsx_data:
  1614. self.nsx_data = {key - 1 if key > i else key: value for (key, value) in
  1615. self.nsx_data.items()}
  1616. data_header = self.__nsx_data_header[self.nsx_to_load]
  1617. data_header = {key - 1 if key > i else key: value for (key, value) in
  1618. data_header.items()}
  1619. self.__nsx_data_header[self.nsx_to_load] = data_header
  1620. # Also remap nev data, ev_ids are the equivalent to keys above
  1621. if self._avail_files['nev']:
  1622. for k, (data, ev_ids) in self.nev_data.items():
  1623. # If this segment id does not exist, reduce all following by 1
  1624. # to make them a range starting from 0
  1625. if i not in ev_ids:
  1626. ev_ids[ev_ids > i] -= 1
  1627. else:
  1628. raise ValueError("A segment in nsX was omitted but there were"
  1629. "spikes matched to it.")
  1630. # Empty segments were discarded, thus there may be less segments now
  1631. self._nb_segment -= nb_empty_segments
  1632. def __get_nonneural_evdicts_variant_b(self, data):
  1633. """
  1634. Defines event types and the necessary parameters to extract them from
  1635. a 2.3 nev file.
  1636. """
  1637. # digital events
  1638. if not np.all(np.in1d(data['packet_insertion_reason'], [1, 129])):
  1639. # Blackrock spec gives reason==64 means PERIODIC, but never seen this live
  1640. warnings.warn("Unknown event codes found", RuntimeWarning)
  1641. event_types = {
  1642. 'digital_input_port': {
  1643. 'name': 'digital_input_port',
  1644. 'field': 'digital_input',
  1645. 'mask': self.__is_set(data['packet_insertion_reason'], 0)
  1646. & ~self.__is_set(data['packet_insertion_reason'], 7),
  1647. 'desc': "Events of the digital input port"},
  1648. 'serial_input_port': {
  1649. 'name': 'serial_input_port',
  1650. 'field': 'digital_input',
  1651. 'mask': self.__is_set(data['packet_insertion_reason'], 0)
  1652. & self.__is_set(data['packet_insertion_reason'], 7),
  1653. 'desc': "Events of the serial input port"}}
  1654. return event_types
  1655. def __get_comment_evdict_variant_a(self, data):
  1656. return {
  1657. 'comments': {
  1658. 'name': 'comments',
  1659. 'field': 'comment',
  1660. 'mask': data['packet_id'] == 65535,
  1661. 'desc': 'Comments'
  1662. }
  1663. }
  1664. def __is_set(self, flag, pos):
  1665. """
  1666. Checks if bit is set at the given position for flag. If flag is an
  1667. array, an array will be returned.
  1668. """
  1669. return flag & (1 << pos) > 0