blackrockio.py 102 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567
  1. # -*- coding: utf-8 -*-
  2. """
  3. Module for reading data from files in the Blackrock format.
  4. This work is based on:
  5. * Chris Rodgers - first version
  6. * Michael Denker, Lyuba Zehl - second version
  7. * Samuel Garcia - third version
  8. * Lyuba Zehl, Michael Denker - fourth version
  9. This IO supports reading only.
  10. This IO is able to read:
  11. * the nev file which contains spikes
  12. * ns1, ns2, .., ns6 files that contain signals at different sampling rates
  13. This IO can handle the following Blackrock file specifications:
  14. * 2.1
  15. * 2.2
  16. * 2.3
  17. The neural data channels are 1 - 128.
  18. The analog inputs are 129 - 144. (129 - 137 AC coupled, 138 - 144 DC coupled)
  19. spike- and event-data; 30000 Hz
  20. "ns1": "analog data: 500 Hz",
  21. "ns2": "analog data: 1000 Hz",
  22. "ns3": "analog data: 2000 Hz",
  23. "ns4": "analog data: 10000 Hz",
  24. "ns5": "analog data: 30000 Hz",
  25. "ns6": "analog data: 30000 Hz (no digital filter)"
  26. TODO:
  27. * videosync events (file spec 2.3)
  28. * tracking events (file spec 2.3)
  29. * buttontrigger events (file spec 2.3)
  30. * config events (file spec 2.3)
  31. * check left sweep settings of Blackrock
  32. * check nsx offsets (file spec 2.1)
* add info of nev ext header (NSASEXEV) to non-neural events
  34. (file spec 2.1 and 2.2)
  35. * read sif file information
  36. * read ccf file information
  37. * fix reading of periodic sampling events (non-neural event type)
  38. (file spec 2.1 and 2.2)
  39. """
  40. from __future__ import division
  41. import datetime
  42. import os
  43. import re
  44. import numpy as np
  45. import quantities as pq
  46. import neo
  47. from neo.io.baseio import BaseIO
  48. from neo.core import (Block, Segment, SpikeTrain, Unit, Event,
  49. ChannelIndex, AnalogSignal)
# No-op entry-point guard: this module is a library and intentionally does
# nothing when executed directly.
if __name__ == '__main__':
    pass
class BlackrockIO(BaseIO):
    """
    Class for reading data in from a file set recorded by the Blackrock
    (Cerebus) recording system.

    Upon initialization, the class is linked to the available set of Blackrock
    files. Data can be read as a neo Block or neo Segment object using the
    read_block or read_segment function, respectively.

    Note: This routine will handle files according to specification 2.1, 2.2,
    and 2.3. Recording pauses that may occur in file specifications 2.2 and
    2.3 are automatically extracted and the data set is split into different
    segments.

    Inherits from:
        neo.io.BaseIO

    The Blackrock data format consists not of a single file, but a set of
    different files. This constructor associates itself with a set of files
    that constitute a common data set. By default, all files belonging to
    the file set have the same base name, but different extensions.
    However, by using the override parameters, individual filenames can
    be set.

    Args:
        filename (string):
            File name (without extension) of the set of Blackrock files to
            associate with. Any .nsX or .nev, .sif, or .ccf extensions are
            ignored when parsing this parameter.
        nsx_override (string):
            File name of the .nsX files (without extension). If None,
            filename is used.
            Default: None.
        nev_override (string):
            File name of the .nev file (without extension). If None,
            filename is used.
            Default: None.
        sif_override (string):
            File name of the .sif file (without extension). If None,
            filename is used.
            Default: None.
        ccf_override (string):
            File name of the .ccf file (without extension). If None,
            filename is used.
            Default: None.
        verbose (boolean):
            If True, the class will output additional diagnostic
            information on stdout.
            Default: False

    Returns:
        -

    Examples:
        >>> a = BlackrockIO('myfile')

        Loads a set of files consisting of files myfile.ns1, ...,
        myfile.ns6, and myfile.nev

        >>> b = BlackrockIO('myfile', nev_override='sorted')

        Loads the analog data from the set of files myfile.ns1, ...,
        myfile.ns6, but reads spike/event data from sorted.nev
    """
    # Class variables demonstrating capabilities of this IO
    is_readable = True
    is_writable = False
    # This IO can only manipulate continuous data, spikes, and events
    supported_objects = [
        Block, Segment, Event, AnalogSignal, SpikeTrain, Unit, ChannelIndex]
    readable_objects = [Block, Segment]
    writeable_objects = []
    has_header = False
    is_streameable = False

    # GUI parameter descriptions for the two readable container types
    read_params = {
        neo.Block: [
            ('nsx_to_load', {
                'value': 'none',
                'label': "List of nsx files (ids, int) to read."}),
            ('n_starts', {
                'value': None,
                'label': "List of n_start points (Quantity) to create "
                         "segments from."}),
            ('n_stops', {
                'value': None,
                'label': "List of n_stop points (Quantity) to create "
                         "segments from."}),
            ('channels', {
                'value': 'none',
                'label': "List of channels (ids, int) to load data from."}),
            ('units', {
                'value': 'none',
                'label': "Dictionary for units (values, list of int) to load "
                         "for each channel (key, int)."}),
            ('load_waveforms', {
                'value': False,
                'label': "States if waveforms should be loaded and attached "
                         "to spiketrain"}),
            ('load_events', {
                'value': False,
                'label': "States if events should be loaded."})],
        neo.Segment: [
            ('n_start', {
                'label': "Start time point (Quantity) for segment"}),
            ('n_stop', {
                'label': "Stop time point (Quantity) for segment"}),
            ('nsx_to_load', {
                'value': 'none',
                'label': "List of nsx files (ids, int) to read."}),
            ('channels', {
                'value': 'none',
                'label': "List of channels (ids, int) to load data from."}),
            ('units', {
                'value': 'none',
                'label': "Dictionary for units (values, list of int) to load "
                         "for each channel (key, int)."}),
            ('load_waveforms', {
                'value': False,
                'label': "States if waveforms should be loaded and attached "
                         "to spiketrain"}),
            ('load_events', {
                'value': False,
                'label': "States if events should be loaded."})]}
    write_params = {}

    name = 'Blackrock IO'
    description = "This IO reads .nev/.nsX file of the Blackrock " + \
        "(Cerebus) recordings system."

    # The possible file extensions of the Cerebus system and their content:
    #   ns1: contains analog data; sampled at 500 Hz (+ digital filters)
    #   ns2: contains analog data; sampled at 1000 Hz (+ digital filters)
    #   ns3: contains analog data; sampled at 2000 Hz (+ digital filters)
    #   ns4: contains analog data; sampled at 10000 Hz (+ digital filters)
    #   ns5: contains analog data; sampled at 30000 Hz (+ digital filters)
    #   ns6: contains analog data; sampled at 30000 Hz (no digital filters)
    #   nev: contains spike- and event-data; sampled at 30000 Hz
    #   sif: contains institution and patient info (XML)
    #   ccf: contains Cerebus configurations
    extensions = ['ns' + str(_) for _ in range(1, 7)]
    extensions.extend(['nev', 'sif', 'ccf'])

    mode = 'file'
  182. def __init__(self, filename, nsx_override=None, nev_override=None,
  183. sif_override=None, ccf_override=None, verbose=False):
  184. """
  185. Initialize the BlackrockIO class.
  186. """
  187. BaseIO.__init__(self)
  188. # Used to avoid unnecessary repetition of verbose messages
  189. self.__verbose_messages = []
  190. # remove extension from base _filenames
  191. for ext in self.extensions:
  192. self.filename = re.sub(
  193. os.path.extsep + ext + '$', '', filename)
  194. # remove extensions from overrides
  195. self._filenames = {}
  196. if nsx_override:
  197. self._filenames['nsx'] = re.sub(
  198. os.path.extsep + 'ns[1,2,3,4,5,6]$', '', nsx_override)
  199. else:
  200. self._filenames['nsx'] = self.filename
  201. if nev_override:
  202. self._filenames['nev'] = re.sub(
  203. os.path.extsep + 'nev$', '', nev_override)
  204. else:
  205. self._filenames['nev'] = self.filename
  206. if sif_override:
  207. self._filenames['sif'] = re.sub(
  208. os.path.extsep + 'sif$', '', sif_override)
  209. else:
  210. self._filenames['sif'] = self.filename
  211. if ccf_override:
  212. self._filenames['ccf'] = re.sub(
  213. os.path.extsep + 'ccf$', '', ccf_override)
  214. else:
  215. self._filenames['ccf'] = self.filename
  216. # check which files are available
  217. self._avail_files = dict.fromkeys(self.extensions, False)
  218. self._avail_nsx = []
  219. for ext in self.extensions:
  220. if ext.startswith('ns'):
  221. file2check = ''.join(
  222. [self._filenames['nsx'], os.path.extsep, ext])
  223. else:
  224. file2check = ''.join(
  225. [self._filenames[ext], os.path.extsep, ext])
  226. if os.path.exists(file2check):
  227. self._print_verbose("Found " + file2check + ".")
  228. self._avail_files[ext] = True
  229. if ext.startswith('ns'):
  230. self._avail_nsx.append(int(ext[-1]))
  231. # check if there are any files present
  232. if not any(list(self._avail_files.values())):
  233. raise IOError(
  234. 'No Blackrock files present at {}'.format(filename))
  235. # check if manually specified files were found
  236. exts = ['nsx', 'nev', 'sif', 'ccf']
  237. ext_overrides = [nsx_override, nev_override, sif_override, ccf_override]
  238. for ext, ext_override in zip(exts, ext_overrides):
  239. if ext_override is not None and self._avail_files[ext] == False:
  240. raise ValueError('Specified {} file {} could not be '
  241. 'found.'.format(ext, ext_override))
  242. # These dictionaries are used internally to map the file specification
  243. # revision of the nsx and nev files to one of the reading routines
  244. self.__nsx_header_reader = {
  245. '2.1': self.__read_nsx_header_variant_a,
  246. '2.2': self.__read_nsx_header_variant_b,
  247. '2.3': self.__read_nsx_header_variant_b}
  248. self.__nsx_dataheader_reader = {
  249. '2.1': self.__read_nsx_dataheader_variant_a,
  250. '2.2': self.__read_nsx_dataheader_variant_b,
  251. '2.3': self.__read_nsx_dataheader_variant_b}
  252. self.__nsx_data_reader = {
  253. '2.1': self.__read_nsx_data_variant_a,
  254. '2.2': self.__read_nsx_data_variant_b,
  255. '2.3': self.__read_nsx_data_variant_b}
  256. self.__nev_header_reader = {
  257. '2.1': self.__read_nev_header_variant_a,
  258. '2.2': self.__read_nev_header_variant_b,
  259. '2.3': self.__read_nev_header_variant_c}
  260. self.__nev_data_reader = {
  261. '2.1': self.__read_nev_data_variant_a,
  262. '2.2': self.__read_nev_data_variant_a,
  263. '2.3': self.__read_nev_data_variant_b}
  264. self.__nsx_params = {
  265. '2.1': self.__get_nsx_param_variant_a,
  266. '2.2': self.__get_nsx_param_variant_b,
  267. '2.3': self.__get_nsx_param_variant_b}
  268. self.__nsx_databl_param = {
  269. '2.1': self.__get_nsx_databl_param_variant_a,
  270. '2.2': self.__get_nsx_databl_param_variant_b,
  271. '2.3': self.__get_nsx_databl_param_variant_b}
  272. self.__waveform_size = {
  273. '2.1': self.__get_waveform_size_variant_a,
  274. '2.2': self.__get_waveform_size_variant_a,
  275. '2.3': self.__get_waveform_size_variant_b}
  276. self.__channel_labels = {
  277. '2.1': self.__get_channel_labels_variant_a,
  278. '2.2': self.__get_channel_labels_variant_b,
  279. '2.3': self.__get_channel_labels_variant_b}
  280. self.__nsx_rec_times = {
  281. '2.1': self.__get_nsx_rec_times_variant_a,
  282. '2.2': self.__get_nsx_rec_times_variant_b,
  283. '2.3': self.__get_nsx_rec_times_variant_b}
  284. self.__nonneural_evtypes = {
  285. '2.1': self.__get_nonneural_evtypes_variant_a,
  286. '2.2': self.__get_nonneural_evtypes_variant_a,
  287. '2.3': self.__get_nonneural_evtypes_variant_b}
  288. # Load file spec and headers of available nev file
  289. if self._avail_files['nev']:
  290. # read nev file specification
  291. self.__nev_spec = self.__extract_nev_file_spec()
  292. self._print_verbose('Specification Version ' + self.__nev_spec)
  293. # read nev headers
  294. self.__nev_basic_header, self.__nev_ext_header = \
  295. self.__nev_header_reader[self.__nev_spec]()
  296. # Load file spec and headers of available nsx files
  297. self.__nsx_spec = {}
  298. self.__nsx_basic_header = {}
  299. self.__nsx_ext_header = {}
  300. self.__nsx_data_header = {}
  301. for nsx_nb in self._avail_nsx:
  302. # read nsx file specification
  303. self.__nsx_spec[nsx_nb] = self.__extract_nsx_file_spec(nsx_nb)
  304. # read nsx headers
  305. self.__nsx_basic_header[nsx_nb], self.__nsx_ext_header[nsx_nb] = \
  306. self.__nsx_header_reader[self.__nsx_spec[nsx_nb]](nsx_nb)
  307. # Read nsx data header(s) for nsx
  308. self.__nsx_data_header[nsx_nb] = self.__nsx_dataheader_reader[
  309. self.__nsx_spec[nsx_nb]](nsx_nb)
  310. def _print_verbose(self, text):
  311. """
  312. Print a verbose diagnostic message (string).
  313. """
  314. if self.__verbose_messages:
  315. if text not in self.__verbose_messages:
  316. self.__verbose_messages.append(text)
  317. print(str(self.__class__.__name__) + ': ' + text)
  318. def __extract_nsx_file_spec(self, nsx_nb):
  319. """
  320. Extract file specification from an .nsx file.
  321. """
  322. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  323. # Header structure of files specification 2.2 and higher. For files 2.1
  324. # and lower, the entries ver_major and ver_minor are not supported.
  325. dt0 = [
  326. ('file_id', 'S8'),
  327. ('ver_major', 'uint8'),
  328. ('ver_minor', 'uint8')]
  329. nsx_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  330. if nsx_file_id['file_id'].decode() == 'NEURALSG':
  331. spec = '2.1'
  332. elif nsx_file_id['file_id'].decode() == 'NEURALCD':
  333. spec = '{0}.{1}'.format(
  334. nsx_file_id['ver_major'], nsx_file_id['ver_minor'])
  335. else:
  336. raise IOError('Unsupported NSX file type.')
  337. return spec
  338. def __extract_nev_file_spec(self):
  339. """
  340. Extract file specification from an .nsx file
  341. """
  342. filename = '.'.join([self._filenames['nsx'], 'nev'])
  343. # Header structure of files specification 2.2 and higher. For files 2.1
  344. # and lower, the entries ver_major and ver_minor are not supported.
  345. dt0 = [
  346. ('file_id', 'S8'),
  347. ('ver_major', 'uint8'),
  348. ('ver_minor', 'uint8')]
  349. nev_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  350. if nev_file_id['file_id'].decode() == 'NEURALEV':
  351. spec = '{0}.{1}'.format(
  352. nev_file_id['ver_major'], nev_file_id['ver_minor'])
  353. else:
  354. raise IOError('NEV file type {0} is not supported'.format(
  355. nev_file_id['file_id']))
  356. return spec
    def __read_nsx_header_variant_a(self, nsx_nb):
        """
        Extract nsx header information from a 2.1 .nsx file.

        Returns a tuple (basic header record, extended header memmap).
        """
        filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
        # basic header (file_id: NEURALSG for spec 2.1 files)
        dt0 = [
            ('file_id', 'S8'),
            # label of sampling group (e.g. "1kS/s" or "LFP Low")
            ('label', 'S16'),
            # number of 1/30000 seconds between data points
            # (e.g., if sampling rate "1 kS/s", period equals "30")
            ('period', 'uint32'),
            ('channel_count', 'uint32')]
        nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
        # "extended" header: one record per channel directly after the
        # basic header (to facilitate compatibility with higher file specs)
        offset_dt0 = np.dtype(dt0).itemsize
        shape = nsx_basic_header['channel_count']
        # originally called channel_id in Blackrock user manual
        # (renamed to facilitate compatibility with higher file specs)
        dt1 = [('electrode_id', 'uint32')]
        nsx_ext_header = np.memmap(
            filename, mode='r', shape=shape, offset=offset_dt0, dtype=dt1)
        return nsx_basic_header, nsx_ext_header
    def __read_nsx_header_variant_b(self, nsx_nb):
        """
        Extract nsx header information from a 2.2 or 2.3 .nsx file.

        Returns a tuple (basic header record, extended header memmap with
        one record per channel).
        """
        filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
        # basic header (file_id: NEURALCD)
        dt0 = [
            ('file_id', 'S8'),
            # file specification split into major and minor version number
            ('ver_major', 'uint8'),
            ('ver_minor', 'uint8'),
            # bytes of basic & extended header
            ('bytes_in_headers', 'uint32'),
            # label of the sampling group (e.g., "1 kS/s" or "LFP low")
            ('label', 'S16'),
            ('comment', 'S256'),
            # number of 1/30000 seconds between data points
            ('period', 'uint32'),
            # resolution of the time stamps (Hz)
            ('timestamp_resolution', 'uint32'),
            # time origin: 2byte uint16 values for ...
            ('year', 'uint16'),
            ('month', 'uint16'),
            ('weekday', 'uint16'),
            ('day', 'uint16'),
            ('hour', 'uint16'),
            ('minute', 'uint16'),
            ('second', 'uint16'),
            ('millisecond', 'uint16'),
            # number of channel_count match number of extended headers
            ('channel_count', 'uint32')]
        nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
        # extended header (type: CC), one record per channel
        offset_dt0 = np.dtype(dt0).itemsize
        shape = nsx_basic_header['channel_count']
        dt1 = [
            ('type', 'S2'),
            ('electrode_id', 'uint16'),
            ('electrode_label', 'S16'),
            # used front-end amplifier bank (e.g., A, B, C, D)
            ('physical_connector', 'uint8'),
            # used connector pin (e.g., 1-37 on bank A, B, C or D)
            ('connector_pin', 'uint8'),
            # digital and analog value ranges of the signal
            ('min_digital_val', 'int16'),
            ('max_digital_val', 'int16'),
            ('min_analog_val', 'int16'),
            ('max_analog_val', 'int16'),
            # units of the analog range values ("mV" or "uV")
            ('units', 'S16'),
            # filter settings used to create nsx from source signal
            ('hi_freq_corner', 'uint32'),
            ('hi_freq_order', 'uint32'),
            ('hi_freq_type', 'uint16'),  # 0=None, 1=Butterworth
            ('lo_freq_corner', 'uint32'),
            ('lo_freq_order', 'uint32'),
            ('lo_freq_type', 'uint16')]  # 0=None, 1=Butterworth
        nsx_ext_header = np.memmap(
            filename, mode='r', shape=shape, offset=offset_dt0, dtype=dt1)
        return nsx_basic_header, nsx_ext_header
  440. def __read_nsx_dataheader(self, nsx_nb, offset):
  441. """
  442. Reads data header following the given offset of an nsx file.
  443. """
  444. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  445. # dtypes data header
  446. dt2 = [
  447. ('header', 'uint8'),
  448. ('timestamp', 'uint32'),
  449. ('nb_data_points', 'uint32')]
  450. return np.memmap(
  451. filename, mode='r', dtype=dt2, shape=1, offset=offset)[0]
  452. def __read_nsx_dataheader_variant_a(
  453. self, nsx_nb, filesize=None, offset=None):
  454. """
  455. Reads None for the nsx data header of file spec 2.1. Introduced to
  456. facilitate compatibility with higher file spec.
  457. """
  458. return None
  459. def __read_nsx_dataheader_variant_b(
  460. self, nsx_nb, filesize=None, offset=None, ):
  461. """
  462. Reads the nsx data header for each data block following the offset of
  463. file spec 2.2 and 2.3.
  464. """
  465. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  466. filesize = self.__get_file_size(filename)
  467. data_header = {}
  468. index = 0
  469. if offset is None:
  470. offset = self.__nsx_basic_header[nsx_nb]['bytes_in_headers']
  471. while offset < filesize:
  472. index += 1
  473. dh = self.__read_nsx_dataheader(nsx_nb, offset)
  474. data_header[index] = {
  475. 'header': dh['header'],
  476. 'timestamp': dh['timestamp'],
  477. 'nb_data_points': dh['nb_data_points'],
  478. 'offset_to_data_block': offset + dh.dtype.itemsize}
  479. # data size = number of data points * (2bytes * number of channels)
  480. # use of `int` avoids overflow problem
  481. data_size = int(dh['nb_data_points']) * \
  482. int(self.__nsx_basic_header[nsx_nb]['channel_count']) * 2
  483. # define new offset (to possible next data block)
  484. offset = data_header[index]['offset_to_data_block'] + data_size
  485. return data_header
  486. def __read_nsx_data_variant_a(self, nsx_nb):
  487. """
  488. Extract nsx data from a 2.1 .nsx file
  489. """
  490. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  491. # get shape of data
  492. shape = (
  493. self.__nsx_databl_param['2.1']('nb_data_points', nsx_nb),
  494. self.__nsx_basic_header[nsx_nb]['channel_count'])
  495. offset = self.__nsx_params['2.1']('bytes_in_headers', nsx_nb)
  496. # read nsx data
  497. # store as dict for compatibility with higher file specs
  498. data = {1: np.memmap(
  499. filename, mode='r', dtype='int16', shape=shape, offset=offset)}
  500. return data
  501. def __read_nsx_data_variant_b(self, nsx_nb):
  502. """
  503. Extract nsx data (blocks) from a 2.2 or 2.3 .nsx file. Blocks can arise
  504. if the recording was paused by the user.
  505. """
  506. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  507. data = {}
  508. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  509. # get shape and offset of data
  510. shape = (
  511. self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points'],
  512. self.__nsx_basic_header[nsx_nb]['channel_count'])
  513. offset = \
  514. self.__nsx_data_header[nsx_nb][data_bl]['offset_to_data_block']
  515. # read data
  516. data[data_bl] = np.memmap(
  517. filename, mode='r', dtype='int16', shape=shape, offset=offset)
  518. return data
    def __read_nev_header(self, ext_header_variants):
        """
        Extract basic and extended header information from a .nev file.

        Args:
            ext_header_variants (dict):
                Maps extended-header packet id (bytes) to the layout
                variant ('a', 'b', ...) used to decode that packet type
                for the file spec at hand.

        Returns a tuple (basic header record,
        {packet_id: decoded extended header records}).
        """
        filename = '.'.join([self._filenames['nev'], 'nev'])
        # basic header
        dt0 = [
            # Set to "NEURALEV"
            ('file_type_id', 'S8'),
            ('ver_major', 'uint8'),
            ('ver_minor', 'uint8'),
            # Flags (field name misspelling kept: it is the dtype key)
            ('additionnal_flags', 'uint16'),
            # File index of first data sample
            ('bytes_in_headers', 'uint32'),
            # Number of bytes per data packet (sample)
            ('bytes_in_data_packets', 'uint32'),
            # Time resolution of time stamps in Hz
            ('timestamp_resolution', 'uint32'),
            # Sampling frequency of waveforms in Hz
            ('sample_resolution', 'uint32'),
            # time origin of the recording: 2-byte uint16 values
            ('year', 'uint16'),
            ('month', 'uint16'),
            ('weekday', 'uint16'),
            ('day', 'uint16'),
            ('hour', 'uint16'),
            ('minute', 'uint16'),
            ('second', 'uint16'),
            ('millisecond', 'uint16'),
            ('application_to_create_file', 'S32'),
            ('comment_field', 'S256'),
            # Number of extended headers
            ('nb_ext_headers', 'uint32')]
        nev_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
        # extended headers: N blocks of an 8-byte packet id followed by
        # 24 data bytes; the data bytes depend on the packet id and need
        # to be converted case by case
        shape = nev_basic_header['nb_ext_headers']
        offset_dt0 = np.dtype(dt0).itemsize
        # This is the common structure of the beginning of extended headers
        dt1 = [
            ('packet_id', 'S8'),
            ('info_field', 'S24')]
        raw_ext_header = np.memmap(
            filename, mode='r', offset=offset_dt0, dtype=dt1, shape=shape)
        # re-interpret (view) the raw records per packet type using the
        # requested layout variant
        nev_ext_header = {}
        for packet_id in ext_header_variants.keys():
            mask = (raw_ext_header['packet_id'] == packet_id)
            dt2 = self.__nev_ext_header_types()[packet_id][
                ext_header_variants[packet_id]]
            nev_ext_header[packet_id] = raw_ext_header.view(dt2)[mask]
        return nev_basic_header, nev_ext_header
  572. def __read_nev_header_variant_a(self):
  573. """
  574. Extract nev header information from a 2.1 .nev file
  575. """
  576. ext_header_variants = {
  577. b'NEUEVWAV': 'a',
  578. b'ARRAYNME': 'a',
  579. b'ECOMMENT': 'a',
  580. b'CCOMMENT': 'a',
  581. b'MAPFILE': 'a',
  582. b'NSASEXEV': 'a'}
  583. return self.__read_nev_header(ext_header_variants)
  584. def __read_nev_header_variant_b(self):
  585. """
  586. Extract nev header information from a 2.2 .nev file
  587. """
  588. ext_header_variants = {
  589. b'NEUEVWAV': 'b',
  590. b'ARRAYNME': 'a',
  591. b'ECOMMENT': 'a',
  592. b'CCOMMENT': 'a',
  593. b'MAPFILE': 'a',
  594. b'NEUEVLBL': 'a',
  595. b'NEUEVFLT': 'a',
  596. b'DIGLABEL': 'a',
  597. b'NSASEXEV': 'a'}
  598. return self.__read_nev_header(ext_header_variants)
  599. def __read_nev_header_variant_c(self):
  600. """
  601. Extract nev header information from a 2.3 .nev file
  602. """
  603. ext_header_variants = {
  604. b'NEUEVWAV': 'b',
  605. b'ARRAYNME': 'a',
  606. b'ECOMMENT': 'a',
  607. b'CCOMMENT': 'a',
  608. b'MAPFILE': 'a',
  609. b'NEUEVLBL': 'a',
  610. b'NEUEVFLT': 'a',
  611. b'DIGLABEL': 'a',
  612. b'VIDEOSYN': 'a',
  613. b'TRACKOBJ': 'a'}
  614. return self.__read_nev_header(ext_header_variants)
  615. def __read_nev_data(self, nev_data_masks, nev_data_types):
  616. """
  617. Extract nev data from a 2.1 or 2.2 .nev file
  618. """
  619. filename = '.'.join([self._filenames['nev'], 'nev'])
  620. data_size = self.__nev_basic_header['bytes_in_data_packets']
  621. header_size = self.__nev_basic_header['bytes_in_headers']
  622. # read all raw data packets and markers
  623. dt0 = [
  624. ('timestamp', 'uint32'),
  625. ('packet_id', 'uint16'),
  626. ('value', 'S{0}'.format(data_size - 6))]
  627. raw_data = np.memmap(filename, mode='r', offset=header_size, dtype=dt0)
  628. masks = self.__nev_data_masks(raw_data['packet_id'])
  629. types = self.__nev_data_types(data_size)
  630. data = {}
  631. for k, v in nev_data_masks.items():
  632. data[k] = raw_data.view(types[k][nev_data_types[k]])[masks[k][v]]
  633. return data
  634. def __read_nev_data_variant_a(self):
  635. """
  636. Extract nev data from a 2.1 & 2.2 .nev file
  637. """
  638. nev_data_masks = {
  639. 'NonNeural': 'a',
  640. 'Spikes': 'a'}
  641. nev_data_types = {
  642. 'NonNeural': 'a',
  643. 'Spikes': 'a'}
  644. return self.__read_nev_data(nev_data_masks, nev_data_types)
  645. def __read_nev_data_variant_b(self):
  646. """
  647. Extract nev data from a 2.3 .nev file
  648. """
  649. nev_data_masks = {
  650. 'NonNeural': 'a',
  651. 'Spikes': 'b',
  652. 'Comments': 'a',
  653. 'VideoSync': 'a',
  654. 'TrackingEvents': 'a',
  655. 'ButtonTrigger': 'a',
  656. 'ConfigEvent': 'a'}
  657. nev_data_types = {
  658. 'NonNeural': 'b',
  659. 'Spikes': 'a',
  660. 'Comments': 'a',
  661. 'VideoSync': 'a',
  662. 'TrackingEvents': 'a',
  663. 'ButtonTrigger': 'a',
  664. 'ConfigEvent': 'a'}
  665. return self.__read_nev_data(nev_data_masks, nev_data_types)
def __nev_ext_header_types(self):
    """
    Defines extended header types for different .nev file specifications.

    Returns:
        dict: maps each extended header packet id (bytes) to a dict of
        variant letters ('a', 'b') to numpy dtype specifications; each
        variant describes the packet layout of one file spec generation.
    """
    nev_ext_header_types = {
        b'NEUEVWAV': {
            # Version>=2.1
            'a': [
                ('packet_id', 'S8'),
                ('electrode_id', 'uint16'),
                ('physical_connector', 'uint8'),
                ('connector_pin', 'uint8'),
                ('digitization_factor', 'uint16'),
                ('energy_threshold', 'uint16'),
                ('hi_threshold', 'int16'),
                ('lo_threshold', 'int16'),
                ('nb_sorted_units', 'uint8'),
                # number of bytes per waveform sample
                ('bytes_per_waveform', 'uint8'),
                ('unused', 'S10')],
            # Version>=2.3
            'b': [
                ('packet_id', 'S8'),
                ('electrode_id', 'uint16'),
                ('physical_connector', 'uint8'),
                ('connector_pin', 'uint8'),
                ('digitization_factor', 'uint16'),
                ('energy_threshold', 'uint16'),
                ('hi_threshold', 'int16'),
                ('lo_threshold', 'int16'),
                ('nb_sorted_units', 'uint8'),
                # number of bytes per waveform sample
                ('bytes_per_waveform', 'uint8'),
                # number of samples for each waveform
                ('spike_width', 'uint16'),
                ('unused', 'S8')]},
        b'ARRAYNME': {
            'a': [
                ('packet_id', 'S8'),
                ('electrode_array_name', 'S24')]},
        b'ECOMMENT': {
            'a': [
                ('packet_id', 'S8'),
                ('extra_comment', 'S24')]},
        b'CCOMMENT': {
            'a': [
                ('packet_id', 'S8'),
                ('continued_comment', 'S24')]},
        b'MAPFILE': {
            'a': [
                ('packet_id', 'S8'),
                ('mapFile', 'S24')]},
        b'NEUEVLBL': {
            'a': [
                ('packet_id', 'S8'),
                ('electrode_id', 'uint16'),
                # label of this electrode
                ('label', 'S16'),
                ('unused', 'S6')]},
        b'NEUEVFLT': {
            'a': [
                ('packet_id', 'S8'),
                ('electrode_id', 'uint16'),
                ('hi_freq_corner', 'uint32'),
                ('hi_freq_order', 'uint32'),
                # 0=None 1=Butterworth
                ('hi_freq_type', 'uint16'),
                ('lo_freq_corner', 'uint32'),
                ('lo_freq_order', 'uint32'),
                # 0=None 1=Butterworth
                ('lo_freq_type', 'uint16'),
                ('unused', 'S2')]},
        b'DIGLABEL': {
            'a': [
                ('packet_id', 'S8'),
                # Read name of digital
                ('label', 'S16'),
                # 0=serial, 1=parallel
                ('mode', 'uint8'),
                ('unused', 'S7')]},
        b'NSASEXEV': {
            'a': [
                ('packet_id', 'S8'),
                # Read frequency of periodic packet generation
                ('frequency', 'uint16'),
                # Read if digital input triggers events
                ('digital_input_config', 'uint8'),
                # Read if analog input triggers events
                ('analog_channel_1_config', 'uint8'),
                ('analog_channel_1_edge_detec_val', 'uint16'),
                ('analog_channel_2_config', 'uint8'),
                ('analog_channel_2_edge_detec_val', 'uint16'),
                ('analog_channel_3_config', 'uint8'),
                ('analog_channel_3_edge_detec_val', 'uint16'),
                ('analog_channel_4_config', 'uint8'),
                ('analog_channel_4_edge_detec_val', 'uint16'),
                ('analog_channel_5_config', 'uint8'),
                ('analog_channel_5_edge_detec_val', 'uint16'),
                ('unused', 'S6')]},
        b'VIDEOSYN': {
            'a': [
                ('packet_id', 'S8'),
                ('video_source_id', 'uint16'),
                ('video_source', 'S16'),
                ('frame_rate', 'float32'),
                ('unused', 'S2')]},
        b'TRACKOBJ': {
            'a': [
                ('packet_id', 'S8'),
                ('trackable_type', 'uint16'),
                ('trackable_id', 'uint16'),
                ('point_count', 'uint16'),
                ('video_source', 'S16'),
                ('unused', 'S2')]}}
    return nev_ext_header_types
  781. def __nev_data_masks(self, packet_ids):
  782. """
  783. Defines data masks for different .nev file specifications depending on
  784. the given packet identifiers.
  785. """
  786. __nev_data_masks = {
  787. 'NonNeural': {
  788. 'a': (packet_ids == 0)},
  789. 'Spikes': {
  790. # Version 2.1 & 2.2
  791. 'a': (0 < packet_ids) & (packet_ids <= 255),
  792. # Version>=2.3
  793. 'b': (0 < packet_ids) & (packet_ids <= 2048)},
  794. 'Comments': {
  795. 'a': (packet_ids == 0xFFFF)},
  796. 'VideoSync': {
  797. 'a': (packet_ids == 0xFFFE)},
  798. 'TrackingEvents': {
  799. 'a': (packet_ids == 0xFFFD)},
  800. 'ButtonTrigger': {
  801. 'a': (packet_ids == 0xFFFC)},
  802. 'ConfigEvent': {
  803. 'a': (packet_ids == 0xFFFB)}}
  804. return __nev_data_masks
def __nev_data_types(self, data_size):
    """
    Defines data types for different .nev file specifications depending on
    the given packet identifiers.

    Parameters:
        data_size (int): total size in bytes of one nev data packet; used
            to size the trailing payload/padding fields so every dtype
            spans exactly one packet.

    Returns:
        dict: maps packet family name to a dict of variant letters to
        numpy dtype specifications.
    """
    __nev_data_types = {
        'NonNeural': {
            # Version 2.1 & 2.2
            'a': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('packet_insertion_reason', 'uint8'),
                ('reserved', 'uint8'),
                ('digital_input', 'uint16'),
                ('analog_input_channel_1', 'int16'),
                ('analog_input_channel_2', 'int16'),
                ('analog_input_channel_3', 'int16'),
                ('analog_input_channel_4', 'int16'),
                ('analog_input_channel_5', 'int16'),
                ('unused', 'S{0}'.format(data_size - 20))],
            # Version>=2.3
            'b': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('packet_insertion_reason', 'uint8'),
                ('reserved', 'uint8'),
                ('digital_input', 'uint16'),
                ('unused', 'S{0}'.format(data_size - 10))]},
        'Spikes': {
            'a': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('unit_class_nb', 'uint8'),
                ('reserved', 'uint8'),
                # raw waveform bytes; decoded later per-channel
                ('waveform', 'S{0}'.format(data_size - 8))]},
        'Comments': {
            'a': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('char_set', 'uint8'),
                ('flag', 'uint8'),
                ('data', 'uint32'),
                ('comment', 'S{0}'.format(data_size - 12))]},
        'VideoSync': {
            'a': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('video_file_nb', 'uint16'),
                ('video_frame_nb', 'uint32'),
                ('video_elapsed_time', 'uint32'),
                ('video_source_id', 'uint32'),
                ('unused', 'int8', (data_size - 20, ))]},
        'TrackingEvents': {
            'a': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('parent_id', 'uint16'),
                ('node_id', 'uint16'),
                ('node_count', 'uint16'),
                ('point_count', 'uint16'),
                # remaining packet bytes hold uint16 tracking coordinates
                ('tracking_points', 'uint16', ((data_size - 14) // 2, ))]},
        'ButtonTrigger': {
            'a': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('trigger_type', 'uint16'),
                ('unused', 'int8', (data_size - 8, ))]},
        'ConfigEvent': {
            'a': [
                ('timestamp', 'uint32'),
                ('packet_id', 'uint16'),
                ('config_change_type', 'uint16'),
                ('config_changed', 'S{0}'.format(data_size - 8))]}}
    return __nev_data_types
def __nev_params(self, param_name):
    """
    Returns wanted nev parameter.

    Parameters:
        param_name (str): key of the parameter to return; one of the keys
            of the dict built below.

    Returns:
        The requested parameter value derived from the nev basic and
        extended headers.
    """
    # NOTE(review): every entry is computed eagerly on each call even
    # though only one is returned
    nev_parameters = {
        'bytes_in_data_packets':
            self.__nev_basic_header['bytes_in_data_packets'],
        # recording start time from the basic header date fields
        'rec_datetime': datetime.datetime(
            year=self.__nev_basic_header['year'],
            month=self.__nev_basic_header['month'],
            day=self.__nev_basic_header['day'],
            hour=self.__nev_basic_header['hour'],
            minute=self.__nev_basic_header['minute'],
            second=self.__nev_basic_header['second'],
            microsecond=self.__nev_basic_header['millisecond']),
        'max_res': self.__nev_basic_header['timestamp_resolution'],
        'channel_ids': self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
        # label lookup is spec-dependent (see __channel_labels dispatch)
        'channel_labels': self.__channel_labels[self.__nev_spec](),
        # one tick of the event clock as a quantities unit
        'event_unit': pq.CompoundUnit("1.0/{0} * s".format(
            self.__nev_basic_header['timestamp_resolution'])),
        'nb_units': dict(zip(
            self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
            self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units'])),
        'digitization_factor': dict(zip(
            self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
            self.__nev_ext_header[b'NEUEVWAV']['digitization_factor'])),
        'data_size': self.__nev_basic_header['bytes_in_data_packets'],
        'waveform_size': self.__waveform_size[self.__nev_spec](),
        'waveform_dtypes': self.__get_waveforms_dtype(),
        'waveform_sampling_rate':
            self.__nev_basic_header['sample_resolution'] * pq.Hz,
        'waveform_time_unit': pq.CompoundUnit("1.0/{0} * s".format(
            self.__nev_basic_header['sample_resolution'])),
        'waveform_unit': pq.uV}
    return nev_parameters[param_name]
  914. def __get_file_size(self, filename):
  915. """
  916. Returns the file size in bytes for the given file.
  917. """
  918. filebuf = open(filename, 'rb')
  919. filebuf.seek(0, os.SEEK_END)
  920. file_size = filebuf.tell()
  921. filebuf.close()
  922. return file_size
  923. def __get_min_time(self):
  924. """
  925. Returns the smallest time that can be determined from the recording for
  926. use as the lower bound n in an interval [n,m).
  927. """
  928. tp = []
  929. if self._avail_files['nev']:
  930. tp.extend(self.__get_nev_rec_times()[0])
  931. for nsx_i in self._avail_nsx:
  932. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[0])
  933. return min(tp)
  934. def __get_max_time(self):
  935. """
  936. Returns the largest time that can be determined from the recording for
  937. use as the upper bound m in an interval [n,m).
  938. """
  939. tp = []
  940. if self._avail_files['nev']:
  941. tp.extend(self.__get_nev_rec_times()[1])
  942. for nsx_i in self._avail_nsx:
  943. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[1])
  944. return max(tp)
  945. def __get_nev_rec_times(self):
  946. """
  947. Extracts minimum and maximum time points from a nev file.
  948. """
  949. filename = '.'.join([self._filenames['nev'], 'nev'])
  950. dt = [('timestamp', 'uint32')]
  951. offset = \
  952. self.__get_file_size(filename) - \
  953. self.__nev_params('bytes_in_data_packets')
  954. last_data_packet = np.memmap(
  955. filename, mode='r', offset=offset, dtype=dt)[0]
  956. n_starts = [0 * self.__nev_params('event_unit')]
  957. n_stops = [
  958. last_data_packet['timestamp'] * self.__nev_params('event_unit')]
  959. return n_starts, n_stops
  960. def __get_nsx_rec_times_variant_a(self, nsx_nb):
  961. """
  962. Extracts minimum and maximum time points from a 2.1 nsx file.
  963. """
  964. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  965. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  966. 'time_unit', nsx_nb)
  967. highest_res = self.__nev_params('event_unit')
  968. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  969. 'bytes_in_headers', nsx_nb)
  970. nb_data_points = int(
  971. (self.__get_file_size(filename) - bytes_in_headers) /
  972. (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1)
  973. # add n_start
  974. n_starts = [(0 * t_unit).rescale(highest_res)]
  975. # add n_stop
  976. n_stops = [(nb_data_points * t_unit).rescale(highest_res)]
  977. return n_starts, n_stops
  978. def __get_nsx_rec_times_variant_b(self, nsx_nb):
  979. """
  980. Extracts minimum and maximum time points from a 2.2 or 2.3 nsx file.
  981. """
  982. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  983. 'time_unit', nsx_nb)
  984. highest_res = self.__nev_params('event_unit')
  985. n_starts = []
  986. n_stops = []
  987. # add n-start and n_stop for all data blocks
  988. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  989. ts0 = self.__nsx_data_header[nsx_nb][data_bl]['timestamp']
  990. nbdp = self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points']
  991. # add n_start
  992. start = ts0 * t_unit
  993. n_starts.append(start.rescale(highest_res))
  994. # add n_stop
  995. stop = start + nbdp * t_unit
  996. n_stops.append(stop.rescale(highest_res))
  997. return sorted(n_starts), sorted(n_stops)
  998. def __get_waveforms_dtype(self):
  999. """
  1000. Extracts the actual waveform dtype set for each channel.
  1001. """
  1002. # Blackrock code giving the approiate dtype
  1003. conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
  1004. # get all electrode ids from nev ext header
  1005. all_el_ids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1006. # get the dtype of waveform (this is stupidly complicated)
  1007. if self.__is_set(
  1008. np.array(self.__nev_basic_header['additionnal_flags']), 0):
  1009. dtype_waveforms = dict((k, 'int16') for k in all_el_ids)
  1010. else:
  1011. # extract bytes per waveform
  1012. waveform_bytes = \
  1013. self.__nev_ext_header[b'NEUEVWAV']['bytes_per_waveform']
  1014. # extract dtype for waveforms fro each electrode
  1015. dtype_waveforms = dict(zip(all_el_ids, conv[waveform_bytes]))
  1016. return dtype_waveforms
  1017. def __get_channel_labels_variant_a(self):
  1018. """
  1019. Returns labels for all channels for file spec 2.1
  1020. """
  1021. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1022. labels = []
  1023. for elid in elids:
  1024. if elid < 129:
  1025. labels.append('chan%i' % elid)
  1026. else:
  1027. labels.append('ainp%i' % (elid - 129 + 1))
  1028. return dict(zip(elids, labels))
  1029. def __get_channel_labels_variant_b(self):
  1030. """
  1031. Returns labels for all channels for file spec 2.2 and 2.3
  1032. """
  1033. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1034. labels = self.__nev_ext_header[b'NEUEVLBL']['label']
  1035. return dict(zip(elids, labels)) if len(labels) > 0 else None
  1036. def __get_waveform_size_variant_a(self):
  1037. """
  1038. Returns wavform sizes for all channels for file spec 2.1 and 2.2
  1039. """
  1040. wf_dtypes = self.__get_waveforms_dtype()
  1041. nb_bytes_wf = self.__nev_basic_header['bytes_in_data_packets'] - 8
  1042. wf_sizes = dict([
  1043. (ch, int(nb_bytes_wf / np.dtype(dt).itemsize)) for ch, dt in
  1044. wf_dtypes.items()])
  1045. return wf_sizes
  1046. def __get_waveform_size_variant_b(self):
  1047. """
  1048. Returns wavform sizes for all channels for file spec 2.3
  1049. """
  1050. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1051. spike_widths = self.__nev_ext_header[b'NEUEVWAV']['spike_width']
  1052. return dict(zip(elids, spike_widths))
  1053. def __get_left_sweep_waveforms(self):
  1054. """
  1055. Returns left sweep of waveforms for each channel. Left sweep is defined
  1056. as the time from the beginning of the waveform to the trigger time of
  1057. the corresponding spike.
  1058. """
  1059. # TODO: Double check if this is the actual setting for Blackrock
  1060. wf_t_unit = self.__nev_params('waveform_time_unit')
  1061. all_ch = self.__nev_params('channel_ids')
  1062. # TODO: Double check if this is the correct assumption (10 samples)
  1063. # default value: threshold crossing after 10 samples of waveform
  1064. wf_left_sweep = dict([(ch, 10 * wf_t_unit) for ch in all_ch])
  1065. # non-default: threshold crossing at center of waveform
  1066. # wf_size = self.__nev_params('waveform_size')
  1067. # wf_left_sweep = dict(
  1068. # [(ch, (wf_size[ch] / 2) * wf_t_unit) for ch in all_ch])
  1069. return wf_left_sweep
def __get_nsx_param_variant_a(self, param_name, nsx_nb):
    """
    Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
    2.1.

    Parameters:
        param_name (str): key of the parameter to return; one of the keys
            of the dict built below.
        nsx_nb (int): nsx file number (e.g. 5 for .ns5).
    """
    # Here, min/max_analog_val and min/max_digital_val are not available in
    # the nsx, so that we must estimate these parameters from the
    # digitization factor of the nev (information by Kian Torab, Blackrock
    # Microsystems). Here dig_factor=max_analog_val/max_digital_val. We set
    # max_digital_val to 1000, and max_analog_val=dig_factor. dig_factor is
    # given in nV by definition, so the units turn out to be uV.
    labels = []
    dig_factor = []
    for elid in self.__nsx_ext_header[nsx_nb]['electrode_id']:
        if self._avail_files['nev']:
            # This is a workaround for the DigitalFactor overflow in NEV
            # files recorded with buggy Cerebus system.
            # Fix taken from: NMPK toolbox by Blackrock,
            # file openNEV, line 464,
            # git rev. d0a25eac902704a3a29fa5dfd3aed0744f4733ed
            df = self.__nev_params('digitization_factor')[elid]
            if df == 21516:
                df = 152592.547
            dig_factor.append(df)
        else:
            # without a nev file the digitization factor is unknown
            dig_factor.append(None)
        # ids below 129 are neural channels; higher ids are analog inputs
        if elid < 129:
            labels.append('chan%i' % elid)
        else:
            labels.append('ainp%i' % (elid - 129 + 1))
    # NOTE(review): the whole dict is built eagerly on each call although
    # only one entry is returned
    nsx_parameters = {
        'labels': labels,
        'units': np.array(
            ['uV'] *
            self.__nsx_basic_header[nsx_nb]['channel_count']),
        'min_analog_val': -1 * np.array(dig_factor),
        'max_analog_val': np.array(dig_factor),
        'min_digital_val': np.array(
            [-1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
        'max_digital_val': np.array(
            [1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
        # spec 2.1 files are always clocked at 30 kHz
        'timestamp_resolution': 30000,
        'bytes_in_headers':
            self.__nsx_basic_header[nsx_nb].dtype.itemsize +
            self.__nsx_ext_header[nsx_nb].dtype.itemsize *
            self.__nsx_basic_header[nsx_nb]['channel_count'],
        'sampling_rate':
            30000 / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
        'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
            30000 / self.__nsx_basic_header[nsx_nb]['period']))}
    return nsx_parameters[param_name]
def __get_nsx_param_variant_b(self, param_name, nsx_nb):
    """
    Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
    2.2 and 2.3.

    Parameters:
        param_name (str): key of the parameter to return; one of the keys
            of the dict built below.
        nsx_nb (int): nsx file number (e.g. 5 for .ns5).
    """
    # unlike spec 2.1, all scaling parameters are stored directly in the
    # nsx basic and extended headers
    nsx_parameters = {
        'labels':
            self.__nsx_ext_header[nsx_nb]['electrode_label'],
        'units':
            self.__nsx_ext_header[nsx_nb]['units'],
        'min_analog_val':
            self.__nsx_ext_header[nsx_nb]['min_analog_val'],
        'max_analog_val':
            self.__nsx_ext_header[nsx_nb]['max_analog_val'],
        'min_digital_val':
            self.__nsx_ext_header[nsx_nb]['min_digital_val'],
        'max_digital_val':
            self.__nsx_ext_header[nsx_nb]['max_digital_val'],
        'timestamp_resolution':
            self.__nsx_basic_header[nsx_nb]['timestamp_resolution'],
        'bytes_in_headers':
            self.__nsx_basic_header[nsx_nb]['bytes_in_headers'],
        # samples per second = clock resolution / sampling period
        'sampling_rate':
            self.__nsx_basic_header[nsx_nb]['timestamp_resolution'] /
            self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
        'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
            self.__nsx_basic_header[nsx_nb]['timestamp_resolution'] /
            self.__nsx_basic_header[nsx_nb]['period']))}
    return nsx_parameters[param_name]
  1150. def __get_nsx_databl_param_variant_a(
  1151. self, param_name, nsx_nb, n_start=None, n_stop=None):
  1152. """
  1153. Returns data block parameter (param_name) for a given nsx (nsx_nb) for
  1154. file spec 2.1. Arg 'n_start' should not be specified! It is only set
  1155. for compatibility reasons with higher file spec.
  1156. """
  1157. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  1158. t_starts, t_stops = \
  1159. self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
  1160. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1161. 'bytes_in_headers', nsx_nb)
  1162. # extract parameters from nsx basic extended and data header
  1163. data_parameters = {
  1164. 'nb_data_points': int(
  1165. (self.__get_file_size(filename) - bytes_in_headers) /
  1166. (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1),
  1167. 'databl_idx': 1,
  1168. 'databl_t_start': t_starts[0],
  1169. 'databl_t_stop': t_stops[0]}
  1170. return data_parameters[param_name]
def __get_nsx_databl_param_variant_b(
        self, param_name, nsx_nb, n_start, n_stop):
    """
    Returns data block parameter (param_name) for a given nsx (nsx_nb) with
    a wanted n_start for file spec 2.2 and 2.3.

    Scans the data blocks of the nsx file and returns the parameter of
    the first block whose time range overlaps the user window
    [n_start, n_stop); a verbose warning is printed when the user window
    extends beyond the block on either side. Raises ValueError when no
    block overlaps the window at all.
    """
    t_starts, t_stops = \
        self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
    # data header
    for d_bl in self.__nsx_data_header[nsx_nb].keys():
        # from "data header" with corresponding t_start and t_stop
        data_parameters = {
            'nb_data_points':
                self.__nsx_data_header[nsx_nb][d_bl]['nb_data_points'],
            'databl_idx': d_bl,
            'databl_t_start': t_starts[d_bl - 1],
            'databl_t_stop': t_stops[d_bl - 1]}
        # case 1: user window lies fully inside this block
        if t_starts[d_bl - 1] <= n_start < n_stop <= t_stops[d_bl - 1]:
            return data_parameters[param_name]
        # case 2: window starts before the block but ends inside it
        elif n_start < t_starts[d_bl - 1] < n_stop <= t_stops[d_bl - 1]:
            self._print_verbose(
                "User n_start ({0}) is smaller than the corresponding "
                "t_start of the available ns{1} datablock "
                "({2}).".format(n_start, nsx_nb, t_starts[d_bl - 1]))
            return data_parameters[param_name]
        # case 3: window starts inside the block but ends after it
        elif t_starts[d_bl - 1] <= n_start < t_stops[d_bl - 1] < n_stop:
            self._print_verbose(
                "User n_stop ({0}) is larger than the corresponding "
                "t_stop of the available ns{1} datablock "
                "({2}).".format(n_stop, nsx_nb, t_stops[d_bl - 1]))
            return data_parameters[param_name]
        # case 4: window completely encloses the block
        elif n_start < t_starts[d_bl - 1] < t_stops[d_bl - 1] < n_stop:
            self._print_verbose(
                "User n_start ({0}) is smaller than the corresponding "
                "t_start and user n_stop ({1}) is larger than the "
                "corresponding t_stop of the available ns{2} datablock "
                "({3}).".format(
                    n_start, n_stop, nsx_nb,
                    (t_starts[d_bl - 1], t_stops[d_bl - 1])))
            return data_parameters[param_name]
        else:
            # no overlap with this block; try the next one
            continue
    raise ValueError(
        "User n_start and n_stop are all smaller or larger than the "
        "t_start and t_stops of all available ns%i datablocks" % nsx_nb)
  1216. def __get_nonneural_evtypes_variant_a(self, data):
  1217. """
  1218. Defines event types and the necessary parameters to extract them from
  1219. a 2.1 and 2.2 nev file.
  1220. """
  1221. # TODO: add annotations of nev ext header (NSASEXEX) to event types
  1222. # digital events
  1223. event_types = {
  1224. 'digital_input_port': {
  1225. 'name': 'digital_input_port',
  1226. 'field': 'digital_input',
  1227. 'mask': self.__is_set(data['packet_insertion_reason'], 0),
  1228. 'desc': "Events of the digital input port"},
  1229. 'serial_input_port': {
  1230. 'name': 'serial_input_port',
  1231. 'field': 'digital_input',
  1232. 'mask':
  1233. self.__is_set(data['packet_insertion_reason'], 0) &
  1234. self.__is_set(data['packet_insertion_reason'], 7),
  1235. 'desc': "Events of the serial input port"}}
  1236. # analog input events via threshold crossings
  1237. for ch in range(5):
  1238. event_types.update({
  1239. 'analog_input_channel_{0}'.format(ch + 1): {
  1240. 'name': 'analog_input_channel_{0}'.format(ch + 1),
  1241. 'field': 'analog_input_channel_{0}'.format(ch + 1),
  1242. 'mask': self.__is_set(
  1243. data['packet_insertion_reason'], ch + 1),
  1244. 'desc': "Values of analog input channel {0} in mV "
  1245. "(+/- 5000)".format(ch + 1)}})
  1246. # TODO: define field and desc
  1247. event_types.update({
  1248. 'periodic_sampling_events': {
  1249. 'name': 'periodic_sampling_events',
  1250. 'field': 'digital_input',
  1251. 'mask': self.__is_set(data['packet_insertion_reason'], 6),
  1252. 'desc': 'Periodic sampling event of a certain frequency'}})
  1253. return event_types
  1254. def __get_nonneural_evtypes_variant_b(self, data):
  1255. """
  1256. Defines event types and the necessary parameters to extract them from
  1257. a 2.3 nev file.
  1258. """
  1259. # digital events
  1260. event_types = {
  1261. 'digital_input_port': {
  1262. 'name': 'digital_input_port',
  1263. 'field': 'digital_input',
  1264. 'mask': self.__is_set(data['packet_insertion_reason'], 0),
  1265. 'desc': "Events of the digital input port"},
  1266. 'serial_input_port': {
  1267. 'name': 'serial_input_port',
  1268. 'field': 'digital_input',
  1269. 'mask':
  1270. self.__is_set(data['packet_insertion_reason'], 0) &
  1271. self.__is_set(data['packet_insertion_reason'], 7),
  1272. 'desc': "Events of the serial input port"}}
  1273. return event_types
  1274. def __get_unit_classification(self, un_id):
  1275. """
  1276. Returns the Blackrock unit classification of an online spike sorting
  1277. for the given unit id (un_id).
  1278. """
  1279. # Blackrock unit classification
  1280. if un_id == 0:
  1281. return 'unclassified'
  1282. elif 1 <= un_id <= 16:
  1283. return '{0}'.format(un_id)
  1284. elif 17 <= un_id <= 244:
  1285. raise ValueError(
  1286. "Unit id {0} is not used by daq system".format(un_id))
  1287. elif un_id == 255:
  1288. return 'noise'
  1289. else:
  1290. raise ValueError("Unit id {0} cannot be classified".format(un_id))
  1291. def __is_set(self, flag, pos):
  1292. """
  1293. Checks if bit is set at the given position for flag. If flag is an
  1294. array, an array will be returned.
  1295. """
  1296. return flag & (1 << pos) > 0
  1297. def __transform_nsx_to_load(self, nsx_to_load):
  1298. """
  1299. Transforms the input argument nsx_to_load to a list of integers.
  1300. """
  1301. if hasattr(nsx_to_load, "__len__") and len(nsx_to_load) == 0:
  1302. nsx_to_load = None
  1303. if isinstance(nsx_to_load, int):
  1304. nsx_to_load = [nsx_to_load]
  1305. if isinstance(nsx_to_load, str):
  1306. if nsx_to_load.lower() == 'none':
  1307. nsx_to_load = None
  1308. elif nsx_to_load.lower() == 'all':
  1309. nsx_to_load = self._avail_nsx
  1310. else:
  1311. raise ValueError("Invalid specification of nsx_to_load.")
  1312. if nsx_to_load:
  1313. for nsx_nb in nsx_to_load:
  1314. if not self._avail_files['ns' + str(nsx_nb)]:
  1315. raise ValueError("ns%i is not available" % nsx_nb)
  1316. return nsx_to_load
  1317. def __transform_channels(self, channels, nsx_to_load):
  1318. """
  1319. Transforms the input argument channels to a list of integers.
  1320. """
  1321. all_channels = []
  1322. nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
  1323. if nsx_to_load is not None:
  1324. for nsx_nb in nsx_to_load:
  1325. all_channels.extend(
  1326. self.__nsx_ext_header[nsx_nb]['electrode_id'].astype(int))
  1327. elec_id = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1328. all_channels.extend(elec_id.astype(int))
  1329. all_channels = np.unique(all_channels).tolist()
  1330. if hasattr(channels, "__len__") and len(channels) == 0:
  1331. channels = None
  1332. if isinstance(channels, int):
  1333. channels = [channels]
  1334. if isinstance(channels, str):
  1335. if channels.lower() == 'none':
  1336. channels = None
  1337. elif channels.lower() == 'all':
  1338. channels = all_channels
  1339. else:
  1340. raise ValueError("Invalid channel specification.")
  1341. if channels:
  1342. if len(set(all_channels) & set(channels)) < len(channels):
  1343. self._print_verbose(
  1344. "Ignoring unknown channel ID(s) specified in in channels.")
  1345. # Make sure, all channels are valid and contain no duplicates
  1346. channels = list(set(all_channels).intersection(set(channels)))
  1347. else:
  1348. self._print_verbose("No channel is specified, therefore no "
  1349. "time series and unit data is loaded.")
  1350. return channels
def __transform_units(self, units, channels):
    """
    Transforms the input argument nsx_to_load to a dictionary, where keys
    (channels) are int, and values (units) are lists of integers.
    """
    # dict input: normalize the unit specification per channel, in place
    if isinstance(units, dict):
        for ch, u in units.items():
            if ch not in channels:
                self._print_verbose(
                    "Units contain a channel id which is not listed in "
                    "channels")
            # a single int becomes a one-element list
            if isinstance(u, int):
                units[ch] = [u]
            # an empty sequence means "no units" for this channel
            if hasattr(u, '__len__') and len(u) == 0:
                units[ch] = None
            if isinstance(u, str):
                if u.lower() == 'none':
                    units[ch] = None
                elif u.lower() == 'all':
                    # all possible unit ids: 0-16 plus 255 (noise)
                    units[ch] = list(range(17))
                    units[ch].append(255)
                else:
                    raise ValueError("Invalid unit specification.")
    else:
        # scalar/sequence input: normalize once, then apply the same
        # unit list to every requested channel
        if hasattr(units, "__len__") and len(units) == 0:
            units = None
        if isinstance(units, str):
            if units.lower() == 'none':
                units = None
            elif units.lower() == 'all':
                # all possible unit ids: 0-16 plus 255 (noise)
                units = list(range(17))
                units.append(255)
            else:
                raise ValueError("Invalid unit specification.")
        if isinstance(units, int):
            units = [units]
        if (channels is None) and (units is not None):
            raise ValueError(
                'At least one channel needs to be loaded to load units')
        if units:
            units = dict(zip(channels, [units] * len(channels)))
    if units is None:
        self._print_verbose("No units are specified, therefore no "
                            "unit or spiketrain is loaded.")
    return units
  1396. def __transform_times(self, n, default_n):
  1397. """
  1398. Transforms the input argument n_start or n_stop (n) to a list of
  1399. quantities. In case n is None, it is set to a default value provided by
  1400. the given function (default_n).
  1401. """
  1402. highest_res = self.__nev_params('event_unit')
  1403. if isinstance(n, pq.Quantity):
  1404. n = [n.rescale(highest_res)]
  1405. elif hasattr(n, "__len__"):
  1406. n = [tp.rescale(highest_res) if tp is not None
  1407. else default_n for tp in n]
  1408. elif n is None:
  1409. n = [default_n]
  1410. else:
  1411. raise ValueError('Invalid specification of n_start/n_stop.')
  1412. return n
  1413. def __merge_time_ranges(
  1414. self, user_n_starts, user_n_stops, nsx_to_load):
  1415. """
  1416. Merges after a validation the user specified n_starts and n_stops with
  1417. the intrinsically given n_starts and n_stops (from e.g, recording
  1418. pauses) of the file set.
  1419. Final n_starts and n_stops are chosen, so that the time range of each
  1420. resulting segment is set to the best meaningful maximum. This means
  1421. that the duration of the signals stored in the segments might be
  1422. smaller than the actually set duration of the segment.
  1423. """
  1424. # define the higest time resolution
  1425. # (for accurate manipulations of the time settings)
  1426. max_time = self.__get_max_time()
  1427. min_time = self.__get_min_time()
  1428. highest_res = self.__nev_params('event_unit')
  1429. user_n_starts = self.__transform_times(
  1430. user_n_starts, min_time)
  1431. user_n_stops = self.__transform_times(
  1432. user_n_stops, max_time)
  1433. # check if user provided as many n_starts as n_stops
  1434. if len(user_n_starts) != len(user_n_stops):
  1435. raise ValueError("n_starts and n_stops must be of equal length")
  1436. # if necessary reset max n_stop to max time of file set
  1437. start_stop_id = 0
  1438. while start_stop_id < len(user_n_starts):
  1439. if user_n_starts[start_stop_id] < min_time:
  1440. user_n_starts[start_stop_id] = min_time
  1441. self._print_verbose(
  1442. "Entry of n_start '{}' is smaller than min time of the file "
  1443. "set: n_start set to min time of file set"
  1444. "".format(user_n_starts[start_stop_id]))
  1445. if user_n_stops[start_stop_id] > max_time:
  1446. user_n_stops[start_stop_id] = max_time
  1447. self._print_verbose(
  1448. "Entry of n_stop '{}' is larger than max time of the file "
  1449. "set: n_stop set to max time of file set"
  1450. "".format(user_n_stops[start_stop_id]))
  1451. if (user_n_stops[start_stop_id] < min_time
  1452. or user_n_starts[start_stop_id] > max_time):
  1453. user_n_stops.pop(start_stop_id)
  1454. user_n_starts.pop(start_stop_id)
  1455. self._print_verbose(
  1456. "Entry of n_start is larger than max time or entry of "
  1457. "n_stop is smaller than min time of the "
  1458. "file set: n_start and n_stop are ignored")
  1459. continue
  1460. start_stop_id += 1
  1461. # get intrinsic time settings of nsx files (incl. rec pauses)
  1462. n_starts_files = []
  1463. n_stops_files = []
  1464. if nsx_to_load is not None:
  1465. for nsx_nb in nsx_to_load:
  1466. start_stop = \
  1467. self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
  1468. n_starts_files.append(start_stop[0])
  1469. n_stops_files.append(start_stop[1])
  1470. # reducing n_starts from wanted nsx files to minima
  1471. # (keep recording pause if it occurs)
  1472. if len(n_starts_files) > 0:
  1473. if np.shape(n_starts_files)[1] > 1:
  1474. n_starts_files = [
  1475. tp * highest_res for tp in np.min(n_starts_files, axis=1)]
  1476. else:
  1477. n_starts_files = [
  1478. tp * highest_res for tp in np.min(n_starts_files, axis=0)]
  1479. # reducing n_starts from wanted nsx files to maxima
  1480. # (keep recording pause if it occurs)
  1481. if len(n_stops_files) > 0:
  1482. if np.shape(n_stops_files)[1] > 1:
  1483. n_stops_files = [
  1484. tp * highest_res for tp in np.max(n_stops_files, axis=1)]
  1485. else:
  1486. n_stops_files = [
  1487. tp * highest_res for tp in np.max(n_stops_files, axis=0)]
  1488. # merge user time settings with intrinsic nsx time settings
  1489. n_starts = []
  1490. n_stops = []
  1491. for start, stop in zip(user_n_starts, user_n_stops):
  1492. # check if start and stop of user create a positive time interval
  1493. if not start < stop:
  1494. raise ValueError(
  1495. "t(i) in n_starts has to be smaller than t(i) in n_stops")
  1496. # Reduce n_starts_files to given intervals of user & add start
  1497. if len(n_starts_files) > 0:
  1498. mask = (n_starts_files > start) & (n_starts_files < stop)
  1499. red_n_starts_files = np.array(n_starts_files)[mask]
  1500. merged_n_starts = [start] + [
  1501. tp * highest_res for tp in red_n_starts_files]
  1502. else:
  1503. merged_n_starts = [start]
  1504. # Reduce n_stops_files to given intervals of user & add stop
  1505. if len(n_stops_files) > 0:
  1506. mask = (n_stops_files > start) & (n_stops_files < stop)
  1507. red_n_stops_files = np.array(n_stops_files)[mask]
  1508. merged_n_stops = [
  1509. tp * highest_res for tp in red_n_stops_files] + [stop]
  1510. else:
  1511. merged_n_stops = [stop]
  1512. # Define combined user and file n_starts and n_stops
  1513. # case one:
  1514. if len(merged_n_starts) == len(merged_n_stops):
  1515. if len(merged_n_starts) + len(merged_n_stops) == 2:
  1516. n_starts.extend(merged_n_starts)
  1517. n_stops.extend(merged_n_stops)
  1518. if len(merged_n_starts) + len(merged_n_stops) > 2:
  1519. merged_n_starts.remove(merged_n_starts[1])
  1520. n_starts.extend([merged_n_starts])
  1521. merged_n_stops.remove(merged_n_stops[-2])
  1522. n_stops.extend(merged_n_stops)
  1523. # case two:
  1524. elif len(merged_n_starts) < len(merged_n_stops):
  1525. n_starts.extend(merged_n_starts)
  1526. merged_n_stops.remove(merged_n_stops[-2])
  1527. n_stops.extend(merged_n_stops)
  1528. # case three:
  1529. elif len(merged_n_starts) > len(merged_n_stops):
  1530. merged_n_starts.remove(merged_n_starts[1])
  1531. n_starts.extend(merged_n_starts)
  1532. n_stops.extend(merged_n_stops)
  1533. if len(n_starts) > len(user_n_starts) and \
  1534. len(n_stops) > len(user_n_stops):
  1535. self._print_verbose(
  1536. "Additional recording pauses were detected. There will be "
  1537. "more segments than the user expects.")
  1538. return n_starts, n_stops
  1539. def __read_event(self, n_start, n_stop, data, ev_dict, lazy=False):
  1540. """
  1541. Creates an event for non-neural experimental events in nev data.
  1542. """
  1543. event_unit = self.__nev_params('event_unit')
  1544. if lazy:
  1545. times = []
  1546. labels = np.array([], dtype='S')
  1547. else:
  1548. times = data['timestamp'][ev_dict['mask']] * event_unit
  1549. labels = data[ev_dict['field']][ev_dict['mask']].astype(str)
  1550. # mask for given time interval
  1551. mask = (times >= n_start) & (times < n_stop)
  1552. if np.sum(mask) > 0:
  1553. ev = Event(
  1554. times=times[mask].astype(float),
  1555. labels=labels[mask],
  1556. name=ev_dict['name'],
  1557. description=ev_dict['desc'])
  1558. if lazy:
  1559. ev.lazy_shape = np.sum(mask)
  1560. else:
  1561. ev = None
  1562. return ev
  1563. def __read_spiketrain(
  1564. self, n_start, n_stop, spikes, channel_id, unit_id,
  1565. load_waveforms=False, scaling='raw', lazy=False):
  1566. """
  1567. Creates spiketrains for Spikes in nev data.
  1568. """
  1569. event_unit = self.__nev_params('event_unit')
  1570. # define a name for spiketrain
  1571. # (unique identifier: 1000 * elid + unit_nb)
  1572. name = "Unit {0}".format(1000 * channel_id + unit_id)
  1573. # define description for spiketrain
  1574. desc = 'SpikeTrain from channel: {0}, unit: {1}'.format(
  1575. channel_id, self.__get_unit_classification(unit_id))
  1576. # get spike times for given time interval
  1577. if not lazy:
  1578. times = spikes['timestamp'] * event_unit
  1579. mask = (times >= n_start) & (times <= n_stop)
  1580. times = times[mask].astype(float)
  1581. else:
  1582. times = np.array([]) * event_unit
  1583. st = SpikeTrain(
  1584. times=times,
  1585. name=name,
  1586. description=desc,
  1587. file_origin='.'.join([self._filenames['nev'], 'nev']),
  1588. t_start=n_start,
  1589. t_stop=n_stop)
  1590. if lazy:
  1591. st.lazy_shape = np.shape(times)
  1592. # load waveforms if requested
  1593. if load_waveforms and not lazy:
  1594. wf_dtype = self.__nev_params('waveform_dtypes')[channel_id]
  1595. wf_size = self.__nev_params('waveform_size')[channel_id]
  1596. waveforms = spikes['waveform'].flatten().view(wf_dtype)
  1597. waveforms = waveforms.reshape(int(spikes.size), 1, int(wf_size))
  1598. if scaling == 'voltage':
  1599. st.waveforms = (
  1600. waveforms[mask] * self.__nev_params('waveform_unit') *
  1601. self.__nev_params('digitization_factor')[channel_id] /
  1602. 1000.)
  1603. elif scaling == 'raw':
  1604. st.waveforms = waveforms[mask] * pq.dimensionless
  1605. else:
  1606. raise ValueError(
  1607. 'Unkown option {1} for parameter scaling.'.format(scaling))
  1608. st.sampling_rate = self.__nev_params('waveform_sampling_rate')
  1609. st.left_sweep = self.__get_left_sweep_waveforms()[channel_id]
  1610. # add additional annotations
  1611. st.annotate(
  1612. unit_id=int(unit_id),
  1613. channel_id=int(channel_id))
  1614. return st
  1615. def __read_analogsignal(
  1616. self, n_start, n_stop, signal, channel_id, nsx_nb,
  1617. scaling='raw', lazy=False):
  1618. """
  1619. Creates analogsignal for signal of channel in nsx data.
  1620. """
  1621. # TODO: The following part is extremely slow, since the memmaps for the
  1622. # headers are created again and again. In particular, this makes lazy
  1623. # loading slow as well. Solution would be to create header memmaps up
  1624. # front.
  1625. # get parameters
  1626. sampling_rate = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1627. 'sampling_rate', nsx_nb)
  1628. nsx_time_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1629. 'time_unit', nsx_nb)
  1630. max_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1631. 'max_analog_val', nsx_nb)
  1632. min_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1633. 'min_analog_val', nsx_nb)
  1634. max_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1635. 'max_digital_val', nsx_nb)
  1636. min_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1637. 'min_digital_val', nsx_nb)
  1638. units = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1639. 'units', nsx_nb)
  1640. labels = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1641. 'labels', nsx_nb)
  1642. dbl_idx = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
  1643. 'databl_idx', nsx_nb, n_start, n_stop)
  1644. t_start = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
  1645. 'databl_t_start', nsx_nb, n_start, n_stop)
  1646. t_stop = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
  1647. 'databl_t_stop', nsx_nb, n_start, n_stop)
  1648. elids_nsx = list(self.__nsx_ext_header[nsx_nb]['electrode_id'])
  1649. if channel_id in elids_nsx:
  1650. idx_ch = elids_nsx.index(channel_id)
  1651. else:
  1652. return None
  1653. description = \
  1654. "AnalogSignal from channel: {0}, label: {1}, nsx: {2}".format(
  1655. channel_id, labels[idx_ch], nsx_nb)
  1656. # TODO: Find a more time/memory efficient way to handle lazy loading
  1657. data_times = np.arange(
  1658. t_start.item(), t_stop.item(),
  1659. self.__nsx_basic_header[nsx_nb]['period']) * t_start.units
  1660. mask = (data_times >= n_start) & (data_times < n_stop)
  1661. if lazy:
  1662. lazy_shape = (np.sum(mask), )
  1663. sig_ch = np.array([], dtype='float32')
  1664. sig_unit = pq.dimensionless
  1665. t_start = n_start.rescale('s')
  1666. else:
  1667. data_times = data_times[mask].astype(float)
  1668. if scaling == 'voltage':
  1669. if not self._avail_files['nev']:
  1670. raise ValueError(
  1671. 'Cannot convert signals in filespec 2.1 nsX '
  1672. 'files to voltage without nev file.')
  1673. sig_ch = signal[dbl_idx][:, idx_ch][mask].astype('float32')
  1674. # transform dig value to physical value
  1675. sym_ana = (max_ana[idx_ch] == -min_ana[idx_ch])
  1676. sym_dig = (max_dig[idx_ch] == -min_dig[idx_ch])
  1677. if sym_ana and sym_dig:
  1678. sig_ch *= float(max_ana[idx_ch]) / float(max_dig[idx_ch])
  1679. else:
  1680. # general case (same result as above for symmetric input)
  1681. sig_ch -= min_dig[idx_ch]
  1682. sig_ch *= float(max_ana[idx_ch] - min_ana[idx_ch]) / \
  1683. float(max_dig[idx_ch] - min_dig[idx_ch])
  1684. sig_ch += float(min_ana[idx_ch])
  1685. sig_unit = units[idx_ch].decode()
  1686. elif scaling == 'raw':
  1687. sig_ch = signal[dbl_idx][:, idx_ch][mask].astype(int)
  1688. sig_unit = pq.dimensionless
  1689. else:
  1690. raise ValueError(
  1691. 'Unkown option {1} for parameter '
  1692. 'scaling.'.format(scaling))
  1693. t_start = data_times[0].rescale(nsx_time_unit)
  1694. anasig = AnalogSignal(
  1695. signal=pq.Quantity(sig_ch, sig_unit, copy=False),
  1696. sampling_rate=sampling_rate,
  1697. t_start=t_start,
  1698. name=labels[idx_ch],
  1699. description=description,
  1700. file_origin='.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb]))
  1701. if lazy:
  1702. anasig.lazy_shape = lazy_shape
  1703. anasig.annotate(
  1704. nsx=nsx_nb,
  1705. channel_id=int(channel_id))
  1706. return anasig
  1707. def __read_unit(self, unit_id, channel_id):
  1708. """
  1709. Creates unit with unit id for given channel id.
  1710. """
  1711. # define a name for spiketrain
  1712. # (unique identifier: 1000 * elid + unit_nb)
  1713. name = "Unit {0}".format(1000 * channel_id + unit_id)
  1714. # define description for spiketrain
  1715. desc = 'Unit from channel: {0}, id: {1}'.format(
  1716. channel_id, self.__get_unit_classification(unit_id))
  1717. un = Unit(
  1718. name=name,
  1719. description=desc,
  1720. file_origin='.'.join([self._filenames['nev'], 'nev']))
  1721. # add additional annotations
  1722. un.annotate(
  1723. unit_id=int(unit_id),
  1724. channel_id=int(channel_id))
  1725. return un
  1726. def __read_channelindex(
  1727. self, channel_id, index=None, channel_units=None, cascade=True):
  1728. """
  1729. Returns a ChannelIndex with the given index for the given channels
  1730. containing a neo.core.unit.Unit object list of the given units.
  1731. """
  1732. flt_type = {0: 'None', 1: 'Butterworth'}
  1733. chidx = ChannelIndex(
  1734. np.array([channel_id]),
  1735. file_origin=self.filename)
  1736. if index is not None:
  1737. chidx.index = index
  1738. chidx.name = "ChannelIndex {0}".format(chidx.index)
  1739. else:
  1740. chidx.name = "ChannelIndex"
  1741. if self._avail_files['nev']:
  1742. channel_labels = self.__nev_params('channel_labels')
  1743. if channel_labels is not None:
  1744. chidx.channel_names = np.array([channel_labels[channel_id]])
  1745. chidx.channel_ids = np.array([channel_id])
  1746. # additional annotations from nev
  1747. if channel_id in self.__nev_ext_header[b'NEUEVWAV']['electrode_id']:
  1748. get_idx = list(
  1749. self.__nev_ext_header[b'NEUEVWAV']['electrode_id']).index(
  1750. channel_id)
  1751. chidx.annotate(
  1752. connector_ID=self.__nev_ext_header[
  1753. b'NEUEVWAV']['physical_connector'][get_idx],
  1754. connector_pinID=self.__nev_ext_header[
  1755. b'NEUEVWAV']['connector_pin'][get_idx],
  1756. nev_dig_factor=self.__nev_ext_header[
  1757. b'NEUEVWAV']['digitization_factor'][get_idx],
  1758. nev_energy_threshold=self.__nev_ext_header[
  1759. b'NEUEVWAV']['energy_threshold'][get_idx] * pq.uV,
  1760. nev_hi_threshold=self.__nev_ext_header[
  1761. b'NEUEVWAV']['hi_threshold'][get_idx] * pq.uV,
  1762. nev_lo_threshold=self.__nev_ext_header[
  1763. b'NEUEVWAV']['lo_threshold'][get_idx] * pq.uV,
  1764. nb_sorted_units=self.__nev_ext_header[
  1765. b'NEUEVWAV']['nb_sorted_units'][get_idx],
  1766. waveform_size=self.__waveform_size[self.__nev_spec](
  1767. )[channel_id] * self.__nev_params('waveform_time_unit'))
  1768. # additional annotations from nev (only for file_spec > 2.1)
  1769. if self.__nev_spec in ['2.2', '2.3']:
  1770. get_idx = list(
  1771. self.__nev_ext_header[
  1772. b'NEUEVFLT']['electrode_id']).index(
  1773. channel_id)
  1774. # filter type codes (extracted from blackrock manual)
  1775. chidx.annotate(
  1776. nev_hi_freq_corner=self.__nev_ext_header[b'NEUEVFLT'][
  1777. 'hi_freq_corner'][get_idx] /
  1778. 1000. * pq.Hz,
  1779. nev_hi_freq_order=self.__nev_ext_header[b'NEUEVFLT'][
  1780. 'hi_freq_order'][get_idx],
  1781. nev_hi_freq_type=flt_type[self.__nev_ext_header[
  1782. b'NEUEVFLT']['hi_freq_type'][get_idx]],
  1783. nev_lo_freq_corner=self.__nev_ext_header[
  1784. b'NEUEVFLT']['lo_freq_corner'][get_idx] /
  1785. 1000. * pq.Hz,
  1786. nev_lo_freq_order=self.__nev_ext_header[
  1787. b'NEUEVFLT']['lo_freq_order'][get_idx],
  1788. nev_lo_freq_type=flt_type[self.__nev_ext_header[
  1789. b'NEUEVFLT']['lo_freq_type'][get_idx]])
  1790. # additional information about the LFP signal
  1791. if self.__nev_spec in ['2.2', '2.3'] and self.__nsx_ext_header:
  1792. # It does not matter which nsX file to ask for this info
  1793. k = list(self.__nsx_ext_header.keys())[0]
  1794. if channel_id in self.__nsx_ext_header[k]['electrode_id']:
  1795. get_idx = list(
  1796. self.__nsx_ext_header[k]['electrode_id']).index(
  1797. channel_id)
  1798. chidx.annotate(
  1799. nsx_hi_freq_corner=self.__nsx_ext_header[k][
  1800. 'hi_freq_corner'][get_idx] / 1000. * pq.Hz,
  1801. nsx_lo_freq_corner=self.__nsx_ext_header[k][
  1802. 'lo_freq_corner'][get_idx] / 1000. * pq.Hz,
  1803. nsx_hi_freq_order=self.__nsx_ext_header[k][
  1804. 'hi_freq_order'][get_idx],
  1805. nsx_lo_freq_order=self.__nsx_ext_header[k][
  1806. 'lo_freq_order'][get_idx],
  1807. nsx_hi_freq_type=flt_type[
  1808. self.__nsx_ext_header[k]['hi_freq_type'][get_idx]],
  1809. nsx_lo_freq_type=flt_type[
  1810. self.__nsx_ext_header[k]['hi_freq_type'][get_idx]])
  1811. chidx.description = \
  1812. "Container for units and groups analogsignals of one recording " \
  1813. "channel across segments."
  1814. if not cascade:
  1815. return chidx
  1816. if self._avail_files['nev']:
  1817. # read nev data
  1818. nev_data = self.__nev_data_reader[self.__nev_spec]()
  1819. if channel_units is not None:
  1820. # extract first data for channel
  1821. ch_mask = (nev_data['Spikes']['packet_id'] == channel_id)
  1822. data_ch = nev_data['Spikes'][ch_mask]
  1823. for un_id in channel_units:
  1824. if un_id in np.unique(data_ch['unit_class_nb']):
  1825. un = self.__read_unit(
  1826. unit_id=un_id, channel_id=channel_id)
  1827. chidx.units.append(un)
  1828. chidx.create_many_to_one_relationship()
  1829. return chidx
  1830. def read_segment(
  1831. self, n_start, n_stop, name=None, description=None, index=None,
  1832. nsx_to_load='none', channels='none', units='none',
  1833. load_waveforms=False, load_events=False, scaling='raw',
  1834. lazy=False, cascade=True):
  1835. """
  1836. Returns an annotated neo.core.segment.Segment.
  1837. Args:
  1838. n_start (Quantity):
  1839. Start time of maximum time range of signals contained in this
  1840. segment.
  1841. n_stop (Quantity):
  1842. Stop time of maximum time range of signals contained in this
  1843. segment.
  1844. name (None, string):
  1845. If None, name is set to default, otherwise it is set to user
  1846. input.
  1847. description (None, string):
  1848. If None, description is set to default, otherwise it is set to
  1849. user input.
  1850. index (None, int):
  1851. If not None, index of segment is set to user index.
  1852. nsx_to_load (int, list, str):
  1853. ID(s) of nsx file(s) from which to load data, e.g., if set to
  1854. 5 only data from the ns5 file are loaded. If 'none' or empty
  1855. list, no nsx files and therefore no analog signals are loaded.
  1856. If 'all', data from all available nsx are loaded.
  1857. channels (int, list, str):
  1858. Channel id(s) from which to load data. If 'none' or empty list,
  1859. no channels and therefore no analog signal or spiketrains are
  1860. loaded. If 'all', all available channels are loaded.
  1861. units (int, list, str, dict):
  1862. ID(s) of unit(s) to load. If 'none' or empty list, no units and
  1863. therefore no spiketrains are loaded. If 'all', all available
  1864. units are loaded. If dict, the above can be specified
  1865. individually for each channel (keys), e.g. {1: 5, 2: 'all'}
  1866. loads unit 5 from channel 1 and all units from channel 2.
  1867. load_waveforms (boolean):
  1868. If True, waveforms are attached to all loaded spiketrains.
  1869. load_events (boolean):
  1870. If True, all recorded events are loaded.
  1871. scaling (str):
  1872. Determines whether time series of individual
  1873. electrodes/channels are returned as AnalogSignals containing
  1874. raw integer samples ('raw'), or scaled to arrays of floats
  1875. representing voltage ('voltage'). Note that for file
  1876. specification 2.1 and lower, the option 'voltage' requires a
  1877. nev file to be present.
  1878. lazy (boolean):
  1879. If True, only the shape of the data is loaded.
  1880. cascade (boolean):
  1881. If True, only the segment without children is returned.
  1882. Returns:
  1883. Segment (neo.Segment):
  1884. Returns the specified segment. See documentation of
  1885. `read_block()` for a full list of annotations of all child
  1886. objects.
  1887. """
  1888. # Make sure that input args are transformed into correct instances
  1889. nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
  1890. channels = self.__transform_channels(channels, nsx_to_load)
  1891. units = self.__transform_units(units, channels)
  1892. seg = Segment(file_origin=self.filename)
  1893. # set user defined annotations if they were provided
  1894. if index is None:
  1895. seg.index = 0
  1896. else:
  1897. seg.index = index
  1898. if name is None:
  1899. seg.name = "Segment {0}".format(seg.index)
  1900. else:
  1901. seg.name = name
  1902. if description is None:
  1903. seg.description = "Segment containing data from t_min to t_max."
  1904. else:
  1905. seg.description = description
  1906. if not cascade:
  1907. return seg
  1908. if self._avail_files['nev']:
  1909. # filename = self._filenames['nev'] + '.nev'
  1910. # annotate segment according to file headers
  1911. seg.rec_datetime = datetime.datetime(
  1912. year=self.__nev_basic_header['year'],
  1913. month=self.__nev_basic_header['month'],
  1914. day=self.__nev_basic_header['day'],
  1915. hour=self.__nev_basic_header['hour'],
  1916. minute=self.__nev_basic_header['minute'],
  1917. second=self.__nev_basic_header['second'],
  1918. microsecond=self.__nev_basic_header['millisecond'])
  1919. # read nev data
  1920. nev_data = self.__nev_data_reader[self.__nev_spec]()
  1921. # read non-neural experimental events
  1922. if load_events:
  1923. ev_dict = self.__nonneural_evtypes[self.__nev_spec](
  1924. nev_data['NonNeural'])
  1925. for ev_type in ev_dict.keys():
  1926. ev = self.__read_event(
  1927. n_start=n_start,
  1928. n_stop=n_stop,
  1929. data=nev_data['NonNeural'],
  1930. ev_dict=ev_dict[ev_type],
  1931. lazy=lazy)
  1932. if ev is not None:
  1933. seg.events.append(ev)
  1934. # TODO: not yet implemented (only avail in nev_spec 2.3)
  1935. # videosync events
  1936. # trackingevents events
  1937. # buttontrigger events
  1938. # configevent events
  1939. # get spiketrain
  1940. if units is not None:
  1941. not_existing_units = []
  1942. for ch_id in units.keys():
  1943. # extract first data for channel
  1944. ch_mask = (nev_data['Spikes']['packet_id'] == ch_id)
  1945. data_ch = nev_data['Spikes'][ch_mask]
  1946. if units[ch_id] is not None:
  1947. for un_id in units[ch_id]:
  1948. if un_id in np.unique(data_ch['unit_class_nb']):
  1949. # extract then data for unit if unit exists
  1950. un_mask = (data_ch['unit_class_nb'] == un_id)
  1951. data_un = data_ch[un_mask]
  1952. st = self.__read_spiketrain(
  1953. n_start=n_start,
  1954. n_stop=n_stop,
  1955. spikes=data_un,
  1956. channel_id=ch_id,
  1957. unit_id=un_id,
  1958. load_waveforms=load_waveforms,
  1959. scaling=scaling,
  1960. lazy=lazy)
  1961. seg.spiketrains.append(st)
  1962. else:
  1963. not_existing_units.append(un_id)
  1964. if not_existing_units:
  1965. self._print_verbose(
  1966. "Units {0} on channel {1} do not "
  1967. "exist".format(not_existing_units, ch_id))
  1968. else:
  1969. self._print_verbose(
  1970. "There are no units specified for channel "
  1971. "{0}".format(ch_id))
  1972. if nsx_to_load is not None:
  1973. for nsx_nb in nsx_to_load:
  1974. # read nsx data
  1975. nsx_data = \
  1976. self.__nsx_data_reader[self.__nsx_spec[nsx_nb]](nsx_nb)
  1977. # read Analogsignals
  1978. for ch_id in channels:
  1979. anasig = self.__read_analogsignal(
  1980. n_start=n_start,
  1981. n_stop=n_stop,
  1982. signal=nsx_data,
  1983. channel_id=ch_id,
  1984. nsx_nb=nsx_nb,
  1985. scaling=scaling,
  1986. lazy=lazy)
  1987. if anasig is not None:
  1988. seg.analogsignals.append(anasig)
  1989. # TODO: not yet implemented
  1990. # if self._avail_files['sif']:
  1991. # sif_header = self._read_sif(self._filenames['sif'] + '.sif')
  1992. # TODO: not yet implemented
  1993. # if self._avail_files['ccf']:
  1994. # ccf_header = self._read_sif(self._filenames['ccf'] + '.ccf')
  1995. seg.create_many_to_one_relationship()
  1996. return seg
  1997. def read_block(
  1998. self, index=None, name=None, description=None, nsx_to_load='none',
  1999. n_starts=None, n_stops=None, channels='none', units='none',
  2000. load_waveforms=False, load_events=False, scaling='raw',
  2001. lazy=False, cascade=True):
  2002. """
  2003. Args:
  2004. index (None, int):
  2005. If not None, index of block is set to user input.
  2006. name (None, str):
  2007. If None, name is set to default, otherwise it is set to user
  2008. input.
  2009. description (None, str):
  2010. If None, description is set to default, otherwise it is set to
  2011. user input.
  2012. nsx_to_load (int, list, str):
  2013. ID(s) of nsx file(s) from which to load data, e.g., if set to
  2014. 5 only data from the ns5 file are loaded. If 'none' or empty
  2015. list, no nsx files and therefore no analog signals are loaded.
  2016. If 'all', data from all available nsx are loaded.
  2017. n_starts (None, Quantity, list):
  2018. Start times for data in each segment. Number of entries must be
  2019. equal to length of n_stops. If None, intrinsic recording start
  2020. times of files set are used.
  2021. n_stops (None, Quantity, list):
  2022. Stop times for data in each segment. Number of entries must be
  2023. equal to length of n_starts. If None, intrinsic recording stop
  2024. times of files set are used.
  2025. channels (int, list, str):
  2026. Channel id(s) from which to load data. If 'none' or empty list,
  2027. no channels and therefore no analog signal or spiketrains are
  2028. loaded. If 'all', all available channels are loaded.
  2029. units (int, list, str, dict):
  2030. ID(s) of unit(s) to load. If 'none' or empty list, no units and
  2031. therefore no spiketrains are loaded. If 'all', all available
  2032. units are loaded. If dict, the above can be specified
  2033. individually for each channel (keys), e.g. {1: 5, 2: 'all'}
  2034. loads unit 5 from channel 1 and all units from channel 2.
  2035. load_waveforms (boolean):
  2036. If True, waveforms are attached to all loaded spiketrains.
  2037. load_events (boolean):
  2038. If True, all recorded events are loaded.
  2039. scaling (str):
  2040. Determines whether time series of individual
  2041. electrodes/channels are returned as AnalogSignals containing
  2042. raw integer samples ('raw'), or scaled to arrays of floats
  2043. representing voltage ('voltage'). Note that for file
  2044. specification 2.1 and lower, the option 'voltage' requires a
  2045. nev file to be present.
  2046. lazy (bool):
  2047. If True, only the shape of the data is loaded.
  2048. cascade (bool or "lazy"):
  2049. If True, only the block without children is returned.
  2050. Returns:
  2051. Block (neo.segment.Block):
  2052. Block linking all loaded Neo objects.
  2053. Block annotations:
  2054. avail_file_set (list):
  2055. List of extensions of all available files for the given
  2056. recording.
  2057. avail_nsx (list of int):
  2058. List of integers specifying the .nsX files available,
  2059. e.g., [2, 5] indicates that an ns2 and and ns5 file are
  2060. available.
  2061. avail_nev (bool):
  2062. True if a .nev file is available.
  2063. avail_ccf (bool):
  2064. True if a .ccf file is available.
  2065. avail_sif (bool):
  2066. True if a .sif file is available.
  2067. rec_pauses (bool):
  2068. True if the session contains a recording pause (i.e.,
  2069. multiple segments).
  2070. nb_segments (int):
  2071. Number of segments created after merging recording
  2072. times specified by user with the intrinsic ones of the
  2073. file set.
  2074. Segment annotations:
  2075. None.
  2076. ChannelIndex annotations:
  2077. waveform_size (Quantitiy):
  2078. Length of time used to save spike waveforms (in units
  2079. of 1/30000 s).
  2080. nev_hi_freq_corner (Quantitiy),
  2081. nev_lo_freq_corner (Quantitiy),
  2082. nev_hi_freq_order (int), nev_lo_freq_order (int),
  2083. nev_hi_freq_type (str), nev_lo_freq_type (str),
  2084. nev_hi_threshold, nev_lo_threshold,
  2085. nev_energy_threshold (quantity):
  2086. Indicates parameters of spike detection.
  2087. nsx_hi_freq_corner (Quantity),
  2088. nsx_lo_freq_corner (Quantity)
  2089. nsx_hi_freq_order (int), nsx_lo_freq_order (int),
  2090. nsx_hi_freq_type (str), nsx_lo_freq_type (str)
  2091. Indicates parameters of the filtered signal in one of
  2092. the files ns1-ns5 (ns6, if available, is not filtered).
  2093. nev_dig_factor (int):
  2094. Digitization factor in microvolts of the nev file, used
  2095. to convert raw samples to volt.
  2096. connector_ID, connector_pinID (int):
  2097. ID of connector and pin on the connector where the
  2098. channel was recorded from.
  2099. nb_sorted_units (int):
  2100. Number of sorted units on this channel (noise, mua and
  2101. sua).
  2102. Unit annotations:
  2103. unit_id (int):
  2104. ID of the unit.
  2105. channel_id (int):
  2106. Channel ID (Blackrock ID) from which the unit was
  2107. loaded (equiv. to the single list entry in the
  2108. attribute channel_ids of ChannelIndex parent).
  2109. AnalogSignal annotations:
  2110. nsx (int):
  2111. nsX file the signal was loaded from, e.g., 5 indicates
  2112. the .ns5 file.
  2113. channel_id (int):
  2114. Channel ID (Blackrock ID) from which the signal was
  2115. loaded.
  2116. Spiketrain annotations:
  2117. unit_id (int):
  2118. ID of the unit from which the spikes were recorded.
  2119. channel_id (int):
  2120. Channel ID (Blackrock ID) from which the spikes were
  2121. loaded.
  2122. Event annotations:
  2123. The resulting Block contains one Event object with the name
  2124. `digital_input_port`. It contains all digitally recorded
  2125. events, with the event code coded in the labels of the
  2126. Event. The Event object contains no further annotation.
  2127. """
  2128. # Make sure that input args are transformed into correct instances
  2129. nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
  2130. channels = self.__transform_channels(channels, nsx_to_load)
  2131. units = self.__transform_units(units, channels)
  2132. # Create block
  2133. bl = Block(file_origin=self.filename)
  2134. # set user defined annotations if they were provided
  2135. if index is not None:
  2136. bl.index = index
  2137. if name is None:
  2138. bl.name = "Blackrock Data Block"
  2139. else:
  2140. bl.name = name
  2141. if description is None:
  2142. bl.description = "Block of data from Blackrock file set."
  2143. else:
  2144. bl.description = description
  2145. if self._avail_files['nev']:
  2146. bl.rec_datetime = self.__nev_params('rec_datetime')
  2147. bl.annotate(
  2148. avail_file_set=[k for k, v in self._avail_files.items() if v])
  2149. bl.annotate(avail_nsx=self._avail_nsx)
  2150. bl.annotate(avail_nev=self._avail_files['nev'])
  2151. bl.annotate(avail_sif=self._avail_files['sif'])
  2152. bl.annotate(avail_ccf=self._avail_files['ccf'])
  2153. bl.annotate(rec_pauses=False)
  2154. # Test n_starts and n_stops user requirements and combine them if
  2155. # possible with file internal n_starts and n_stops from rec pauses.
  2156. n_starts, n_stops = \
  2157. self.__merge_time_ranges(n_starts, n_stops, nsx_to_load)
  2158. bl.annotate(nb_segments=len(n_starts))
  2159. if not cascade:
  2160. return bl
  2161. # read segment
  2162. for seg_idx, (n_start, n_stop) in enumerate(zip(n_starts, n_stops)):
  2163. seg = self.read_segment(
  2164. n_start=n_start,
  2165. n_stop=n_stop,
  2166. index=seg_idx,
  2167. nsx_to_load=nsx_to_load,
  2168. channels=channels,
  2169. units=units,
  2170. load_waveforms=load_waveforms,
  2171. load_events=load_events,
  2172. scaling=scaling,
  2173. lazy=lazy,
  2174. cascade=cascade)
  2175. bl.segments.append(seg)
  2176. # read channelindexes
  2177. if channels:
  2178. for ch_id in channels:
  2179. if units and ch_id in units.keys():
  2180. ch_units = units[ch_id]
  2181. else:
  2182. ch_units = None
  2183. chidx = self.__read_channelindex(
  2184. channel_id=ch_id,
  2185. index=0,
  2186. channel_units=ch_units,
  2187. cascade=cascade)
  2188. for seg in bl.segments:
  2189. if ch_units:
  2190. for un in chidx.units:
  2191. sts = seg.filter(
  2192. targdict={'name': un.name},
  2193. objects='SpikeTrain')
  2194. for st in sts:
  2195. un.spiketrains.append(st)
  2196. anasigs = seg.filter(
  2197. targdict={'channel_id': ch_id},
  2198. objects='AnalogSignal')
  2199. for anasig in anasigs:
  2200. chidx.analogsignals.append(anasig)
  2201. bl.channel_indexes.append(chidx)
  2202. bl.create_many_to_one_relationship()
  2203. return bl
  2204. def __str__(self):
  2205. """
  2206. Prints summary of the Blackrock data file set.
  2207. """
  2208. output = "\nFile Origins for Blackrock File Set\n"\
  2209. "====================================\n"
  2210. for ftype in self._filenames.keys():
  2211. output += ftype + ':' + self._filenames[ftype] + '\n'
  2212. if self._avail_files['nev']:
  2213. output += "\nEvent Parameters (NEV)\n"\
  2214. "====================================\n"\
  2215. "Timestamp resolution (Hz): " +\
  2216. str(self.__nev_basic_header['timestamp_resolution']) +\
  2217. "\nWaveform resolution (Hz): " +\
  2218. str(self.__nev_basic_header['sample_resolution'])
  2219. if b'NEUEVWAV' in self.__nev_ext_header.keys():
  2220. avail_el = \
  2221. self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  2222. con = \
  2223. self.__nev_ext_header[b'NEUEVWAV']['physical_connector']
  2224. pin = \
  2225. self.__nev_ext_header[b'NEUEVWAV']['connector_pin']
  2226. nb_units = \
  2227. self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units']
  2228. output += "\n\nAvailable electrode IDs:\n"\
  2229. "====================================\n"
  2230. for i, el in enumerate(avail_el):
  2231. output += "Electrode ID %i: " % el
  2232. channel_labels = self.__nev_params('channel_labels')
  2233. if channel_labels is not None:
  2234. output += "label %s: " % channel_labels[el]
  2235. output += "connector: %i, " % con[i]
  2236. output += "pin: %i, " % pin[i]
  2237. output += 'nb_units: %i\n' % nb_units[i]
  2238. for nsx_nb in self._avail_nsx:
  2239. analog_res = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  2240. 'sampling_rate', nsx_nb)
  2241. avail_el = [
  2242. el for el in self.__nsx_ext_header[nsx_nb]['electrode_id']]
  2243. output += "\nAnalog Parameters (NS" + str(nsx_nb) + ")"\
  2244. "\n===================================="
  2245. output += "\nResolution (Hz): %i" % analog_res
  2246. output += "\nAvailable channel IDs: " +\
  2247. ", " .join(["%i" % a for a in avail_el]) + "\n"
  2248. return output