  1. """
  2. Module for reading data from files in the Blackrock format.
  3. This module is an older implementation with old neo.io API.
  4. A new class Blackrock compunded by BlackrockRawIO and BaseFromIO
  5. superseed this one.
  6. This work is based on:
  7. * Chris Rodgers - first version
  8. * Michael Denker, Lyuba Zehl - second version
  9. * Samuel Garcia - third version
  10. * Lyuba Zehl, Michael Denker - fourth version
  11. This IO supports reading only.
  12. This IO is able to read:
  13. * the nev file which contains spikes
  14. * ns1, ns2, .., ns6 files that contain signals at different sampling rates
  15. This IO can handle the following Blackrock file specifications:
  16. * 2.1
  17. * 2.2
  18. * 2.3
  19. The neural data channels are 1 - 128.
  20. The analog inputs are 129 - 144. (129 - 137 AC coupled, 138 - 144 DC coupled)
  21. spike- and event-data; 30000 Hz
  22. "ns1": "analog data: 500 Hz",
  23. "ns2": "analog data: 1000 Hz",
  24. "ns3": "analog data: 2000 Hz",
  25. "ns4": "analog data: 10000 Hz",
  26. "ns5": "analog data: 30000 Hz",
  27. "ns6": "analog data: 30000 Hz (no digital filter)"
  28. TODO:
  29. * videosync events (file spec 2.3)
  30. * tracking events (file spec 2.3)
  31. * buttontrigger events (file spec 2.3)
  32. * config events (file spec 2.3)
  33. * check left sweep settings of Blackrock
  34. * check nsx offsets (file spec 2.1)
  35. * add info of nev ext header (NSASEXEX) to non-neural events
  36. (file spec 2.1 and 2.2)
  37. * read sif file information
  38. * read ccf file information
  39. * fix reading of periodic sampling events (non-neural event type)
  40. (file spec 2.1 and 2.2)
  41. """

import datetime
import os
import re
import warnings

import numpy as np
import quantities as pq

import neo.io.blackrockio
from neo.io.baseio import BaseIO
from neo.core import (Block, Segment, SpikeTrain, Unit, Event,
                      ChannelIndex, AnalogSignal)

if __name__ == '__main__':
    pass


class BlackrockIO(BaseIO):
    """
    Class for reading data from a file set recorded by the Blackrock
    (Cerebus) recording system.

    Upon initialization, the class is linked to the available set of Blackrock
    files. Data can be read as a neo Block or neo Segment object using the
    read_block or read_segment function, respectively.

    Note: This routine will handle files according to specification 2.1, 2.2,
    and 2.3. Recording pauses that may occur in file specifications 2.2 and
    2.3 are automatically extracted and the data set is split into different
    segments.

    Inherits from:
        neo.io.BaseIO

    The Blackrock data format consists not of a single file, but a set of
    different files. This constructor associates itself with a set of files
    that constitute a common data set. By default, all files belonging to
    the file set have the same base name, but different extensions.
    However, by using the override parameters, individual filenames can
    be set.

    Args:
        filename (string):
            File name (without extension) of the set of Blackrock files to
            associate with. Any .nsX or .nev, .sif, or .ccf extensions are
            ignored when parsing this parameter.
        nsx_override (string):
            File name of the .nsX files (without extension). If None,
            filename is used.
            Default: None.
        nev_override (string):
            File name of the .nev file (without extension). If None,
            filename is used.
            Default: None.
        sif_override (string):
            File name of the .sif file (without extension). If None,
            filename is used.
            Default: None.
        ccf_override (string):
            File name of the .ccf file (without extension). If None,
            filename is used.
            Default: None.
        verbose (boolean):
            If True, the class will output additional diagnostic
            information on stdout.
            Default: False

    Returns:
        -

    Examples:
        >>> a = BlackrockIO('myfile')

        Loads a set of files consisting of myfile.ns1, ...,
        myfile.ns6, and myfile.nev.

        >>> b = BlackrockIO('myfile', nev_override='sorted')

        Loads the analog data from the set of files myfile.ns1, ...,
        myfile.ns6, but reads spike/event data from sorted.nev.
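
        To read data after construction, call read_block (a minimal
        sketch; the argument values below are merely illustrative, see
        the read_params class attribute for the accepted options):

        >>> blk = b.read_block(nsx_to_load=[2], load_waveforms=False,
        ...                    load_events=True)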
  107. """

    # Class variables demonstrating capabilities of this IO
    is_readable = True
    is_writable = False

    # This IO can only manipulate continuous data, spikes, and events
    supported_objects = [
        Block, Segment, Event, AnalogSignal, SpikeTrain, Unit, ChannelIndex]

    readable_objects = [Block, Segment]
    writeable_objects = []

    has_header = False
    is_streameable = False

    read_params = {
        Block: [
            ('nsx_to_load', {
                'value': 'none',
                'label': "List of nsx files (ids, int) to read."}),
            ('n_starts', {
                'value': None,
                'label': "List of n_start points (Quantity) to create "
                         "segments from."}),
            ('n_stops', {
                'value': None,
                'label': "List of n_stop points (Quantity) to create "
                         "segments from."}),
            ('channels', {
                'value': 'none',
                'label': "List of channels (ids, int) to load data from."}),
            ('units', {
                'value': 'none',
                'label': "Dictionary for units (values, list of int) to load "
                         "for each channel (key, int)."}),
            ('load_waveforms', {
                'value': False,
                'label': "States if waveforms should be loaded and attached "
                         "to spiketrain"}),
            ('load_events', {
                'value': False,
                'label': "States if events should be loaded."})],
        Segment: [
            ('n_start', {
                'label': "Start time point (Quantity) for segment"}),
            ('n_stop', {
                'label': "Stop time point (Quantity) for segment"}),
            ('nsx_to_load', {
                'value': 'none',
                'label': "List of nsx files (ids, int) to read."}),
            ('channels', {
                'value': 'none',
                'label': "List of channels (ids, int) to load data from."}),
            ('units', {
                'value': 'none',
                'label': "Dictionary for units (values, list of int) to load "
                         "for each channel (key, int)."}),
            ('load_waveforms', {
                'value': False,
                'label': "States if waveforms should be loaded and attached "
                         "to spiketrain"}),
            ('load_events', {
                'value': False,
                'label': "States if events should be loaded."})]}

    write_params = {}

    name = 'Blackrock IO'
    description = "This IO reads .nev/.nsX files of the Blackrock " + \
                  "(Cerebus) recording system."

    # The possible file extensions of the Cerebus system and their content:
    # ns1: contains analog data; sampled at 500 Hz (+ digital filters)
    # ns2: contains analog data; sampled at 1000 Hz (+ digital filters)
    # ns3: contains analog data; sampled at 2000 Hz (+ digital filters)
    # ns4: contains analog data; sampled at 10000 Hz (+ digital filters)
    # ns5: contains analog data; sampled at 30000 Hz (+ digital filters)
    # ns6: contains analog data; sampled at 30000 Hz (no digital filters)
    # nev: contains spike- and event-data; sampled at 30000 Hz
    # sif: contains institution and patient info (XML)
    # ccf: contains Cerebus configurations
    extensions = ['ns' + str(_) for _ in range(1, 7)]
    extensions.extend(['nev', 'sif', 'ccf'])

    mode = 'file'

    def __init__(self, filename, nsx_override=None, nev_override=None,
                 sif_override=None, ccf_override=None, verbose=False):
        """
        Initialize the BlackrockIO class.
        """
        warnings.warn('{} is deprecated and will be removed in neo version 0.10. Use {} instead.'
                      ''.format(self.__class__, neo.io.blackrockio.BlackrockIO), FutureWarning)
        BaseIO.__init__(self)

        # Used to avoid unnecessary repetition of verbose messages
        self.__verbose_messages = []

        # remove extension from base filename
        for ext in self.extensions:
            self.filename = re.sub(
                os.path.extsep + ext + '$', '', filename)

        # remove extensions from overrides
        self._filenames = {}
        if nsx_override:
            self._filenames['nsx'] = re.sub(
                os.path.extsep + r'ns[1,2,3,4,5,6]$', '', nsx_override)
        else:
            self._filenames['nsx'] = self.filename
        if nev_override:
            self._filenames['nev'] = re.sub(
                os.path.extsep + r'nev$', '', nev_override)
        else:
            self._filenames['nev'] = self.filename
        if sif_override:
            self._filenames['sif'] = re.sub(
                os.path.extsep + r'sif$', '', sif_override)
        else:
            self._filenames['sif'] = self.filename
        if ccf_override:
            self._filenames['ccf'] = re.sub(
                os.path.extsep + r'ccf$', '', ccf_override)
        else:
            self._filenames['ccf'] = self.filename

        # check which files are available
        self._avail_files = dict.fromkeys(self.extensions, False)
        self._avail_nsx = []
        for ext in self.extensions:
            if ext.startswith('ns'):
                file2check = ''.join(
                    [self._filenames['nsx'], os.path.extsep, ext])
            else:
                file2check = ''.join(
                    [self._filenames[ext], os.path.extsep, ext])
            if os.path.exists(file2check):
                self._print_verbose("Found " + file2check + ".")
                self._avail_files[ext] = True
                if ext.startswith('ns'):
                    self._avail_nsx.append(int(ext[-1]))

        # check if there are any files present
        if not any(list(self._avail_files.values())):
            raise IOError(
                'No Blackrock files present at {}'.format(filename))

        # check if manually specified files were found
        exts = ['nsx', 'nev', 'sif', 'ccf']
        ext_overrides = [nsx_override, nev_override, sif_override, ccf_override]
        for ext, ext_override in zip(exts, ext_overrides):
            if ext_override is not None and self._avail_files[ext] is False:
                raise ValueError('Specified {} file {} could not be '
                                 'found.'.format(ext, ext_override))

        # These dictionaries are used internally to map the file specification
        # revision of the nsx and nev files to one of the reading routines
        self.__nsx_header_reader = {
            '2.1': self.__read_nsx_header_variant_a,
            '2.2': self.__read_nsx_header_variant_b,
            '2.3': self.__read_nsx_header_variant_b}
        self.__nsx_dataheader_reader = {
            '2.1': self.__read_nsx_dataheader_variant_a,
            '2.2': self.__read_nsx_dataheader_variant_b,
            '2.3': self.__read_nsx_dataheader_variant_b}
        self.__nsx_data_reader = {
            '2.1': self.__read_nsx_data_variant_a,
            '2.2': self.__read_nsx_data_variant_b,
            '2.3': self.__read_nsx_data_variant_b}
        self.__nev_header_reader = {
            '2.1': self.__read_nev_header_variant_a,
            '2.2': self.__read_nev_header_variant_b,
            '2.3': self.__read_nev_header_variant_c}
        self.__nev_data_reader = {
            '2.1': self.__read_nev_data_variant_a,
            '2.2': self.__read_nev_data_variant_a,
            '2.3': self.__read_nev_data_variant_b}
        self.__nsx_params = {
            '2.1': self.__get_nsx_param_variant_a,
            '2.2': self.__get_nsx_param_variant_b,
            '2.3': self.__get_nsx_param_variant_b}
        self.__nsx_databl_param = {
            '2.1': self.__get_nsx_databl_param_variant_a,
            '2.2': self.__get_nsx_databl_param_variant_b,
            '2.3': self.__get_nsx_databl_param_variant_b}
        self.__waveform_size = {
            '2.1': self.__get_waveform_size_variant_a,
            '2.2': self.__get_waveform_size_variant_a,
            '2.3': self.__get_waveform_size_variant_b}
        self.__channel_labels = {
            '2.1': self.__get_channel_labels_variant_a,
            '2.2': self.__get_channel_labels_variant_b,
            '2.3': self.__get_channel_labels_variant_b}
        self.__nsx_rec_times = {
            '2.1': self.__get_nsx_rec_times_variant_a,
            '2.2': self.__get_nsx_rec_times_variant_b,
            '2.3': self.__get_nsx_rec_times_variant_b}
        self.__nonneural_evtypes = {
            '2.1': self.__get_nonneural_evtypes_variant_a,
            '2.2': self.__get_nonneural_evtypes_variant_a,
            '2.3': self.__get_nonneural_evtypes_variant_b}

        # Load file spec and headers of available nev file
        if self._avail_files['nev']:
            # read nev file specification
            self.__nev_spec = self.__extract_nev_file_spec()
            self._print_verbose('Specification Version ' + self.__nev_spec)

            # read nev headers
            self.__nev_basic_header, self.__nev_ext_header = \
                self.__nev_header_reader[self.__nev_spec]()

        # Load file spec and headers of available nsx files
        self.__nsx_spec = {}
        self.__nsx_basic_header = {}
        self.__nsx_ext_header = {}
        self.__nsx_data_header = {}
        for nsx_nb in self._avail_nsx:
            # read nsx file specification
            self.__nsx_spec[nsx_nb] = self.__extract_nsx_file_spec(nsx_nb)

            # read nsx headers
            self.__nsx_basic_header[nsx_nb], self.__nsx_ext_header[nsx_nb] = \
                self.__nsx_header_reader[self.__nsx_spec[nsx_nb]](nsx_nb)

            # Read nsx data header(s) for nsx
            self.__nsx_data_header[nsx_nb] = self.__nsx_dataheader_reader[
                self.__nsx_spec[nsx_nb]](nsx_nb)

    def _print_verbose(self, text):
        """
        Print a verbose diagnostic message (string).
        """
        if self.__verbose_messages:
            if text not in self.__verbose_messages:
                self.__verbose_messages.append(text)
                print(str(self.__class__.__name__) + ': ' + text)
  322. def __extract_nsx_file_spec(self, nsx_nb):
  323. """
  324. Extract file specification from an .nsx file.
  325. """
  326. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  327. # Header structure of files specification 2.2 and higher. For files 2.1
  328. # and lower, the entries ver_major and ver_minor are not supported.
  329. dt0 = [
  330. ('file_id', 'S8'),
  331. ('ver_major', 'uint8'),
  332. ('ver_minor', 'uint8')]
  333. nsx_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  334. if nsx_file_id['file_id'].decode() == 'NEURALSG':
  335. spec = '2.1'
  336. elif nsx_file_id['file_id'].decode() == 'NEURALCD':
  337. spec = '{}.{}'.format(
  338. nsx_file_id['ver_major'], nsx_file_id['ver_minor'])
  339. else:
  340. raise IOError('Unsupported NSX file type.')
  341. return spec
  342. def __extract_nev_file_spec(self):
  343. """
  344. Extract file specification from an .nev file
  345. """
  346. filename = '.'.join([self._filenames['nev'], 'nev'])
  347. # Header structure of files specification 2.2 and higher. For files 2.1
  348. # and lower, the entries ver_major and ver_minor are not supported.
  349. dt0 = [
  350. ('file_id', 'S8'),
  351. ('ver_major', 'uint8'),
  352. ('ver_minor', 'uint8')]
  353. nev_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  354. if nev_file_id['file_id'].decode() == 'NEURALEV':
  355. spec = '{}.{}'.format(
  356. nev_file_id['ver_major'], nev_file_id['ver_minor'])
  357. else:
  358. raise IOError('NEV file type {} is not supported'.format(
  359. nev_file_id['file_id']))
  360. return spec
    def __read_nsx_header_variant_a(self, nsx_nb):
        """
        Extract nsx header information from a 2.1 .nsx file
        """
        filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])

        # basic header (file_id: NEURALSG)
        dt0 = [
            ('file_id', 'S8'),
            # label of the sampling group (e.g., "1kS/s" or "LFP Low")
            ('label', 'S16'),
            # number of 1/30000 seconds between data points
            # (e.g., if sampling rate "1 kS/s", period equals "30")
            ('period', 'uint32'),
            ('channel_count', 'uint32')]

        nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]

        # "extended" header (file_id of 2.1 files: NEURALSG)
        # (to facilitate compatibility with higher file specs)
        offset_dt0 = np.dtype(dt0).itemsize
        shape = nsx_basic_header['channel_count']
        # originally called channel_id in Blackrock user manual
        # (to facilitate compatibility with higher file specs)
        dt1 = [('electrode_id', 'uint32')]

        nsx_ext_header = np.memmap(
            filename, mode='r', shape=shape, offset=offset_dt0, dtype=dt1)

        return nsx_basic_header, nsx_ext_header
  386. def __read_nsx_header_variant_b(self, nsx_nb):
  387. """
  388. Extract nsx header information from a 2.2 or 2.3 .nsx file
  389. """
  390. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  391. # basic header (file_id: NEURALCD)
  392. dt0 = [
  393. ('file_id', 'S8'),
  394. # file specification split into major and minor version number
  395. ('ver_major', 'uint8'),
  396. ('ver_minor', 'uint8'),
  397. # bytes of basic & extended header
  398. ('bytes_in_headers', 'uint32'),
  399. # label of the sampling group (e.g., "1 kS/s" or "LFP low")
  400. ('label', 'S16'),
  401. ('comment', 'S256'),
  402. ('period', 'uint32'),
  403. ('timestamp_resolution', 'uint32'),
  404. # time origin: 2byte uint16 values for ...
  405. ('year', 'uint16'),
  406. ('month', 'uint16'),
  407. ('weekday', 'uint16'),
  408. ('day', 'uint16'),
  409. ('hour', 'uint16'),
  410. ('minute', 'uint16'),
  411. ('second', 'uint16'),
  412. ('millisecond', 'uint16'),
            # channel_count: number of channels (matches the number of extended headers)
  414. ('channel_count', 'uint32')]
  415. nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  416. # extended header (type: CC)
  417. offset_dt0 = np.dtype(dt0).itemsize
  418. shape = nsx_basic_header['channel_count']
  419. dt1 = [
  420. ('type', 'S2'),
  421. ('electrode_id', 'uint16'),
  422. ('electrode_label', 'S16'),
  423. # used front-end amplifier bank (e.g., A, B, C, D)
  424. ('physical_connector', 'uint8'),
  425. # used connector pin (e.g., 1-37 on bank A, B, C or D)
  426. ('connector_pin', 'uint8'),
  427. # digital and analog value ranges of the signal
  428. ('min_digital_val', 'int16'),
  429. ('max_digital_val', 'int16'),
  430. ('min_analog_val', 'int16'),
  431. ('max_analog_val', 'int16'),
  432. # units of the analog range values ("mV" or "uV")
  433. ('units', 'S16'),
  434. # filter settings used to create nsx from source signal
  435. ('hi_freq_corner', 'uint32'),
  436. ('hi_freq_order', 'uint32'),
  437. ('hi_freq_type', 'uint16'), # 0=None, 1=Butterworth
  438. ('lo_freq_corner', 'uint32'),
  439. ('lo_freq_order', 'uint32'),
  440. ('lo_freq_type', 'uint16')] # 0=None, 1=Butterworth
  441. nsx_ext_header = np.memmap(
  442. filename, mode='r', shape=shape, offset=offset_dt0, dtype=dt1)
  443. return nsx_basic_header, nsx_ext_header
  444. def __read_nsx_dataheader(self, nsx_nb, offset):
  445. """
  446. Reads data header following the given offset of an nsx file.
  447. """
  448. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  449. # dtypes data header
  450. dt2 = [
  451. ('header', 'uint8'),
  452. ('timestamp', 'uint32'),
  453. ('nb_data_points', 'uint32')]
  454. return np.memmap(
  455. filename, mode='r', dtype=dt2, shape=1, offset=offset)[0]
  456. def __read_nsx_dataheader_variant_a(
  457. self, nsx_nb, filesize=None, offset=None):
  458. """
  459. Reads None for the nsx data header of file spec 2.1. Introduced to
  460. facilitate compatibility with higher file spec.
  461. """
  462. return None
  463. def __read_nsx_dataheader_variant_b(
  464. self, nsx_nb, filesize=None, offset=None, ):
  465. """
  466. Reads the nsx data header for each data block following the offset of
  467. file spec 2.2 and 2.3.
  468. """
  469. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  470. filesize = self.__get_file_size(filename)
  471. data_header = {}
  472. index = 0
  473. if offset is None:
  474. offset = self.__nsx_basic_header[nsx_nb]['bytes_in_headers']
  475. while offset < filesize:
  476. index += 1
  477. dh = self.__read_nsx_dataheader(nsx_nb, offset)
  478. data_header[index] = {
  479. 'header': dh['header'],
  480. 'timestamp': dh['timestamp'],
  481. 'nb_data_points': dh['nb_data_points'],
  482. 'offset_to_data_block': offset + dh.dtype.itemsize}
            # data size = number of data points * (2 bytes * number of channels)
            # use of `int` avoids overflow problem
            data_size = int(dh['nb_data_points']) * \
                int(self.__nsx_basic_header[nsx_nb]['channel_count']) * 2
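            # Illustrative size check (hypothetical numbers): a block of
            # 30000 data points over 96 channels occupies
            # 30000 * 96 * 2 = 5,760,000 bytes.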
  487. # define new offset (to possible next data block)
  488. offset = data_header[index]['offset_to_data_block'] + data_size
  489. return data_header
  490. def __read_nsx_data_variant_a(self, nsx_nb):
  491. """
  492. Extract nsx data from a 2.1 .nsx file
  493. """
  494. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  495. # get shape of data
  496. shape = (
  497. self.__nsx_databl_param['2.1']('nb_data_points', nsx_nb),
  498. self.__nsx_basic_header[nsx_nb]['channel_count'])
  499. offset = self.__nsx_params['2.1']('bytes_in_headers', nsx_nb)
  500. # read nsx data
  501. # store as dict for compatibility with higher file specs
  502. data = {1: np.memmap(
  503. filename, mode='r', dtype='int16', shape=shape, offset=offset)}
  504. return data
  505. def __read_nsx_data_variant_b(self, nsx_nb):
  506. """
  507. Extract nsx data (blocks) from a 2.2 or 2.3 .nsx file. Blocks can arise
  508. if the recording was paused by the user.
  509. """
  510. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  511. data = {}
  512. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  513. # get shape and offset of data
  514. shape = (
  515. self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points'],
  516. self.__nsx_basic_header[nsx_nb]['channel_count'])
  517. offset = \
  518. self.__nsx_data_header[nsx_nb][data_bl]['offset_to_data_block']
  519. # read data
  520. data[data_bl] = np.memmap(
  521. filename, mode='r', dtype='int16', shape=shape, offset=offset)
  522. return data
  523. def __read_nev_header(self, ext_header_variants):
  524. """
        Extract nev header information from a .nev file
  526. """
  527. filename = '.'.join([self._filenames['nev'], 'nev'])
  528. # basic header
  529. dt0 = [
  530. # Set to "NEURALEV"
  531. ('file_type_id', 'S8'),
  532. ('ver_major', 'uint8'),
  533. ('ver_minor', 'uint8'),
  534. # Flags
  535. ('additionnal_flags', 'uint16'),
  536. # File index of first data sample
  537. ('bytes_in_headers', 'uint32'),
  538. # Number of bytes per data packet (sample)
  539. ('bytes_in_data_packets', 'uint32'),
  540. # Time resolution of time stamps in Hz
  541. ('timestamp_resolution', 'uint32'),
  542. # Sampling frequency of waveforms in Hz
  543. ('sample_resolution', 'uint32'),
  544. ('year', 'uint16'),
  545. ('month', 'uint16'),
  546. ('weekday', 'uint16'),
  547. ('day', 'uint16'),
  548. ('hour', 'uint16'),
  549. ('minute', 'uint16'),
  550. ('second', 'uint16'),
  551. ('millisecond', 'uint16'),
  552. ('application_to_create_file', 'S32'),
  553. ('comment_field', 'S256'),
  554. # Number of extended headers
  555. ('nb_ext_headers', 'uint32')]
  556. nev_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  557. # extended header
        # this consists of N blocks with an 8-byte code + 24 data bytes;
        # the data bytes depend on the code and need to be converted
        # case by case
  561. shape = nev_basic_header['nb_ext_headers']
  562. offset_dt0 = np.dtype(dt0).itemsize
  563. # This is the common structure of the beginning of extended headers
  564. dt1 = [
  565. ('packet_id', 'S8'),
  566. ('info_field', 'S24')]
  567. raw_ext_header = np.memmap(
  568. filename, mode='r', offset=offset_dt0, dtype=dt1, shape=shape)
  569. nev_ext_header = {}
  570. for packet_id in ext_header_variants.keys():
  571. mask = (raw_ext_header['packet_id'] == packet_id)
  572. dt2 = self.__nev_ext_header_types()[packet_id][
  573. ext_header_variants[packet_id]]
  574. nev_ext_header[packet_id] = raw_ext_header.view(dt2)[mask]
  575. return nev_basic_header, nev_ext_header
  576. def __read_nev_header_variant_a(self):
  577. """
  578. Extract nev header information from a 2.1 .nev file
  579. """
  580. ext_header_variants = {
  581. b'NEUEVWAV': 'a',
  582. b'ARRAYNME': 'a',
  583. b'ECOMMENT': 'a',
  584. b'CCOMMENT': 'a',
  585. b'MAPFILE': 'a',
  586. b'NSASEXEV': 'a'}
  587. return self.__read_nev_header(ext_header_variants)
  588. def __read_nev_header_variant_b(self):
  589. """
  590. Extract nev header information from a 2.2 .nev file
  591. """
  592. ext_header_variants = {
  593. b'NEUEVWAV': 'b',
  594. b'ARRAYNME': 'a',
  595. b'ECOMMENT': 'a',
  596. b'CCOMMENT': 'a',
  597. b'MAPFILE': 'a',
  598. b'NEUEVLBL': 'a',
  599. b'NEUEVFLT': 'a',
  600. b'DIGLABEL': 'a',
  601. b'NSASEXEV': 'a'}
  602. return self.__read_nev_header(ext_header_variants)
  603. def __read_nev_header_variant_c(self):
  604. """
  605. Extract nev header information from a 2.3 .nev file
  606. """
  607. ext_header_variants = {
  608. b'NEUEVWAV': 'b',
  609. b'ARRAYNME': 'a',
  610. b'ECOMMENT': 'a',
  611. b'CCOMMENT': 'a',
  612. b'MAPFILE': 'a',
  613. b'NEUEVLBL': 'a',
  614. b'NEUEVFLT': 'a',
  615. b'DIGLABEL': 'a',
  616. b'VIDEOSYN': 'a',
  617. b'TRACKOBJ': 'a'}
  618. return self.__read_nev_header(ext_header_variants)
  619. def __read_nev_data(self, nev_data_masks, nev_data_types):
  620. """
        Extract nev data from a .nev file
  622. """
  623. filename = '.'.join([self._filenames['nev'], 'nev'])
  624. data_size = self.__nev_basic_header['bytes_in_data_packets']
  625. header_size = self.__nev_basic_header['bytes_in_headers']
  626. # read all raw data packets and markers
  627. dt0 = [
  628. ('timestamp', 'uint32'),
  629. ('packet_id', 'uint16'),
  630. ('value', 'S{}'.format(data_size - 6))]
  631. raw_data = np.memmap(filename, mode='r', offset=header_size, dtype=dt0)
  632. masks = self.__nev_data_masks(raw_data['packet_id'])
  633. types = self.__nev_data_types(data_size)
  634. data = {}
  635. for k, v in nev_data_masks.items():
  636. data[k] = raw_data.view(types[k][nev_data_types[k]])[masks[k][v]]
  637. return data
  638. def __read_nev_data_variant_a(self):
  639. """
  640. Extract nev data from a 2.1 & 2.2 .nev file
  641. """
  642. nev_data_masks = {
  643. 'NonNeural': 'a',
  644. 'Spikes': 'a'}
  645. nev_data_types = {
  646. 'NonNeural': 'a',
  647. 'Spikes': 'a'}
  648. return self.__read_nev_data(nev_data_masks, nev_data_types)
  649. def __read_nev_data_variant_b(self):
  650. """
  651. Extract nev data from a 2.3 .nev file
  652. """
  653. nev_data_masks = {
  654. 'NonNeural': 'a',
  655. 'Spikes': 'b',
  656. 'Comments': 'a',
  657. 'VideoSync': 'a',
  658. 'TrackingEvents': 'a',
  659. 'ButtonTrigger': 'a',
  660. 'ConfigEvent': 'a'}
  661. nev_data_types = {
  662. 'NonNeural': 'b',
  663. 'Spikes': 'a',
  664. 'Comments': 'a',
  665. 'VideoSync': 'a',
  666. 'TrackingEvents': 'a',
  667. 'ButtonTrigger': 'a',
  668. 'ConfigEvent': 'a'}
  669. return self.__read_nev_data(nev_data_masks, nev_data_types)
  670. def __nev_ext_header_types(self):
  671. """
  672. Defines extended header types for different .nev file specifications.
  673. """
  674. nev_ext_header_types = {
  675. b'NEUEVWAV': {
  676. # Version>=2.1
  677. 'a': [
  678. ('packet_id', 'S8'),
  679. ('electrode_id', 'uint16'),
  680. ('physical_connector', 'uint8'),
  681. ('connector_pin', 'uint8'),
  682. ('digitization_factor', 'uint16'),
  683. ('energy_threshold', 'uint16'),
  684. ('hi_threshold', 'int16'),
  685. ('lo_threshold', 'int16'),
  686. ('nb_sorted_units', 'uint8'),
  687. # number of bytes per waveform sample
  688. ('bytes_per_waveform', 'uint8'),
  689. ('unused', 'S10')],
  690. # Version>=2.3
  691. 'b': [
  692. ('packet_id', 'S8'),
  693. ('electrode_id', 'uint16'),
  694. ('physical_connector', 'uint8'),
  695. ('connector_pin', 'uint8'),
  696. ('digitization_factor', 'uint16'),
  697. ('energy_threshold', 'uint16'),
  698. ('hi_threshold', 'int16'),
  699. ('lo_threshold', 'int16'),
  700. ('nb_sorted_units', 'uint8'),
  701. # number of bytes per waveform sample
  702. ('bytes_per_waveform', 'uint8'),
  703. # number of samples for each waveform
  704. ('spike_width', 'uint16'),
  705. ('unused', 'S8')]},
  706. b'ARRAYNME': {
  707. 'a': [
  708. ('packet_id', 'S8'),
  709. ('electrode_array_name', 'S24')]},
  710. b'ECOMMENT': {
  711. 'a': [
  712. ('packet_id', 'S8'),
  713. ('extra_comment', 'S24')]},
  714. b'CCOMMENT': {
  715. 'a': [
  716. ('packet_id', 'S8'),
  717. ('continued_comment', 'S24')]},
  718. b'MAPFILE': {
  719. 'a': [
  720. ('packet_id', 'S8'),
  721. ('mapFile', 'S24')]},
  722. b'NEUEVLBL': {
  723. 'a': [
  724. ('packet_id', 'S8'),
  725. ('electrode_id', 'uint16'),
  726. # label of this electrode
  727. ('label', 'S16'),
  728. ('unused', 'S6')]},
  729. b'NEUEVFLT': {
  730. 'a': [
  731. ('packet_id', 'S8'),
  732. ('electrode_id', 'uint16'),
  733. ('hi_freq_corner', 'uint32'),
  734. ('hi_freq_order', 'uint32'),
  735. # 0=None 1=Butterworth
  736. ('hi_freq_type', 'uint16'),
  737. ('lo_freq_corner', 'uint32'),
  738. ('lo_freq_order', 'uint32'),
  739. # 0=None 1=Butterworth
  740. ('lo_freq_type', 'uint16'),
  741. ('unused', 'S2')]},
  742. b'DIGLABEL': {
  743. 'a': [
  744. ('packet_id', 'S8'),
  745. # Read name of digital
  746. ('label', 'S16'),
  747. # 0=serial, 1=parallel
  748. ('mode', 'uint8'),
  749. ('unused', 'S7')]},
  750. b'NSASEXEV': {
  751. 'a': [
  752. ('packet_id', 'S8'),
  753. # Read frequency of periodic packet generation
  754. ('frequency', 'uint16'),
  755. # Read if digital input triggers events
  756. ('digital_input_config', 'uint8'),
  757. # Read if analog input triggers events
  758. ('analog_channel_1_config', 'uint8'),
  759. ('analog_channel_1_edge_detec_val', 'uint16'),
  760. ('analog_channel_2_config', 'uint8'),
  761. ('analog_channel_2_edge_detec_val', 'uint16'),
  762. ('analog_channel_3_config', 'uint8'),
  763. ('analog_channel_3_edge_detec_val', 'uint16'),
  764. ('analog_channel_4_config', 'uint8'),
  765. ('analog_channel_4_edge_detec_val', 'uint16'),
  766. ('analog_channel_5_config', 'uint8'),
  767. ('analog_channel_5_edge_detec_val', 'uint16'),
  768. ('unused', 'S6')]},
  769. b'VIDEOSYN': {
  770. 'a': [
  771. ('packet_id', 'S8'),
  772. ('video_source_id', 'uint16'),
  773. ('video_source', 'S16'),
  774. ('frame_rate', 'float32'),
  775. ('unused', 'S2')]},
  776. b'TRACKOBJ': {
  777. 'a': [
  778. ('packet_id', 'S8'),
  779. ('trackable_type', 'uint16'),
  780. ('trackable_id', 'uint16'),
  781. ('point_count', 'uint16'),
  782. ('video_source', 'S16'),
  783. ('unused', 'S2')]}}
  784. return nev_ext_header_types
  785. def __nev_data_masks(self, packet_ids):
  786. """
  787. Defines data masks for different .nev file specifications depending on
  788. the given packet identifiers.
  789. """
  790. __nev_data_masks = {
  791. 'NonNeural': {
  792. 'a': (packet_ids == 0)},
  793. 'Spikes': {
  794. # Version 2.1 & 2.2
  795. 'a': (0 < packet_ids) & (packet_ids <= 255),
  796. # Version>=2.3
  797. 'b': (0 < packet_ids) & (packet_ids <= 2048)},
  798. 'Comments': {
  799. 'a': (packet_ids == 0xFFFF)},
  800. 'VideoSync': {
  801. 'a': (packet_ids == 0xFFFE)},
  802. 'TrackingEvents': {
  803. 'a': (packet_ids == 0xFFFD)},
  804. 'ButtonTrigger': {
  805. 'a': (packet_ids == 0xFFFC)},
  806. 'ConfigEvent': {
  807. 'a': (packet_ids == 0xFFFB)}}
  808. return __nev_data_masks
  809. def __nev_data_types(self, data_size):
  810. """
  811. Defines data types for different .nev file specifications depending on
  812. the given packet identifiers.
  813. """
  814. __nev_data_types = {
  815. 'NonNeural': {
  816. # Version 2.1 & 2.2
  817. 'a': [
  818. ('timestamp', 'uint32'),
  819. ('packet_id', 'uint16'),
  820. ('packet_insertion_reason', 'uint8'),
  821. ('reserved', 'uint8'),
  822. ('digital_input', 'uint16'),
  823. ('analog_input_channel_1', 'int16'),
  824. ('analog_input_channel_2', 'int16'),
  825. ('analog_input_channel_3', 'int16'),
  826. ('analog_input_channel_4', 'int16'),
  827. ('analog_input_channel_5', 'int16'),
  828. ('unused', 'S{}'.format(data_size - 20))],
  829. # Version>=2.3
  830. 'b': [
  831. ('timestamp', 'uint32'),
  832. ('packet_id', 'uint16'),
  833. ('packet_insertion_reason', 'uint8'),
  834. ('reserved', 'uint8'),
  835. ('digital_input', 'uint16'),
  836. ('unused', 'S{}'.format(data_size - 10))]},
  837. 'Spikes': {
  838. 'a': [
  839. ('timestamp', 'uint32'),
  840. ('packet_id', 'uint16'),
  841. ('unit_class_nb', 'uint8'),
  842. ('reserved', 'uint8'),
  843. ('waveform', 'S{}'.format(data_size - 8))]},
  844. 'Comments': {
  845. 'a': [
  846. ('timestamp', 'uint32'),
  847. ('packet_id', 'uint16'),
  848. ('char_set', 'uint8'),
  849. ('flag', 'uint8'),
  850. ('data', 'uint32'),
  851. ('comment', 'S{}'.format(data_size - 12))]},
  852. 'VideoSync': {
  853. 'a': [
  854. ('timestamp', 'uint32'),
  855. ('packet_id', 'uint16'),
  856. ('video_file_nb', 'uint16'),
  857. ('video_frame_nb', 'uint32'),
  858. ('video_elapsed_time', 'uint32'),
  859. ('video_source_id', 'uint32'),
  860. ('unused', 'int8', (data_size - 20,))]},
  861. 'TrackingEvents': {
  862. 'a': [
  863. ('timestamp', 'uint32'),
  864. ('packet_id', 'uint16'),
  865. ('parent_id', 'uint16'),
  866. ('node_id', 'uint16'),
  867. ('node_count', 'uint16'),
  868. ('point_count', 'uint16'),
  869. ('tracking_points', 'uint16', ((data_size - 14) // 2,))]},
  870. 'ButtonTrigger': {
  871. 'a': [
  872. ('timestamp', 'uint32'),
  873. ('packet_id', 'uint16'),
  874. ('trigger_type', 'uint16'),
  875. ('unused', 'int8', (data_size - 8,))]},
  876. 'ConfigEvent': {
  877. 'a': [
  878. ('timestamp', 'uint32'),
  879. ('packet_id', 'uint16'),
  880. ('config_change_type', 'uint16'),
  881. ('config_changed', 'S{}'.format(data_size - 8))]}}
  882. return __nev_data_types
  883. def __nev_params(self, param_name):
  884. """
  885. Returns wanted nev parameter.
  886. """
  887. nev_parameters = {
  888. 'bytes_in_data_packets':
  889. self.__nev_basic_header['bytes_in_data_packets'],
  890. 'rec_datetime': datetime.datetime(
  891. year=self.__nev_basic_header['year'],
  892. month=self.__nev_basic_header['month'],
  893. day=self.__nev_basic_header['day'],
  894. hour=self.__nev_basic_header['hour'],
  895. minute=self.__nev_basic_header['minute'],
  896. second=self.__nev_basic_header['second'],
  897. microsecond=self.__nev_basic_header['millisecond']),
  898. 'max_res': self.__nev_basic_header['timestamp_resolution'],
  899. 'channel_ids': self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  900. 'channel_labels': self.__channel_labels[self.__nev_spec](),
  901. 'event_unit': pq.CompoundUnit("1.0/{} * s".format(
  902. self.__nev_basic_header['timestamp_resolution'])),
  903. 'nb_units': dict(zip(
  904. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  905. self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units'])),
  906. 'digitization_factor': dict(zip(
  907. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  908. self.__nev_ext_header[b'NEUEVWAV']['digitization_factor'])),
  909. 'data_size': self.__nev_basic_header['bytes_in_data_packets'],
  910. 'waveform_size': self.__waveform_size[self.__nev_spec](),
  911. 'waveform_dtypes': self.__get_waveforms_dtype(),
  912. 'waveform_sampling_rate':
  913. self.__nev_basic_header['sample_resolution'] * pq.Hz,
  914. 'waveform_time_unit': pq.CompoundUnit("1.0/{} * s".format(
  915. self.__nev_basic_header['sample_resolution'])),
  916. 'waveform_unit': pq.uV}
  917. return nev_parameters[param_name]
  918. def __get_file_size(self, filename):
  919. """
  920. Returns the file size in bytes for the given file.
  921. """
  922. filebuf = open(filename, 'rb')
  923. filebuf.seek(0, os.SEEK_END)
  924. file_size = filebuf.tell()
  925. filebuf.close()
  926. return file_size
  927. def __get_min_time(self):
  928. """
  929. Returns the smallest time that can be determined from the recording for
  930. use as the lower bound n in an interval [n,m).
  931. """
  932. tp = []
  933. if self._avail_files['nev']:
  934. tp.extend(self.__get_nev_rec_times()[0])
  935. for nsx_i in self._avail_nsx:
  936. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[0])
  937. return min(tp)
  938. def __get_max_time(self):
  939. """
  940. Returns the largest time that can be determined from the recording for
  941. use as the upper bound m in an interval [n,m).
  942. """
  943. tp = []
  944. if self._avail_files['nev']:
  945. tp.extend(self.__get_nev_rec_times()[1])
  946. for nsx_i in self._avail_nsx:
  947. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[1])
  948. return max(tp)
  949. def __get_nev_rec_times(self):
  950. """
  951. Extracts minimum and maximum time points from a nev file.
  952. """
  953. filename = '.'.join([self._filenames['nev'], 'nev'])
  954. dt = [('timestamp', 'uint32')]
  955. offset = \
  956. self.__get_file_size(filename) - \
  957. self.__nev_params('bytes_in_data_packets')
  958. last_data_packet = np.memmap(
  959. filename, mode='r', offset=offset, dtype=dt)[0]
  960. n_starts = [0 * self.__nev_params('event_unit')]
  961. n_stops = [
  962. last_data_packet['timestamp'] * self.__nev_params('event_unit')]
  963. return n_starts, n_stops
  964. def __get_nsx_rec_times_variant_a(self, nsx_nb):
  965. """
  966. Extracts minimum and maximum time points from a 2.1 nsx file.
  967. """
  968. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  969. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  970. 'time_unit', nsx_nb)
  971. highest_res = self.__nev_params('event_unit')
  972. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  973. 'bytes_in_headers', nsx_nb)
  974. nb_data_points = int(
  975. (self.__get_file_size(filename) - bytes_in_headers)
  976. / (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1)
  977. # add n_start
  978. n_starts = [(0 * t_unit).rescale(highest_res)]
  979. # add n_stop
  980. n_stops = [(nb_data_points * t_unit).rescale(highest_res)]
  981. return n_starts, n_stops
  982. def __get_nsx_rec_times_variant_b(self, nsx_nb):
  983. """
  984. Extracts minimum and maximum time points from a 2.2 or 2.3 nsx file.
  985. """
  986. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  987. 'time_unit', nsx_nb)
  988. highest_res = self.__nev_params('event_unit')
  989. n_starts = []
  990. n_stops = []
        # add n_start and n_stop for all data blocks
  992. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  993. ts0 = self.__nsx_data_header[nsx_nb][data_bl]['timestamp']
  994. nbdp = self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points']
  995. # add n_start
  996. start = ts0 * t_unit
  997. n_starts.append(start.rescale(highest_res))
  998. # add n_stop
  999. stop = start + nbdp * t_unit
  1000. n_stops.append(stop.rescale(highest_res))
  1001. return sorted(n_starts), sorted(n_stops)
  1002. def __get_waveforms_dtype(self):
  1003. """
  1004. Extracts the actual waveform dtype set for each channel.
  1005. """
        # Blackrock code giving the appropriate dtype
  1007. conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
  1008. # get all electrode ids from nev ext header
  1009. all_el_ids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1010. # get the dtype of waveform (this is stupidly complicated)
  1011. if self.__is_set(
  1012. np.array(self.__nev_basic_header['additionnal_flags']), 0):
  1013. dtype_waveforms = {k: 'int16' for k in all_el_ids}
  1014. else:
  1015. # extract bytes per waveform
  1016. waveform_bytes = \
  1017. self.__nev_ext_header[b'NEUEVWAV']['bytes_per_waveform']
            # extract dtype for waveforms for each electrode
  1019. dtype_waveforms = dict(zip(all_el_ids, conv[waveform_bytes]))
  1020. return dtype_waveforms
  1021. def __get_channel_labels_variant_a(self):
  1022. """
  1023. Returns labels for all channels for file spec 2.1
  1024. """
  1025. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1026. labels = []
  1027. for elid in elids:
  1028. if elid < 129:
  1029. labels.append('chan%i' % elid)
  1030. else:
  1031. labels.append('ainp%i' % (elid - 129 + 1))
  1032. return dict(zip(elids, labels))
  1033. def __get_channel_labels_variant_b(self):
  1034. """
  1035. Returns labels for all channels for file spec 2.2 and 2.3
  1036. """
  1037. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1038. labels = self.__nev_ext_header[b'NEUEVLBL']['label']
  1039. return dict(zip(elids, labels)) if len(labels) > 0 else None
  1040. def __get_waveform_size_variant_a(self):
  1041. """
        Returns waveform sizes for all channels for file spec 2.1 and 2.2
  1043. """
  1044. wf_dtypes = self.__get_waveforms_dtype()
  1045. nb_bytes_wf = self.__nev_basic_header['bytes_in_data_packets'] - 8
  1046. wf_sizes = {
  1047. ch: int(nb_bytes_wf / np.dtype(dt).itemsize) for ch, dt in
  1048. wf_dtypes.items()}
  1049. return wf_sizes
  1050. def __get_waveform_size_variant_b(self):
  1051. """
        Returns waveform sizes for all channels for file spec 2.3
  1053. """
  1054. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1055. spike_widths = self.__nev_ext_header[b'NEUEVWAV']['spike_width']
  1056. return dict(zip(elids, spike_widths))
  1057. def __get_left_sweep_waveforms(self):
  1058. """
  1059. Returns left sweep of waveforms for each channel. Left sweep is defined
  1060. as the time from the beginning of the waveform to the trigger time of
  1061. the corresponding spike.
  1062. """
  1063. # TODO: Double check if this is the actual setting for Blackrock
  1064. wf_t_unit = self.__nev_params('waveform_time_unit')
  1065. all_ch = self.__nev_params('channel_ids')
  1066. # TODO: Double check if this is the correct assumption (10 samples)
  1067. # default value: threshold crossing after 10 samples of waveform
  1068. wf_left_sweep = {ch: 10 * wf_t_unit for ch in all_ch}
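        # For illustration (assuming the usual 30 kHz waveform sampling
        # rate): 10 samples correspond to a left sweep of 10 / 30000 s,
        # i.e. roughly 0.33 ms before the trigger time.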
  1069. # non-default: threshold crossing at center of waveform
  1070. # wf_size = self.__nev_params('waveform_size')
  1071. # wf_left_sweep = dict(
  1072. # [(ch, (wf_size[ch] / 2) * wf_t_unit) for ch in all_ch])
  1073. return wf_left_sweep
  1074. def __get_nsx_param_variant_a(self, param_name, nsx_nb):
  1075. """
  1076. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1077. 2.1.
  1078. """
        # Here, min/max_analog_val and min/max_digital_val are not available in
        # the nsx, so that we must estimate these parameters from the
        # digitization factor of the nev (information by Kian Torab, Blackrock
        # Microsystems). Here dig_factor=max_analog_val/max_digital_val. We set
        # max_digital_val to 1000, and max_analog_val=dig_factor. dig_factor is
        # given in nV by definition, so the units turn out to be uV.
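        # Illustrative example (assumed values, not read from any file):
        # dig_factor = 250 (nV per bit) yields max_analog_val = 250 (uV)
        # over max_digital_val = 1000, i.e. 250 uV / 1000 = 0.25 uV
        # = 250 nV per bit, matching the digitization factor.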
  1085. labels = []
  1086. dig_factor = []
  1087. for elid in self.__nsx_ext_header[nsx_nb]['electrode_id']:
  1088. if self._avail_files['nev']:
  1089. # This is a workaround for the DigitalFactor overflow in NEV
  1090. # files recorded with buggy Cerebus system.
  1091. # Fix taken from: NMPK toolbox by Blackrock,
  1092. # file openNEV, line 464,
  1093. # git rev. d0a25eac902704a3a29fa5dfd3aed0744f4733ed
  1094. df = self.__nev_params('digitization_factor')[elid]
  1095. if df == 21516:
  1096. df = 152592.547
  1097. dig_factor.append(df)
  1098. else:
  1099. dig_factor.append(None)
  1100. if elid < 129:
  1101. labels.append('chan%i' % elid)
  1102. else:
  1103. labels.append('ainp%i' % (elid - 129 + 1))
  1104. nsx_parameters = {
  1105. 'labels': labels,
  1106. 'units': np.array(
  1107. [b'uV']
  1108. * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1109. 'min_analog_val': -1 * np.array(dig_factor),
  1110. 'max_analog_val': np.array(dig_factor),
  1111. 'min_digital_val': np.array(
  1112. [-1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1113. 'max_digital_val': np.array(
  1114. [1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1115. 'timestamp_resolution': 30000,
  1116. 'bytes_in_headers':
  1117. self.__nsx_basic_header[nsx_nb].dtype.itemsize
  1118. + self.__nsx_ext_header[nsx_nb].dtype.itemsize
  1119. * self.__nsx_basic_header[nsx_nb]['channel_count'],
  1120. 'sampling_rate':
  1121. 30000 / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1122. 'time_unit': pq.CompoundUnit("1.0/{}*s".format(
  1123. 30000 / self.__nsx_basic_header[nsx_nb]['period']))}
  1124. return nsx_parameters[param_name]
  1125. def __get_nsx_param_variant_b(self, param_name, nsx_nb):
  1126. """
  1127. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1128. 2.2 and 2.3.
  1129. """
  1130. nsx_parameters = {
  1131. 'labels':
  1132. self.__nsx_ext_header[nsx_nb]['electrode_label'],
  1133. 'units':
  1134. self.__nsx_ext_header[nsx_nb]['units'],
  1135. 'min_analog_val':
  1136. self.__nsx_ext_header[nsx_nb]['min_analog_val'],
  1137. 'max_analog_val':
  1138. self.__nsx_ext_header[nsx_nb]['max_analog_val'],
  1139. 'min_digital_val':
  1140. self.__nsx_ext_header[nsx_nb]['min_digital_val'],
  1141. 'max_digital_val':
  1142. self.__nsx_ext_header[nsx_nb]['max_digital_val'],
  1143. 'timestamp_resolution':
  1144. self.__nsx_basic_header[nsx_nb]['timestamp_resolution'],
  1145. 'bytes_in_headers':
  1146. self.__nsx_basic_header[nsx_nb]['bytes_in_headers'],
  1147. 'sampling_rate':
  1148. self.__nsx_basic_header[nsx_nb]['timestamp_resolution']
  1149. / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1150. 'time_unit': pq.CompoundUnit("1.0/{}*s".format(
  1151. self.__nsx_basic_header[nsx_nb]['timestamp_resolution']
  1152. / self.__nsx_basic_header[nsx_nb]['period']))}
  1153. return nsx_parameters[param_name]
  1154. def __get_nsx_databl_param_variant_a(
  1155. self, param_name, nsx_nb, n_start=None, n_stop=None):
  1156. """
  1157. Returns data block parameter (param_name) for a given nsx (nsx_nb) for
  1158. file spec 2.1. Arg 'n_start' should not be specified! It is only set
  1159. for compatibility reasons with higher file spec.
  1160. """
  1161. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  1162. t_starts, t_stops = \
  1163. self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
  1164. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1165. 'bytes_in_headers', nsx_nb)
  1166. # extract parameters from nsx basic extended and data header
  1167. data_parameters = {
  1168. 'nb_data_points': int(
  1169. (self.__get_file_size(filename) - bytes_in_headers)
  1170. / (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1),
  1171. 'databl_idx': 1,
  1172. 'databl_t_start': t_starts[0],
  1173. 'databl_t_stop': t_stops[0]}
  1174. return data_parameters[param_name]
  1175. def __get_nsx_databl_param_variant_b(
  1176. self, param_name, nsx_nb, n_start, n_stop):
  1177. """
  1178. Returns data block parameter (param_name) for a given nsx (nsx_nb) with
  1179. a wanted n_start for file spec 2.2 and 2.3.
  1180. """
        t_starts, t_stops = \
            self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)

        # data header
        for d_bl in self.__nsx_data_header[nsx_nb].keys():
            # from "data header" with corresponding t_start and t_stop
            data_parameters = {
                'nb_data_points':
                    self.__nsx_data_header[nsx_nb][d_bl]['nb_data_points'],
                'databl_idx': d_bl,
                'databl_t_start': t_starts[d_bl - 1],
                'databl_t_stop': t_stops[d_bl - 1]}

            if t_starts[d_bl - 1] <= n_start < n_stop <= t_stops[d_bl - 1]:
                return data_parameters[param_name]
            elif n_start < t_starts[d_bl - 1] < n_stop <= t_stops[d_bl - 1]:
                self._print_verbose(
                    "User n_start ({}) is smaller than the corresponding "
                    "t_start of the available ns{} datablock "
                    "({}).".format(n_start, nsx_nb, t_starts[d_bl - 1]))
                return data_parameters[param_name]
            elif t_starts[d_bl - 1] <= n_start < t_stops[d_bl - 1] < n_stop:
                self._print_verbose(
                    "User n_stop ({}) is larger than the corresponding "
                    "t_stop of the available ns{} datablock "
                    "({}).".format(n_stop, nsx_nb, t_stops[d_bl - 1]))
                return data_parameters[param_name]
            elif n_start < t_starts[d_bl - 1] < t_stops[d_bl - 1] < n_stop:
                self._print_verbose(
                    "User n_start ({}) is smaller than the corresponding "
                    "t_start and user n_stop ({}) is larger than the "
                    "corresponding t_stop of the available ns{} datablock "
                    "({}).".format(
                        n_start, n_stop, nsx_nb,
                        (t_starts[d_bl - 1], t_stops[d_bl - 1])))
                return data_parameters[param_name]
            else:
                continue

        raise ValueError(
            "The requested n_start and n_stop do not overlap with the "
            "t_start and t_stop of any available ns%i datablock" % nsx_nb)

    def __get_nonneural_evtypes_variant_a(self, data):
        """
        Defines event types and the necessary parameters to extract them from
        a 2.1 and 2.2 nev file.
        """
        # TODO: add annotations of nev ext header (NSASEXEX) to event types

        # digital events
        event_types = {
            'digital_input_port': {
                'name': 'digital_input_port',
                'field': 'digital_input',
                'mask': self.__is_set(data['packet_insertion_reason'], 0),
                'desc': "Events of the digital input port"},
            'serial_input_port': {
                'name': 'serial_input_port',
                'field': 'digital_input',
                'mask':
                    self.__is_set(data['packet_insertion_reason'], 0)
                    & self.__is_set(data['packet_insertion_reason'], 7),
                'desc': "Events of the serial input port"}}

        # analog input events via threshold crossings
        for ch in range(5):
            event_types.update({
                'analog_input_channel_{}'.format(ch + 1): {
                    'name': 'analog_input_channel_{}'.format(ch + 1),
                    'field': 'analog_input_channel_{}'.format(ch + 1),
                    'mask': self.__is_set(
                        data['packet_insertion_reason'], ch + 1),
                    'desc': "Values of analog input channel {} in mV "
                            "(+/- 5000)".format(ch + 1)}})

        # TODO: define field and desc
        event_types.update({
            'periodic_sampling_events': {
                'name': 'periodic_sampling_events',
                'field': 'digital_input',
                'mask': self.__is_set(data['packet_insertion_reason'], 6),
                'desc': 'Periodic sampling event of a certain frequency'}})

        return event_types

    def __get_nonneural_evtypes_variant_b(self, data):
        """
        Defines event types and the necessary parameters to extract them from
        a 2.3 nev file.
        """
        # digital events
        event_types = {
            'digital_input_port': {
                'name': 'digital_input_port',
                'field': 'digital_input',
                'mask': self.__is_set(data['packet_insertion_reason'], 0),
                'desc': "Events of the digital input port"},
            'serial_input_port': {
                'name': 'serial_input_port',
                'field': 'digital_input',
                'mask':
                    self.__is_set(data['packet_insertion_reason'], 0)
                    & self.__is_set(data['packet_insertion_reason'], 7),
                'desc': "Events of the serial input port"}}

        return event_types

    def __get_unit_classification(self, un_id):
        """
        Returns the Blackrock unit classification of an online spike sorting
        for the given unit id (un_id).
        """
        # Blackrock unit classification
        if un_id == 0:
            return 'unclassified'
        elif 1 <= un_id <= 16:
            return '{}'.format(un_id)
        elif 17 <= un_id <= 244:
            raise ValueError(
                "Unit id {} is not used by daq system".format(un_id))
        elif un_id == 255:
            return 'noise'
        else:
            raise ValueError("Unit id {} cannot be classified".format(un_id))

    def __is_set(self, flag, pos):
        """
        Checks if bit is set at the given position for flag. If flag is an
        array, an array will be returned.
        """
        return flag & (1 << pos) > 0

    def __transform_nsx_to_load(self, nsx_to_load):
        """
        Transforms the input argument nsx_to_load to a list of integers.
        """
        if hasattr(nsx_to_load, "__len__") and len(nsx_to_load) == 0:
            nsx_to_load = None

        if isinstance(nsx_to_load, int):
            nsx_to_load = [nsx_to_load]

        if isinstance(nsx_to_load, str):
            if nsx_to_load.lower() == 'none':
                nsx_to_load = None
            elif nsx_to_load.lower() == 'all':
                nsx_to_load = self._avail_nsx
            else:
                raise ValueError("Invalid specification of nsx_to_load.")

        if nsx_to_load:
            for nsx_nb in nsx_to_load:
                if not self._avail_files['ns' + str(nsx_nb)]:
                    raise ValueError("ns%i is not available" % nsx_nb)

        return nsx_to_load

    def __transform_channels(self, channels, nsx_to_load):
        """
        Transforms the input argument channels to a list of integers.
        """
        all_channels = []

        nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)

        if nsx_to_load is not None:
            for nsx_nb in nsx_to_load:
                all_channels.extend(
                    self.__nsx_ext_header[nsx_nb]['electrode_id'].astype(int))

        elec_id = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
        all_channels.extend(elec_id.astype(int))
        all_channels = np.unique(all_channels).tolist()

        if hasattr(channels, "__len__") and len(channels) == 0:
            channels = None

        if isinstance(channels, int):
            channels = [channels]

        if isinstance(channels, str):
            if channels.lower() == 'none':
                channels = None
            elif channels.lower() == 'all':
                channels = all_channels
            else:
                raise ValueError("Invalid channel specification.")

        if channels:
            if len(set(all_channels) & set(channels)) < len(channels):
                self._print_verbose(
                    "Ignoring unknown channel ID(s) specified in channels.")
            # Make sure all channels are valid and contain no duplicates
            channels = list(set(all_channels).intersection(set(channels)))
        else:
            self._print_verbose("No channel is specified, therefore no "
                                "time series or unit data is loaded.")

        return channels

    def __transform_units(self, units, channels):
        """
        Transforms the input argument units to a dictionary, where keys
        (channels) are int, and values (units) are lists of integers.
        """
        if isinstance(units, dict):
            for ch, u in units.items():
                if ch not in channels:
                    self._print_verbose(
                        "Units contain a channel id which is not listed in "
                        "channels")
                if isinstance(u, int):
                    units[ch] = [u]
                if hasattr(u, '__len__') and len(u) == 0:
                    units[ch] = None
                if isinstance(u, str):
                    if u.lower() == 'none':
                        units[ch] = None
                    elif u.lower() == 'all':
                        units[ch] = list(range(17))
                        units[ch].append(255)
                    else:
                        raise ValueError("Invalid unit specification.")
        else:
            if hasattr(units, "__len__") and len(units) == 0:
                units = None

            if isinstance(units, str):
                if units.lower() == 'none':
                    units = None
                elif units.lower() == 'all':
                    units = list(range(17))
                    units.append(255)
                else:
                    raise ValueError("Invalid unit specification.")

            if isinstance(units, int):
                units = [units]

            if (channels is None) and (units is not None):
                raise ValueError(
                    'At least one channel needs to be loaded to load units')

            if units:
                units = dict(zip(channels, [units] * len(channels)))

        if units is None:
            self._print_verbose("No units are specified, therefore no "
                                "unit or spiketrain is loaded.")

        return units

    def __transform_times(self, n, default_n):
        """
        Transforms the input argument n_start or n_stop (n) to a list of
        quantities. In case n is None, it is set to the given default value
        (default_n).
        """
        highest_res = self.__nev_params('event_unit')

        if isinstance(n, pq.Quantity):
            n = [n.rescale(highest_res)]
        elif hasattr(n, "__len__"):
            n = [tp.rescale(highest_res) if tp is not None
                 else default_n for tp in n]
        elif n is None:
            n = [default_n]
        else:
            raise ValueError('Invalid specification of n_start/n_stop.')

        return n

    def __merge_time_ranges(
            self, user_n_starts, user_n_stops, nsx_to_load):
        """
        Merges, after validation, the user-specified n_starts and n_stops with
        the intrinsically given n_starts and n_stops (from, e.g., recording
        pauses) of the file set.

        Final n_starts and n_stops are chosen so that the time range of each
        resulting segment is set to the best meaningful maximum. This means
        that the duration of the signals stored in the segments might be
        smaller than the actually set duration of the segment.
        """
        # define the highest time resolution
        # (for accurate manipulations of the time settings)
        max_time = self.__get_max_time()
        min_time = self.__get_min_time()
        highest_res = self.__nev_params('event_unit')

        user_n_starts = self.__transform_times(
            user_n_starts, min_time)
        user_n_stops = self.__transform_times(
            user_n_stops, max_time)

        # check if user provided as many n_starts as n_stops
        if len(user_n_starts) != len(user_n_stops):
            raise ValueError("n_starts and n_stops must be of equal length")

        # if necessary clip user n_starts and n_stops to the time range of
        # the file set and drop intervals that lie completely outside of it
        start_stop_id = 0
        while start_stop_id < len(user_n_starts):
            if user_n_starts[start_stop_id] < min_time:
                self._print_verbose(
                    "Entry of n_start '{}' is smaller than min time of the "
                    "file set: n_start set to min time of file set"
                    "".format(user_n_starts[start_stop_id]))
                user_n_starts[start_stop_id] = min_time

            if user_n_stops[start_stop_id] > max_time:
                self._print_verbose(
                    "Entry of n_stop '{}' is larger than max time of the "
                    "file set: n_stop set to max time of file set"
                    "".format(user_n_stops[start_stop_id]))
                user_n_stops[start_stop_id] = max_time

            if (user_n_stops[start_stop_id] < min_time
                    or user_n_starts[start_stop_id] > max_time):
                user_n_stops.pop(start_stop_id)
                user_n_starts.pop(start_stop_id)
                self._print_verbose(
                    "Entry of n_start is larger than max time or entry of "
                    "n_stop is smaller than min time of the "
                    "file set: n_start and n_stop are ignored")
                continue
            start_stop_id += 1

        # get intrinsic time settings of nsx files (incl. rec pauses)
        n_starts_files = []
        n_stops_files = []
        if nsx_to_load is not None:
            for nsx_nb in nsx_to_load:
                start_stop = \
                    self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
                n_starts_files.append(start_stop[0])
                n_stops_files.append(start_stop[1])

        # reducing n_starts from wanted nsx files to minima
        # (keep recording pause if it occurs)
        if len(n_starts_files) > 0:
            if np.shape(n_starts_files)[1] > 1:
                n_starts_files = [
                    tp * highest_res for tp in np.min(n_starts_files, axis=1)]
            else:
                n_starts_files = [
                    tp * highest_res for tp in np.min(n_starts_files, axis=0)]

        # reducing n_stops from wanted nsx files to maxima
        # (keep recording pause if it occurs)
        if len(n_stops_files) > 0:
            if np.shape(n_stops_files)[1] > 1:
                n_stops_files = [
                    tp * highest_res for tp in np.max(n_stops_files, axis=1)]
            else:
                n_stops_files = [
                    tp * highest_res for tp in np.max(n_stops_files, axis=0)]

        # merge user time settings with intrinsic nsx time settings
        n_starts = []
        n_stops = []
        for start, stop in zip(user_n_starts, user_n_stops):
            # check if start and stop of user create a positive time interval
            if not start < stop:
                raise ValueError(
                    "t(i) in n_starts has to be smaller than t(i) in n_stops")

            # Reduce n_starts_files to given intervals of user & add start
            if len(n_starts_files) > 0:
                mask = (n_starts_files > start) & (n_starts_files < stop)
                red_n_starts_files = np.array(n_starts_files)[mask]
                merged_n_starts = [start] + [
                    tp * highest_res for tp in red_n_starts_files]
            else:
                merged_n_starts = [start]

            # Reduce n_stops_files to given intervals of user & add stop
            if len(n_stops_files) > 0:
                mask = (n_stops_files > start) & (n_stops_files < stop)
                red_n_stops_files = np.array(n_stops_files)[mask]
                merged_n_stops = [
                    tp * highest_res for tp in red_n_stops_files] + [stop]
            else:
                merged_n_stops = [stop]

            # Define combined user and file n_starts and n_stops
            # case one:
            if len(merged_n_starts) == len(merged_n_stops):
                if len(merged_n_starts) + len(merged_n_stops) == 2:
                    n_starts.extend(merged_n_starts)
                    n_stops.extend(merged_n_stops)
                if len(merged_n_starts) + len(merged_n_stops) > 2:
                    merged_n_starts.remove(merged_n_starts[1])
                    n_starts.extend(merged_n_starts)
                    merged_n_stops.remove(merged_n_stops[-2])
                    n_stops.extend(merged_n_stops)
            # case two:
            elif len(merged_n_starts) < len(merged_n_stops):
                n_starts.extend(merged_n_starts)
                merged_n_stops.remove(merged_n_stops[-2])
                n_stops.extend(merged_n_stops)
            # case three:
            elif len(merged_n_starts) > len(merged_n_stops):
                merged_n_starts.remove(merged_n_starts[1])
                n_starts.extend(merged_n_starts)
                n_stops.extend(merged_n_stops)

        if len(n_starts) > len(user_n_starts) and \
                len(n_stops) > len(user_n_stops):
            self._print_verbose(
                "Additional recording pauses were detected. There will be "
                "more segments than the user expects.")

        return n_starts, n_stops
    def __read_event(self, n_start, n_stop, data, ev_dict, lazy=False):
        """
        Creates an event for non-neural experimental events in nev data.
        """
        event_unit = self.__nev_params('event_unit')

        if lazy:
            times = []
            labels = np.array([], dtype='S')
        else:
            times = data['timestamp'][ev_dict['mask']] * event_unit
            labels = data[ev_dict['field']][ev_dict['mask']].astype(str)

        # mask for given time interval
        mask = (times >= n_start) & (times < n_stop)

        if np.sum(mask) > 0:
            ev = Event(
                times=times[mask].astype(float),
                labels=labels[mask],
                name=ev_dict['name'],
                description=ev_dict['desc'])
            if lazy:
                ev.lazy_shape = np.sum(mask)
        else:
            ev = None

        return ev

    def __read_spiketrain(
            self, n_start, n_stop, spikes, channel_id, unit_id,
            load_waveforms=False, scaling='raw', lazy=False):
        """
        Creates spiketrains for Spikes in nev data.
        """
        event_unit = self.__nev_params('event_unit')

        # define a name for spiketrain
        # (unique identifier: 1000 * elid + unit_nb)
        name = "Unit {}".format(1000 * channel_id + unit_id)

        # define description for spiketrain
        desc = 'SpikeTrain from channel: {}, unit: {}'.format(
            channel_id, self.__get_unit_classification(unit_id))

        # get spike times for given time interval
        if not lazy:
            times = spikes['timestamp'] * event_unit
            mask = (times >= n_start) & (times <= n_stop)
            times = times[mask].astype(float)
        else:
            times = np.array([]) * event_unit

        st = SpikeTrain(
            times=times,
            name=name,
            description=desc,
            file_origin='.'.join([self._filenames['nev'], 'nev']),
            t_start=n_start,
            t_stop=n_stop)

        if lazy:
            st.lazy_shape = np.shape(times)

        # load waveforms if requested
        if load_waveforms and not lazy:
            wf_dtype = self.__nev_params('waveform_dtypes')[channel_id]
            wf_size = self.__nev_params('waveform_size')[channel_id]

            waveforms = spikes['waveform'].flatten().view(wf_dtype)
            waveforms = waveforms.reshape(int(spikes.size), 1, int(wf_size))

            if scaling == 'voltage':
                st.waveforms = (
                    waveforms[mask] * self.__nev_params('waveform_unit')
                    * self.__nev_params('digitization_factor')[channel_id]
                    / 1000.)
            elif scaling == 'raw':
                st.waveforms = waveforms[mask] * pq.dimensionless
            else:
                raise ValueError(
                    'Unknown option {} for parameter scaling.'.format(scaling))
            st.sampling_rate = self.__nev_params('waveform_sampling_rate')
            st.left_sweep = self.__get_left_sweep_waveforms()[channel_id]

        # add additional annotations
        st.annotate(
            unit_id=int(unit_id),
            channel_id=int(channel_id))

        return st
    def __read_analogsignal(
            self, n_start, n_stop, signal, channel_id, nsx_nb,
            scaling='raw', lazy=False):
        """
        Creates analogsignal for signal of channel in nsx data.
        """
        # TODO: The following part is extremely slow, since the memmaps for the
        # headers are created again and again. In particular, this makes lazy
        # loading slow as well. Solution would be to create header memmaps up
        # front.

        # get parameters
        sampling_rate = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'sampling_rate', nsx_nb)
        nsx_time_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'time_unit', nsx_nb)
        max_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'max_analog_val', nsx_nb)
        min_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'min_analog_val', nsx_nb)
        max_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'max_digital_val', nsx_nb)
        min_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'min_digital_val', nsx_nb)
        units = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'units', nsx_nb)
        labels = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'labels', nsx_nb)

        dbl_idx = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
            'databl_idx', nsx_nb, n_start, n_stop)
        t_start = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
            'databl_t_start', nsx_nb, n_start, n_stop)
        t_stop = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
            'databl_t_stop', nsx_nb, n_start, n_stop)

        elids_nsx = list(self.__nsx_ext_header[nsx_nb]['electrode_id'])
        if channel_id in elids_nsx:
            idx_ch = elids_nsx.index(channel_id)
        else:
            return None

        description = \
            "AnalogSignal from channel: {}, label: {}, nsx: {}".format(
                channel_id, labels[idx_ch], nsx_nb)

        # TODO: Find a more time/memory efficient way to handle lazy loading
        data_times = np.arange(
            t_start.item(), t_stop.item(),
            self.__nsx_basic_header[nsx_nb]['period']) * t_start.units
        mask = (data_times >= n_start) & (data_times < n_stop)

        if lazy:
            lazy_shape = (np.sum(mask),)
            sig_ch = np.array([], dtype='float32')
            sig_unit = pq.dimensionless
            t_start = n_start.rescale('s')
        else:
            data_times = data_times[mask].astype(float)

            if scaling == 'voltage':
                if not self._avail_files['nev']:
                    raise ValueError(
                        'Cannot convert signals in filespec 2.1 nsX '
                        'files to voltage without nev file.')
                sig_ch = signal[dbl_idx][:, idx_ch][mask].astype('float32')

                # transform dig value to physical value
                sym_ana = (max_ana[idx_ch] == -min_ana[idx_ch])
                sym_dig = (max_dig[idx_ch] == -min_dig[idx_ch])
                if sym_ana and sym_dig:
                    sig_ch *= float(max_ana[idx_ch]) / float(max_dig[idx_ch])
                else:
                    # general case (same result as above for symmetric input)
                    sig_ch -= min_dig[idx_ch]
                    sig_ch *= float(max_ana[idx_ch] - min_ana[idx_ch]) / \
                        float(max_dig[idx_ch] - min_dig[idx_ch])
                    sig_ch += float(min_ana[idx_ch])
                sig_unit = units[idx_ch].decode()
            elif scaling == 'raw':
                sig_ch = signal[dbl_idx][:, idx_ch][mask].astype(int)
                sig_unit = pq.dimensionless
            else:
                raise ValueError(
                    'Unknown option {} for parameter '
                    'scaling.'.format(scaling))

            t_start = data_times[0].rescale(nsx_time_unit)

        anasig = AnalogSignal(
            signal=pq.Quantity(sig_ch, sig_unit, copy=False),
            sampling_rate=sampling_rate,
            t_start=t_start,
            name=labels[idx_ch],
            description=description,
            file_origin='.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb]))

        if lazy:
            anasig.lazy_shape = lazy_shape

        anasig.annotate(
            nsx=nsx_nb,
            channel_id=int(channel_id))

        return anasig

    def __read_unit(self, unit_id, channel_id):
        """
        Creates unit with unit id for given channel id.
        """
        # define a name for the unit
        # (unique identifier: 1000 * elid + unit_nb)
        name = "Unit {}".format(1000 * channel_id + unit_id)

        # define description for the unit
        desc = 'Unit from channel: {}, id: {}'.format(
            channel_id, self.__get_unit_classification(unit_id))

        un = Unit(
            name=name,
            description=desc,
            file_origin='.'.join([self._filenames['nev'], 'nev']))

        # add additional annotations
        un.annotate(
            unit_id=int(unit_id),
            channel_id=int(channel_id))

        return un

    def __read_channelindex(
            self, channel_id, index=None, channel_units=None, cascade=True):
        """
        Returns a ChannelIndex with the given index for the given channel,
        containing a list of neo.core.unit.Unit objects for the given units.
        """
        flt_type = {0: 'None', 1: 'Butterworth'}

        chidx = ChannelIndex(
            np.array([channel_id]),
            file_origin=self.filename)

        if index is not None:
            chidx.index = np.array(index, np.dtype('i'))
            chidx.name = "ChannelIndex {}".format(chidx.index)
        else:
            chidx.name = "ChannelIndex"

        if self._avail_files['nev']:
            channel_labels = self.__nev_params('channel_labels')
            if channel_labels is not None:
                chidx.channel_names = np.array([channel_labels[channel_id]])
            chidx.channel_ids = np.array([channel_id])

            # additional annotations from nev
            if channel_id in self.__nev_ext_header[b'NEUEVWAV']['electrode_id']:
                get_idx = list(
                    self.__nev_ext_header[b'NEUEVWAV']['electrode_id']).index(
                        channel_id)
                chidx.annotate(
                    connector_ID=self.__nev_ext_header[
                        b'NEUEVWAV']['physical_connector'][get_idx],
                    connector_pinID=self.__nev_ext_header[
                        b'NEUEVWAV']['connector_pin'][get_idx],
                    nev_dig_factor=self.__nev_ext_header[
                        b'NEUEVWAV']['digitization_factor'][get_idx],
                    nev_energy_threshold=self.__nev_ext_header[
                        b'NEUEVWAV']['energy_threshold'][get_idx] * pq.uV,
                    nev_hi_threshold=self.__nev_ext_header[
                        b'NEUEVWAV']['hi_threshold'][get_idx] * pq.uV,
                    nev_lo_threshold=self.__nev_ext_header[
                        b'NEUEVWAV']['lo_threshold'][get_idx] * pq.uV,
                    nb_sorted_units=self.__nev_ext_header[
                        b'NEUEVWAV']['nb_sorted_units'][get_idx],
                    waveform_size=self.__waveform_size[self.__nev_spec](
                        )[channel_id] * self.__nev_params('waveform_time_unit'))

                # additional annotations from nev (only for file_spec > 2.1)
                if self.__nev_spec in ['2.2', '2.3']:
                    get_idx = list(
                        self.__nev_ext_header[
                            b'NEUEVFLT']['electrode_id']).index(
                                channel_id)
                    # filter type codes (extracted from blackrock manual)
                    chidx.annotate(
                        nev_hi_freq_corner=self.__nev_ext_header[b'NEUEVFLT'][
                            'hi_freq_corner'][get_idx]
                        / 1000. * pq.Hz,
                        nev_hi_freq_order=self.__nev_ext_header[b'NEUEVFLT'][
                            'hi_freq_order'][get_idx],
                        nev_hi_freq_type=flt_type[self.__nev_ext_header[
                            b'NEUEVFLT']['hi_freq_type'][get_idx]],
                        nev_lo_freq_corner=self.__nev_ext_header[
                            b'NEUEVFLT']['lo_freq_corner'][get_idx]
                        / 1000. * pq.Hz,
                        nev_lo_freq_order=self.__nev_ext_header[
                            b'NEUEVFLT']['lo_freq_order'][get_idx],
                        nev_lo_freq_type=flt_type[self.__nev_ext_header[
                            b'NEUEVFLT']['lo_freq_type'][get_idx]])

            # additional information about the LFP signal
            if self.__nev_spec in ['2.2', '2.3'] and self.__nsx_ext_header:
                # It does not matter which nsX file to ask for this info
                k = list(self.__nsx_ext_header.keys())[0]
                if channel_id in self.__nsx_ext_header[k]['electrode_id']:
                    get_idx = list(
                        self.__nsx_ext_header[k]['electrode_id']).index(
                            channel_id)
                    chidx.annotate(
                        nsx_hi_freq_corner=self.__nsx_ext_header[k][
                            'hi_freq_corner'][get_idx] / 1000. * pq.Hz,
                        nsx_lo_freq_corner=self.__nsx_ext_header[k][
                            'lo_freq_corner'][get_idx] / 1000. * pq.Hz,
                        nsx_hi_freq_order=self.__nsx_ext_header[k][
                            'hi_freq_order'][get_idx],
                        nsx_lo_freq_order=self.__nsx_ext_header[k][
                            'lo_freq_order'][get_idx],
                        nsx_hi_freq_type=flt_type[
                            self.__nsx_ext_header[k]['hi_freq_type'][get_idx]],
                        nsx_lo_freq_type=flt_type[
                            self.__nsx_ext_header[k]['lo_freq_type'][get_idx]])

        chidx.description = \
            "Container for units and groups analogsignals of one recording " \
            "channel across segments."

        if not cascade:
            return chidx

        if self._avail_files['nev']:
            # read nev data
            nev_data = self.__nev_data_reader[self.__nev_spec]()

            if channel_units is not None:
                # extract first data for channel
                ch_mask = (nev_data['Spikes']['packet_id'] == channel_id)
                data_ch = nev_data['Spikes'][ch_mask]

                for un_id in channel_units:
                    if un_id in np.unique(data_ch['unit_class_nb']):
                        un = self.__read_unit(
                            unit_id=un_id, channel_id=channel_id)
                        chidx.units.append(un)

        chidx.create_many_to_one_relationship()

        return chidx
    def read_segment(
            self, n_start, n_stop, name=None, description=None, index=None,
            nsx_to_load='none', channels='none', units='none',
            load_waveforms=False, load_events=False, scaling='raw',
            lazy=False, cascade=True):
        """
        Returns an annotated neo.core.segment.Segment.

        Args:
            n_start (Quantity):
                Start time of maximum time range of signals contained in this
                segment.
            n_stop (Quantity):
                Stop time of maximum time range of signals contained in this
                segment.
            name (None, string):
                If None, name is set to default, otherwise it is set to user
                input.
            description (None, string):
                If None, description is set to default, otherwise it is set to
                user input.
            index (None, int):
                If not None, index of segment is set to user index.
            nsx_to_load (int, list, str):
                ID(s) of nsx file(s) from which to load data, e.g., if set to
                5 only data from the ns5 file are loaded. If 'none' or empty
                list, no nsx files and therefore no analog signals are loaded.
                If 'all', data from all available nsx are loaded.
            channels (int, list, str):
                Channel id(s) from which to load data. If 'none' or empty
                list, no channels and therefore no analog signals or
                spiketrains are loaded. If 'all', all available channels are
                loaded.
            units (int, list, str, dict):
                ID(s) of unit(s) to load. If 'none' or empty list, no units
                and therefore no spiketrains are loaded. If 'all', all
                available units are loaded. If dict, the above can be
                specified individually for each channel (keys), e.g.
                {1: 5, 2: 'all'} loads unit 5 from channel 1 and all units
                from channel 2.
            load_waveforms (boolean):
                If True, waveforms are attached to all loaded spiketrains.
            load_events (boolean):
                If True, all recorded events are loaded.
            scaling (str):
                Determines whether time series of individual
                electrodes/channels are returned as AnalogSignals containing
                raw integer samples ('raw'), or scaled to arrays of floats
                representing voltage ('voltage'). Note that for file
                specification 2.1 and lower, the option 'voltage' requires a
                nev file to be present.
            lazy (boolean):
                If True, only the shape of the data is loaded.
            cascade (boolean):
                If False, only the segment without children is returned.

        Returns:
            Segment (neo.Segment):
                Returns the specified segment. See documentation of
                `read_block()` for a full list of annotations of all child
                objects.
        """
        # Make sure that input args are transformed into correct instances
        nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
        channels = self.__transform_channels(channels, nsx_to_load)
        units = self.__transform_units(units, channels)

        seg = Segment(file_origin=self.filename)

        # set user defined annotations if they were provided
        if index is None:
            seg.index = 0
        else:
            seg.index = index
        if name is None:
            seg.name = "Segment {}".format(seg.index)
        else:
            seg.name = name
        if description is None:
            seg.description = "Segment containing data from t_min to t_max."
        else:
            seg.description = description

        if not cascade:
            return seg

        if self._avail_files['nev']:
            # filename = self._filenames['nev'] + '.nev'

            # annotate segment according to file headers
            seg.rec_datetime = datetime.datetime(
                year=self.__nev_basic_header['year'],
                month=self.__nev_basic_header['month'],
                day=self.__nev_basic_header['day'],
                hour=self.__nev_basic_header['hour'],
                minute=self.__nev_basic_header['minute'],
                second=self.__nev_basic_header['second'],
                microsecond=self.__nev_basic_header['millisecond'])

            # read nev data
            nev_data = self.__nev_data_reader[self.__nev_spec]()

            # read non-neural experimental events
            if load_events:
                ev_dict = self.__nonneural_evtypes[self.__nev_spec](
                    nev_data['NonNeural'])
                for ev_type in ev_dict.keys():
                    ev = self.__read_event(
                        n_start=n_start,
                        n_stop=n_stop,
                        data=nev_data['NonNeural'],
                        ev_dict=ev_dict[ev_type],
                        lazy=lazy)
                    if ev is not None:
                        seg.events.append(ev)

                # TODO: not yet implemented (only avail in nev_spec 2.3)
                # videosync events
                # trackingevents events
                # buttontrigger events
                # configevent events

            # get spiketrain
            if units is not None:
                not_existing_units = []
                for ch_id in units.keys():
                    # extract first data for channel
                    ch_mask = (nev_data['Spikes']['packet_id'] == ch_id)
                    data_ch = nev_data['Spikes'][ch_mask]
                    if units[ch_id] is not None:
                        for un_id in units[ch_id]:
                            if un_id in np.unique(data_ch['unit_class_nb']):
                                # then extract data for unit if unit exists
                                un_mask = (data_ch['unit_class_nb'] == un_id)
                                data_un = data_ch[un_mask]
                                st = self.__read_spiketrain(
                                    n_start=n_start,
                                    n_stop=n_stop,
                                    spikes=data_un,
                                    channel_id=ch_id,
                                    unit_id=un_id,
                                    load_waveforms=load_waveforms,
                                    scaling=scaling,
                                    lazy=lazy)
                                seg.spiketrains.append(st)
                            else:
                                not_existing_units.append(un_id)
                        if not_existing_units:
                            self._print_verbose(
                                "Units {} on channel {} do not "
                                "exist".format(not_existing_units, ch_id))
                    else:
                        self._print_verbose(
                            "There are no units specified for channel "
                            "{}".format(ch_id))

        if nsx_to_load is not None:
            for nsx_nb in nsx_to_load:
                # read nsx data
                nsx_data = \
                    self.__nsx_data_reader[self.__nsx_spec[nsx_nb]](nsx_nb)

                # read Analogsignals
                for ch_id in channels:
                    anasig = self.__read_analogsignal(
                        n_start=n_start,
                        n_stop=n_stop,
                        signal=nsx_data,
                        channel_id=ch_id,
                        nsx_nb=nsx_nb,
                        scaling=scaling,
                        lazy=lazy)
                    if anasig is not None:
                        seg.analogsignals.append(anasig)

        # TODO: not yet implemented
        # if self._avail_files['sif']:
        #     sif_header = self._read_sif(self._filenames['sif'] + '.sif')

        # TODO: not yet implemented
        # if self._avail_files['ccf']:
        #     ccf_header = self._read_sif(self._filenames['ccf'] + '.ccf')

        seg.create_many_to_one_relationship()

        return seg
    def read_block(
            self, index=None, name=None, description=None, nsx_to_load='none',
            n_starts=None, n_stops=None, channels='none', units='none',
            load_waveforms=False, load_events=False, scaling='raw',
            lazy=False, cascade=True):
        """
        Args:
            index (None, int):
                If not None, index of block is set to user input.
            name (None, str):
                If None, name is set to default, otherwise it is set to user
                input.
            description (None, str):
                If None, description is set to default, otherwise it is set to
                user input.
            nsx_to_load (int, list, str):
                ID(s) of nsx file(s) from which to load data, e.g., if set to
                5 only data from the ns5 file are loaded. If 'none' or empty
                list, no nsx files and therefore no analog signals are loaded.
                If 'all', data from all available nsx are loaded.
            n_starts (None, Quantity, list):
                Start times for data in each segment. Number of entries must
                be equal to length of n_stops. If None, intrinsic recording
                start times of the file set are used.
            n_stops (None, Quantity, list):
                Stop times for data in each segment. Number of entries must
                be equal to length of n_starts. If None, intrinsic recording
                stop times of the file set are used.
            channels (int, list, str):
                Channel id(s) from which to load data. If 'none' or empty
                list, no channels and therefore no analog signals or
                spiketrains are loaded. If 'all', all available channels are
                loaded.
            units (int, list, str, dict):
                ID(s) of unit(s) to load. If 'none' or empty list, no units
                and therefore no spiketrains are loaded. If 'all', all
                available units are loaded. If dict, the above can be
                specified individually for each channel (keys), e.g.
                {1: 5, 2: 'all'} loads unit 5 from channel 1 and all units
                from channel 2.
            load_waveforms (boolean):
                If True, waveforms are attached to all loaded spiketrains.
            load_events (boolean):
                If True, all recorded events are loaded.
            scaling (str):
                Determines whether time series of individual
                electrodes/channels are returned as AnalogSignals containing
                raw integer samples ('raw'), or scaled to arrays of floats
                representing voltage ('voltage'). Note that for file
                specification 2.1 and lower, the option 'voltage' requires a
                nev file to be present.
            lazy (bool):
                If True, only the shape of the data is loaded.
            cascade (bool or "lazy"):
                If False, only the block without children is returned.

        Returns:
            Block (neo.Block):
                Block linking all loaded Neo objects.

                Block annotations:
                    avail_file_set (list):
                        List of extensions of all available files for the
                        given recording.
                    avail_nsx (list of int):
                        List of integers specifying the .nsX files available,
                        e.g., [2, 5] indicates that an ns2 and an ns5 file
                        are available.
                    avail_nev (bool):
                        True if a .nev file is available.
                    avail_ccf (bool):
                        True if a .ccf file is available.
                    avail_sif (bool):
                        True if a .sif file is available.
                    rec_pauses (bool):
                        True if the session contains a recording pause (i.e.,
                        multiple segments).
                    nb_segments (int):
                        Number of segments created after merging recording
                        times specified by user with the intrinsic ones of
                        the file set.

                Segment annotations:
                    None.

                ChannelIndex annotations:
                    waveform_size (Quantity):
                        Length of time used to save spike waveforms (in units
                        of 1/30000 s).
                    nev_hi_freq_corner (Quantity),
                    nev_lo_freq_corner (Quantity),
                    nev_hi_freq_order (int), nev_lo_freq_order (int),
                    nev_hi_freq_type (str), nev_lo_freq_type (str),
                    nev_hi_threshold, nev_lo_threshold,
                    nev_energy_threshold (Quantity):
                        Indicates parameters of spike detection.
                    nsx_hi_freq_corner (Quantity),
                    nsx_lo_freq_corner (Quantity),
                    nsx_hi_freq_order (int), nsx_lo_freq_order (int),
                    nsx_hi_freq_type (str), nsx_lo_freq_type (str):
                        Indicates parameters of the filtered signal in one of
                        the files ns1-ns5 (ns6, if available, is not
                        filtered).
                    nev_dig_factor (int):
                        Digitization factor in microvolts of the nev file,
                        used to convert raw samples to volt.
                    connector_ID, connector_pinID (int):
                        ID of connector and pin on the connector where the
                        channel was recorded from.
                    nb_sorted_units (int):
                        Number of sorted units on this channel (noise, mua
                        and sua).

                Unit annotations:
                    unit_id (int):
                        ID of the unit.
                    channel_id (int):
                        Channel ID (Blackrock ID) from which the unit was
                        loaded (equiv. to the single list entry in the
                        attribute channel_ids of the ChannelIndex parent).

                AnalogSignal annotations:
                    nsx (int):
                        nsX file the signal was loaded from, e.g., 5
                        indicates the .ns5 file.
                    channel_id (int):
                        Channel ID (Blackrock ID) from which the signal was
                        loaded.

                Spiketrain annotations:
                    unit_id (int):
                        ID of the unit from which the spikes were recorded.
                    channel_id (int):
                        Channel ID (Blackrock ID) from which the spikes were
                        loaded.

                Event annotations:
                    The resulting Block contains one Event object with the
                    name `digital_input_port`. It contains all digitally
                    recorded events, with the event code coded in the labels
                    of the Event. The Event object contains no further
                    annotation.
        """
        # Make sure that input args are transformed into correct instances
        nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
        channels = self.__transform_channels(channels, nsx_to_load)
        units = self.__transform_units(units, channels)

        # Create block
        bl = Block(file_origin=self.filename)

        # set user defined annotations if they were provided
        if index is not None:
            bl.index = index
        if name is None:
            bl.name = "Blackrock Data Block"
        else:
            bl.name = name
        if description is None:
            bl.description = "Block of data from Blackrock file set."
        else:
            bl.description = description
        if self._avail_files['nev']:
            bl.rec_datetime = self.__nev_params('rec_datetime')

        bl.annotate(
            avail_file_set=[k for k, v in self._avail_files.items() if v])
        bl.annotate(avail_nsx=self._avail_nsx)
        bl.annotate(avail_nev=self._avail_files['nev'])
        bl.annotate(avail_sif=self._avail_files['sif'])
        bl.annotate(avail_ccf=self._avail_files['ccf'])
        bl.annotate(rec_pauses=False)

        # Test n_starts and n_stops user requirements and combine them if
        # possible with file internal n_starts and n_stops from rec pauses.
        n_starts, n_stops = \
            self.__merge_time_ranges(n_starts, n_stops, nsx_to_load)

        bl.annotate(nb_segments=len(n_starts))

        if not cascade:
            return bl

        # read segment
        for seg_idx, (n_start, n_stop) in enumerate(zip(n_starts, n_stops)):
            seg = self.read_segment(
                n_start=n_start,
                n_stop=n_stop,
                index=seg_idx,
                nsx_to_load=nsx_to_load,
                channels=channels,
                units=units,
                load_waveforms=load_waveforms,
                load_events=load_events,
                scaling=scaling,
                lazy=lazy,
                cascade=cascade)
            bl.segments.append(seg)

        # read channelindexes
        if channels:
            for ch_id in channels:
                if units and ch_id in units.keys():
                    ch_units = units[ch_id]
                else:
                    ch_units = None
                chidx = self.__read_channelindex(
                    channel_id=ch_id,
                    index=0,
                    channel_units=ch_units,
                    cascade=cascade)
                for seg in bl.segments:
                    if ch_units:
                        for un in chidx.units:
                            sts = seg.filter(
                                targdict={'name': un.name},
                                objects='SpikeTrain')
                            for st in sts:
                                un.spiketrains.append(st)
                    anasigs = seg.filter(
                        targdict={'channel_id': ch_id},
                        objects='AnalogSignal')
                    for anasig in anasigs:
                        chidx.analogsignals.append(anasig)
                bl.channel_indexes.append(chidx)

        bl.create_many_to_one_relationship()

        return bl
    def __str__(self):
        """
        Prints summary of the Blackrock data file set.
        """
        output = "\nFile Origins for Blackrock File Set\n" \
                 "====================================\n"
        for ftype in self._filenames.keys():
            output += ftype + ':' + self._filenames[ftype] + '\n'

        if self._avail_files['nev']:
            output += "\nEvent Parameters (NEV)\n" \
                      "====================================\n" \
                      "Timestamp resolution (Hz): " + \
                      str(self.__nev_basic_header['timestamp_resolution']) + \
                      "\nWaveform resolution (Hz): " + \
                      str(self.__nev_basic_header['sample_resolution'])

            if b'NEUEVWAV' in self.__nev_ext_header.keys():
                avail_el = \
                    self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
                con = \
                    self.__nev_ext_header[b'NEUEVWAV']['physical_connector']
                pin = \
                    self.__nev_ext_header[b'NEUEVWAV']['connector_pin']
                nb_units = \
                    self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units']
                output += "\n\nAvailable electrode IDs:\n" \
                          "====================================\n"
                for i, el in enumerate(avail_el):
                    output += "Electrode ID %i: " % el

                    channel_labels = self.__nev_params('channel_labels')
                    if channel_labels is not None:
                        output += "label %s: " % channel_labels[el]

                    output += "connector: %i, " % con[i]
                    output += "pin: %i, " % pin[i]
                    output += 'nb_units: %i\n' % nb_units[i]

        for nsx_nb in self._avail_nsx:
            analog_res = self.__nsx_params[self.__nsx_spec[nsx_nb]](
                'sampling_rate', nsx_nb)
            avail_el = [
                el for el in self.__nsx_ext_header[nsx_nb]['electrode_id']]
            output += "\nAnalog Parameters (NS" \
                + str(nsx_nb) + ")\n===================================="
            output += "\nResolution (Hz): %i" % analog_res
            output += "\nAvailable channel IDs: " + \
                ", ".join(["%i" % a for a in avail_el]) + "\n"

        return output