blackrockio_v4.py

  1. # -*- coding: utf-8 -*-
  2. """
  3. Module for reading data from files in the Blackrock format.
  4. This module is an older implementation with old neo.io API.
  5. A new class combining BlackrockRawIO and BaseFromRaw
  6. supersedes this one.
  7. This work is based on:
  8. * Chris Rodgers - first version
  9. * Michael Denker, Lyuba Zehl - second version
  10. * Samuel Garcia - third version
  11. * Lyuba Zehl, Michael Denker - fourth version
  12. This IO supports reading only.
  13. This IO is able to read:
  14. * the nev file which contains spikes
  15. * ns1, ns2, .., ns6 files that contain signals at different sampling rates
  16. This IO can handle the following Blackrock file specifications:
  17. * 2.1
  18. * 2.2
  19. * 2.3
  20. The neural data channels are 1 - 128.
  21. The analog inputs are 129 - 144. (129 - 137 AC coupled, 138 - 144 DC coupled)
  22. "nev": "spike- and event-data; 30000 Hz",
  23. "ns1": "analog data: 500 Hz",
  24. "ns2": "analog data: 1000 Hz",
  25. "ns3": "analog data: 2000 Hz",
  26. "ns4": "analog data: 10000 Hz",
  27. "ns5": "analog data: 30000 Hz",
  28. "ns6": "analog data: 30000 Hz (no digital filter)"
  29. TODO:
  30. * videosync events (file spec 2.3)
  31. * tracking events (file spec 2.3)
  32. * buttontrigger events (file spec 2.3)
  33. * config events (file spec 2.3)
  34. * check left sweep settings of Blackrock
  35. * check nsx offsets (file spec 2.1)
  36. * add info of nev ext header (NSASEXEV) to non-neural events
  37. (file spec 2.1 and 2.2)
  38. * read sif file information
  39. * read ccf file information
  40. * fix reading of periodic sampling events (non-neural event type)
  41. (file spec 2.1 and 2.2)
  42. """
  43. from __future__ import division
  44. import datetime
  45. import os
  46. import re
  47. import numpy as np
  48. import quantities as pq
  49. import neo
  50. from neo.io.baseio import BaseIO
  51. from neo.core import (Block, Segment, SpikeTrain, Unit, Event,
  52. ChannelIndex, AnalogSignal)
  53. if __name__ == '__main__':
  54. pass
  55. class BlackrockIO(BaseIO):
  56. """
  57. Class for reading data in from a file set recorded by the Blackrock
  58. (Cerebus) recording system.
  59. Upon initialization, the class is linked to the available set of Blackrock
  60. files. Data can be read as a neo Block or neo Segment object using the
  61. read_block or read_segment function, respectively.
  62. Note: This routine will handle files according to specification 2.1, 2.2,
  63. and 2.3. Recording pauses that may occur in file specifications 2.2 and
  64. 2.3 are automatically extracted and the data set is split into different
  65. segments.
  66. Inherits from:
  67. neo.io.BaseIO
  68. The Blackrock data format consists not of a single file, but a set of
  69. different files. This constructor associates itself with a set of files
  70. that constitute a common data set. By default, all files belonging to
  71. the file set have the same base name, but different extensions.
  72. However, by using the override parameters, individual filenames can
  73. be set.
  74. Args:
  75. filename (string):
  76. File name (without extension) of the set of Blackrock files to
  77. associate with. Any .nsX or .nev, .sif, or .ccf extensions are
  78. ignored when parsing this parameter.
  79. nsx_override (string):
  80. File name of the .nsX files (without extension). If None,
  81. filename is used.
  82. Default: None.
  83. nev_override (string):
  84. File name of the .nev file (without extension). If None,
  85. filename is used.
  86. Default: None.
  87. sif_override (string):
  88. File name of the .sif file (without extension). If None,
  89. filename is used.
  90. Default: None.
  91. ccf_override (string):
  92. File name of the .ccf file (without extension). If None,
  93. filename is used.
  94. Default: None.
  95. verbose (boolean):
  96. If True, the class will output additional diagnostic
  97. information on stdout.
  98. Default: False
  99. Returns:
  100. -
  101. Examples:
  102. >>> a = BlackrockIO('myfile')
  103. Loads a set of files consisting of myfile.ns1, ...,
  104. myfile.ns6, and myfile.nev
  105. >>> b = BlackrockIO('myfile', nev_override='sorted')
  106. Loads the analog data from the set of files myfile.ns1, ...,
  107. myfile.ns6, but reads spike/event data from sorted.nev
  108. """
  109. # Class variables demonstrating capabilities of this IO
  110. is_readable = True
  111. is_writable = False
  112. # This IO can only manipulate continuous data, spikes, and events
  113. supported_objects = [
  114. Block, Segment, Event, AnalogSignal, SpikeTrain, Unit, ChannelIndex]
  115. readable_objects = [Block, Segment]
  116. writeable_objects = []
  117. has_header = False
  118. is_streameable = False
  119. read_params = {
  120. neo.Block: [
  121. ('nsx_to_load', {
  122. 'value': 'none',
  123. 'label': "List of nsx files (ids, int) to read."}),
  124. ('n_starts', {
  125. 'value': None,
  126. 'label': "List of n_start points (Quantity) to create "
  127. "segments from."}),
  128. ('n_stops', {
  129. 'value': None,
  130. 'label': "List of n_stop points (Quantity) to create "
  131. "segments from."}),
  132. ('channels', {
  133. 'value': 'none',
  134. 'label': "List of channels (ids, int) to load data from."}),
  135. ('units', {
  136. 'value': 'none',
  137. 'label': "Dictionary for units (values, list of int) to load "
  138. "for each channel (key, int)."}),
  139. ('load_waveforms', {
  140. 'value': False,
  141. 'label': "States if waveforms should be loaded and attached "
  142. "to spiketrain"}),
  143. ('load_events', {
  144. 'value': False,
  145. 'label': "States if events should be loaded."})],
  146. neo.Segment: [
  147. ('n_start', {
  148. 'label': "Start time point (Quantity) for segment"}),
  149. ('n_stop', {
  150. 'label': "Stop time point (Quantity) for segment"}),
  151. ('nsx_to_load', {
  152. 'value': 'none',
  153. 'label': "List of nsx files (ids, int) to read."}),
  154. ('channels', {
  155. 'value': 'none',
  156. 'label': "List of channels (ids, int) to load data from."}),
  157. ('units', {
  158. 'value': 'none',
  159. 'label': "Dictionary for units (values, list of int) to load "
  160. "for each channel (key, int)."}),
  161. ('load_waveforms', {
  162. 'value': False,
  163. 'label': "States if waveforms should be loaded and attached "
  164. "to spiketrain"}),
  165. ('load_events', {
  166. 'value': False,
  167. 'label': "States if events should be loaded."})]}
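# Sketch of a single-segment read (hedged; keyword names are taken from the
# neo.Segment entry of read_params above, and the values are illustrative):
#
#     import quantities as pq
#     seg = io.read_segment(n_start=0 * pq.s, n_stop=10 * pq.s,
#                           nsx_to_load=[2], channels=[1, 2],
#                           load_waveforms=False, load_events=True)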
  168. write_params = {}
  169. name = 'Blackrock IO'
  170. description = "This IO reads .nev/.nsX files of the Blackrock " + \
  171. "(Cerebus) recording system."
  172. # The possible file extensions of the Cerebus system and their content:
  173. # ns1: contains analog data; sampled at 500 Hz (+ digital filters)
  174. # ns2: contains analog data; sampled at 1000 Hz (+ digital filters)
  175. # ns3: contains analog data; sampled at 2000 Hz (+ digital filters)
  176. # ns4: contains analog data; sampled at 10000 Hz (+ digital filters)
  177. # ns5: contains analog data; sampled at 30000 Hz (+ digital filters)
  178. # ns6: contains analog data; sampled at 30000 Hz (no digital filters)
  179. # nev: contains spike- and event-data; sampled at 30000 Hz
  180. # sif: contains institution and patient info (XML)
  181. # ccf: contains Cerebus configurations
  182. extensions = ['ns' + str(_) for _ in range(1, 7)]
  183. extensions.extend(['nev', 'sif', 'ccf'])
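# extensions evaluates to
# ['ns1', 'ns2', 'ns3', 'ns4', 'ns5', 'ns6', 'nev', 'sif', 'ccf']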
  184. mode = 'file'
  185. def __init__(self, filename, nsx_override=None, nev_override=None,
  186. sif_override=None, ccf_override=None, verbose=False):
  187. """
  188. Initialize the BlackrockIO class.
  189. """
  190. BaseIO.__init__(self)
  191. # Used to avoid unnecessary repetition of verbose messages
  192. self.__verbose_messages = []
  193. # remove extension from the base filename
  194. for ext in self.extensions:
  195. self.filename = re.sub(
  196. os.path.extsep + ext + '$', '', filename)
  197. # remove extensions from overrides
  198. self._filenames = {}
  199. if nsx_override:
  200. self._filenames['nsx'] = re.sub(
  201. os.path.extsep + r'ns[1,2,3,4,5,6]$', '', nsx_override)
  202. else:
  203. self._filenames['nsx'] = self.filename
  204. if nev_override:
  205. self._filenames['nev'] = re.sub(
  206. os.path.extsep + r'nev$', '', nev_override)
  207. else:
  208. self._filenames['nev'] = self.filename
  209. if sif_override:
  210. self._filenames['sif'] = re.sub(
  211. os.path.extsep + r'sif$', '', sif_override)
  212. else:
  213. self._filenames['sif'] = self.filename
  214. if ccf_override:
  215. self._filenames['ccf'] = re.sub(
  216. os.path.extsep + r'ccf$', '', ccf_override)
  217. else:
  218. self._filenames['ccf'] = self.filename
  219. # check which files are available
  220. self._avail_files = dict.fromkeys(self.extensions, False)
  221. self._avail_nsx = []
  222. for ext in self.extensions:
  223. if ext.startswith('ns'):
  224. file2check = ''.join(
  225. [self._filenames['nsx'], os.path.extsep, ext])
  226. else:
  227. file2check = ''.join(
  228. [self._filenames[ext], os.path.extsep, ext])
  229. if os.path.exists(file2check):
  230. self._print_verbose("Found " + file2check + ".")
  231. self._avail_files[ext] = True
  232. if ext.startswith('ns'):
  233. self._avail_nsx.append(int(ext[-1]))
  234. # check if there are any files present
  235. if not any(list(self._avail_files.values())):
  236. raise IOError(
  237. 'No Blackrock files present at {}'.format(filename))
  238. # check if manually specified files were found
  239. exts = ['nsx', 'nev', 'sif', 'ccf']
  240. ext_overrides = [nsx_override, nev_override, sif_override, ccf_override]
  241. for ext, ext_override in zip(exts, ext_overrides):
  242. if ext_override is not None and self._avail_files[ext] is False:
  243. raise ValueError('Specified {} file {} could not be '
  244. 'found.'.format(ext, ext_override))
  245. # These dictionaries are used internally to map the file specification
  246. # revision of the nsx and nev files to one of the reading routines
  247. self.__nsx_header_reader = {
  248. '2.1': self.__read_nsx_header_variant_a,
  249. '2.2': self.__read_nsx_header_variant_b,
  250. '2.3': self.__read_nsx_header_variant_b}
  251. self.__nsx_dataheader_reader = {
  252. '2.1': self.__read_nsx_dataheader_variant_a,
  253. '2.2': self.__read_nsx_dataheader_variant_b,
  254. '2.3': self.__read_nsx_dataheader_variant_b}
  255. self.__nsx_data_reader = {
  256. '2.1': self.__read_nsx_data_variant_a,
  257. '2.2': self.__read_nsx_data_variant_b,
  258. '2.3': self.__read_nsx_data_variant_b}
  259. self.__nev_header_reader = {
  260. '2.1': self.__read_nev_header_variant_a,
  261. '2.2': self.__read_nev_header_variant_b,
  262. '2.3': self.__read_nev_header_variant_c}
  263. self.__nev_data_reader = {
  264. '2.1': self.__read_nev_data_variant_a,
  265. '2.2': self.__read_nev_data_variant_a,
  266. '2.3': self.__read_nev_data_variant_b}
  267. self.__nsx_params = {
  268. '2.1': self.__get_nsx_param_variant_a,
  269. '2.2': self.__get_nsx_param_variant_b,
  270. '2.3': self.__get_nsx_param_variant_b}
  271. self.__nsx_databl_param = {
  272. '2.1': self.__get_nsx_databl_param_variant_a,
  273. '2.2': self.__get_nsx_databl_param_variant_b,
  274. '2.3': self.__get_nsx_databl_param_variant_b}
  275. self.__waveform_size = {
  276. '2.1': self.__get_waveform_size_variant_a,
  277. '2.2': self.__get_waveform_size_variant_a,
  278. '2.3': self.__get_waveform_size_variant_b}
  279. self.__channel_labels = {
  280. '2.1': self.__get_channel_labels_variant_a,
  281. '2.2': self.__get_channel_labels_variant_b,
  282. '2.3': self.__get_channel_labels_variant_b}
  283. self.__nsx_rec_times = {
  284. '2.1': self.__get_nsx_rec_times_variant_a,
  285. '2.2': self.__get_nsx_rec_times_variant_b,
  286. '2.3': self.__get_nsx_rec_times_variant_b}
  287. self.__nonneural_evtypes = {
  288. '2.1': self.__get_nonneural_evtypes_variant_a,
  289. '2.2': self.__get_nonneural_evtypes_variant_a,
  290. '2.3': self.__get_nonneural_evtypes_variant_b}
  291. # Load file spec and headers of available nev file
  292. if self._avail_files['nev']:
  293. # read nev file specification
  294. self.__nev_spec = self.__extract_nev_file_spec()
  295. self._print_verbose('Specification Version ' + self.__nev_spec)
  296. # read nev headers
  297. self.__nev_basic_header, self.__nev_ext_header = \
  298. self.__nev_header_reader[self.__nev_spec]()
  299. # Load file spec and headers of available nsx files
  300. self.__nsx_spec = {}
  301. self.__nsx_basic_header = {}
  302. self.__nsx_ext_header = {}
  303. self.__nsx_data_header = {}
  304. for nsx_nb in self._avail_nsx:
  305. # read nsx file specification
  306. self.__nsx_spec[nsx_nb] = self.__extract_nsx_file_spec(nsx_nb)
  307. # read nsx headers
  308. self.__nsx_basic_header[nsx_nb], self.__nsx_ext_header[nsx_nb] = \
  309. self.__nsx_header_reader[self.__nsx_spec[nsx_nb]](nsx_nb)
  310. # Read nsx data header(s) for nsx
  311. self.__nsx_data_header[nsx_nb] = self.__nsx_dataheader_reader[
  312. self.__nsx_spec[nsx_nb]](nsx_nb)
  313. def _print_verbose(self, text):
  314. """
  315. Print a verbose diagnostic message (string).
  316. """
  317. if self.__verbose_messages:
  318. if text not in self.__verbose_messages:
  319. self.__verbose_messages.append(text)
  320. print(str(self.__class__.__name__) + ': ' + text)
  321. def __extract_nsx_file_spec(self, nsx_nb):
  322. """
  323. Extract file specification from an .nsx file.
  324. """
  325. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  326. # Header structure of files specification 2.2 and higher. For files 2.1
  327. # and lower, the entries ver_major and ver_minor are not supported.
  328. dt0 = [
  329. ('file_id', 'S8'),
  330. ('ver_major', 'uint8'),
  331. ('ver_minor', 'uint8')]
  332. nsx_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  333. if nsx_file_id['file_id'].decode() == 'NEURALSG':
  334. spec = '2.1'
  335. elif nsx_file_id['file_id'].decode() == 'NEURALCD':
  336. spec = '{0}.{1}'.format(
  337. nsx_file_id['ver_major'], nsx_file_id['ver_minor'])
  338. else:
  339. raise IOError('Unsupported NSX file type.')
  340. return spec
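# Example: a spec 2.3 file starts with file_id b'NEURALCD' and
# ver_major == 2, ver_minor == 3, giving spec == '2.3'; spec 2.1 files are
# identified by file_id b'NEURALSG' alone.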
  341. def __extract_nev_file_spec(self):
  342. """
  343. Extract file specification from an .nev file
  344. """
  345. filename = '.'.join([self._filenames['nev'], 'nev'])
  346. # Header structure of files specification 2.2 and higher. For files 2.1
  347. # and lower, the entries ver_major and ver_minor are not supported.
  348. dt0 = [
  349. ('file_id', 'S8'),
  350. ('ver_major', 'uint8'),
  351. ('ver_minor', 'uint8')]
  352. nev_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  353. if nev_file_id['file_id'].decode() == 'NEURALEV':
  354. spec = '{0}.{1}'.format(
  355. nev_file_id['ver_major'], nev_file_id['ver_minor'])
  356. else:
  357. raise IOError('NEV file type {0} is not supported'.format(
  358. nev_file_id['file_id']))
  359. return spec
  360. def __read_nsx_header_variant_a(self, nsx_nb):
  361. """
  362. Extract nsx header information from a 2.1 .nsx file
  363. """
  364. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  366. # basic header (file_id: NEURALSG)
  366. dt0 = [
  367. ('file_id', 'S8'),
  368. # label of sampling group (e.g. "1kS/s" or "LFP Low")
  369. ('label', 'S16'),
  370. # number of 1/30000 seconds between data points
  371. # (e.g., if sampling rate "1 kS/s", period equals "30")
  372. ('period', 'uint32'),
  373. ('channel_count', 'uint32')]
  374. nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  375. # "extended" header (last field of file_id: NEURALCD)
  376. # (to facilitate compatibility with higher file specs)
  377. offset_dt0 = np.dtype(dt0).itemsize
  378. shape = nsx_basic_header['channel_count']
  379. # originally called channel_id in Blackrock user manual
  380. # (to facilitate compatibility with higher file specs)
  381. dt1 = [('electrode_id', 'uint32')]
  382. nsx_ext_header = np.memmap(
  383. filename, mode='r', shape=shape, offset=offset_dt0, dtype=dt1)
  384. return nsx_basic_header, nsx_ext_header
  385. def __read_nsx_header_variant_b(self, nsx_nb):
  386. """
  387. Extract nsx header information from a 2.2 or 2.3 .nsx file
  388. """
  389. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  390. # basic header (file_id: NEURALCD)
  391. dt0 = [
  392. ('file_id', 'S8'),
  393. # file specification split into major and minor version number
  394. ('ver_major', 'uint8'),
  395. ('ver_minor', 'uint8'),
  396. # bytes of basic & extended header
  397. ('bytes_in_headers', 'uint32'),
  398. # label of the sampling group (e.g., "1 kS/s" or "LFP low")
  399. ('label', 'S16'),
  400. ('comment', 'S256'),
  401. ('period', 'uint32'),
  402. ('timestamp_resolution', 'uint32'),
  403. # time origin: 2byte uint16 values for ...
  404. ('year', 'uint16'),
  405. ('month', 'uint16'),
  406. ('weekday', 'uint16'),
  407. ('day', 'uint16'),
  408. ('hour', 'uint16'),
  409. ('minute', 'uint16'),
  410. ('second', 'uint16'),
  411. ('millisecond', 'uint16'),
  412. # channel_count: matches the number of extended headers
  413. ('channel_count', 'uint32')]
  414. nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  415. # extended header (type: CC)
  416. offset_dt0 = np.dtype(dt0).itemsize
  417. shape = nsx_basic_header['channel_count']
  418. dt1 = [
  419. ('type', 'S2'),
  420. ('electrode_id', 'uint16'),
  421. ('electrode_label', 'S16'),
  422. # used front-end amplifier bank (e.g., A, B, C, D)
  423. ('physical_connector', 'uint8'),
  424. # used connector pin (e.g., 1-37 on bank A, B, C or D)
  425. ('connector_pin', 'uint8'),
  426. # digital and analog value ranges of the signal
  427. ('min_digital_val', 'int16'),
  428. ('max_digital_val', 'int16'),
  429. ('min_analog_val', 'int16'),
  430. ('max_analog_val', 'int16'),
  431. # units of the analog range values ("mV" or "uV")
  432. ('units', 'S16'),
  433. # filter settings used to create nsx from source signal
  434. ('hi_freq_corner', 'uint32'),
  435. ('hi_freq_order', 'uint32'),
  436. ('hi_freq_type', 'uint16'), # 0=None, 1=Butterworth
  437. ('lo_freq_corner', 'uint32'),
  438. ('lo_freq_order', 'uint32'),
  439. ('lo_freq_type', 'uint16')] # 0=None, 1=Butterworth
  440. nsx_ext_header = np.memmap(
  441. filename, mode='r', shape=shape, offset=offset_dt0, dtype=dt1)
  442. return nsx_basic_header, nsx_ext_header
  443. def __read_nsx_dataheader(self, nsx_nb, offset):
  444. """
  445. Reads data header following the given offset of an nsx file.
  446. """
  447. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  448. # dtypes data header
  449. dt2 = [
  450. ('header', 'uint8'),
  451. ('timestamp', 'uint32'),
  452. ('nb_data_points', 'uint32')]
  453. return np.memmap(
  454. filename, mode='r', dtype=dt2, shape=1, offset=offset)[0]
  455. def __read_nsx_dataheader_variant_a(
  456. self, nsx_nb, filesize=None, offset=None):
  457. """
  458. Reads None for the nsx data header of file spec 2.1. Introduced to
  459. facilitate compatibility with higher file spec.
  460. """
  461. return None
  462. def __read_nsx_dataheader_variant_b(
  463. self, nsx_nb, filesize=None, offset=None, ):
  464. """
  465. Reads the nsx data header for each data block following the offset of
  466. file spec 2.2 and 2.3.
  467. """
  468. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  469. filesize = self.__get_file_size(filename)
  470. data_header = {}
  471. index = 0
  472. if offset is None:
  473. offset = self.__nsx_basic_header[nsx_nb]['bytes_in_headers']
  474. while offset < filesize:
  475. index += 1
  476. dh = self.__read_nsx_dataheader(nsx_nb, offset)
  477. data_header[index] = {
  478. 'header': dh['header'],
  479. 'timestamp': dh['timestamp'],
  480. 'nb_data_points': dh['nb_data_points'],
  481. 'offset_to_data_block': offset + dh.dtype.itemsize}
  482. # data size = number of data points * (2bytes * number of channels)
  483. # use of `int` avoids overflow problem
  484. data_size = int(dh['nb_data_points']) * \
  485. int(self.__nsx_basic_header[nsx_nb]['channel_count']) * 2
  486. # define new offset (to possible next data block)
  487. offset = data_header[index]['offset_to_data_block'] + data_size
  488. return data_header
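# The returned dict maps 1-based block indices to their headers, roughly
# (illustrative values; one entry per un-paused recording period):
#     {1: {'header': 1, 'timestamp': 0, 'nb_data_points': 300000,
#          'offset_to_data_block': bytes_in_headers + 9}}
# where 9 is the itemsize of the data-header dtype (1 + 4 + 4 bytes).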
  489. def __read_nsx_data_variant_a(self, nsx_nb):
  490. """
  491. Extract nsx data from a 2.1 .nsx file
  492. """
  493. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  494. # get shape of data
  495. shape = (
  496. self.__nsx_databl_param['2.1']('nb_data_points', nsx_nb),
  497. self.__nsx_basic_header[nsx_nb]['channel_count'])
  498. offset = self.__nsx_params['2.1']('bytes_in_headers', nsx_nb)
  499. # read nsx data
  500. # store as dict for compatibility with higher file specs
  501. data = {1: np.memmap(
  502. filename, mode='r', dtype='int16', shape=shape, offset=offset)}
  503. return data
  504. def __read_nsx_data_variant_b(self, nsx_nb):
  505. """
  506. Extract nsx data (blocks) from a 2.2 or 2.3 .nsx file. Blocks can arise
  507. if the recording was paused by the user.
  508. """
  509. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  510. data = {}
  511. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  512. # get shape and offset of data
  513. shape = (
  514. self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points'],
  515. self.__nsx_basic_header[nsx_nb]['channel_count'])
  516. offset = \
  517. self.__nsx_data_header[nsx_nb][data_bl]['offset_to_data_block']
  518. # read data
  519. data[data_bl] = np.memmap(
  520. filename, mode='r', dtype='int16', shape=shape, offset=offset)
  521. return data
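# Each data[block] is a read-only int16 memmap of shape
# (nb_data_points, channel_count); no unit conversion happens here. A typical
# rescaling with the extended-header ranges would look like (sketch only, not
# the verbatim code of this module):
#     analog = (raw - min_digital_val) / (max_digital_val - min_digital_val) \
#              * (max_analog_val - min_analog_val) + min_analog_val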
  522. def __read_nev_header(self, ext_header_variants):
  523. """
  524. Extract nev header information from a .nev file
  525. """
  526. filename = '.'.join([self._filenames['nev'], 'nev'])
  527. # basic header
  528. dt0 = [
  529. # Set to "NEURALEV"
  530. ('file_type_id', 'S8'),
  531. ('ver_major', 'uint8'),
  532. ('ver_minor', 'uint8'),
  533. # Flags
  534. ('additionnal_flags', 'uint16'),
  535. # File index of first data sample
  536. ('bytes_in_headers', 'uint32'),
  537. # Number of bytes per data packet (sample)
  538. ('bytes_in_data_packets', 'uint32'),
  539. # Time resolution of time stamps in Hz
  540. ('timestamp_resolution', 'uint32'),
  541. # Sampling frequency of waveforms in Hz
  542. ('sample_resolution', 'uint32'),
  543. ('year', 'uint16'),
  544. ('month', 'uint16'),
  545. ('weekday', 'uint16'),
  546. ('day', 'uint16'),
  547. ('hour', 'uint16'),
  548. ('minute', 'uint16'),
  549. ('second', 'uint16'),
  550. ('millisecond', 'uint16'),
  551. ('application_to_create_file', 'S32'),
  552. ('comment_field', 'S256'),
  553. # Number of extended headers
  554. ('nb_ext_headers', 'uint32')]
  555. nev_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  556. # extended header
  557. # this consists of N blocks with an 8-byte code + 24 data bytes each
  558. # the data bytes depend on the code and need to be converted
  559. # case by case
  560. shape = nev_basic_header['nb_ext_headers']
  561. offset_dt0 = np.dtype(dt0).itemsize
  562. # This is the common structure of the beginning of extended headers
  563. dt1 = [
  564. ('packet_id', 'S8'),
  565. ('info_field', 'S24')]
  566. raw_ext_header = np.memmap(
  567. filename, mode='r', offset=offset_dt0, dtype=dt1, shape=shape)
  568. nev_ext_header = {}
  569. for packet_id in ext_header_variants.keys():
  570. mask = (raw_ext_header['packet_id'] == packet_id)
  571. dt2 = self.__nev_ext_header_types()[packet_id][
  572. ext_header_variants[packet_id]]
  573. nev_ext_header[packet_id] = raw_ext_header.view(dt2)[mask]
  574. return nev_basic_header, nev_ext_header
  575. def __read_nev_header_variant_a(self):
  576. """
  577. Extract nev header information from a 2.1 .nev file
  578. """
  579. ext_header_variants = {
  580. b'NEUEVWAV': 'a',
  581. b'ARRAYNME': 'a',
  582. b'ECOMMENT': 'a',
  583. b'CCOMMENT': 'a',
  584. b'MAPFILE': 'a',
  585. b'NSASEXEV': 'a'}
  586. return self.__read_nev_header(ext_header_variants)
  587. def __read_nev_header_variant_b(self):
  588. """
  589. Extract nev header information from a 2.2 .nev file
  590. """
  591. ext_header_variants = {
  592. b'NEUEVWAV': 'b',
  593. b'ARRAYNME': 'a',
  594. b'ECOMMENT': 'a',
  595. b'CCOMMENT': 'a',
  596. b'MAPFILE': 'a',
  597. b'NEUEVLBL': 'a',
  598. b'NEUEVFLT': 'a',
  599. b'DIGLABEL': 'a',
  600. b'NSASEXEV': 'a'}
  601. return self.__read_nev_header(ext_header_variants)
  602. def __read_nev_header_variant_c(self):
  603. """
  604. Extract nev header information from a 2.3 .nev file
  605. """
  606. ext_header_variants = {
  607. b'NEUEVWAV': 'b',
  608. b'ARRAYNME': 'a',
  609. b'ECOMMENT': 'a',
  610. b'CCOMMENT': 'a',
  611. b'MAPFILE': 'a',
  612. b'NEUEVLBL': 'a',
  613. b'NEUEVFLT': 'a',
  614. b'DIGLABEL': 'a',
  615. b'VIDEOSYN': 'a',
  616. b'TRACKOBJ': 'a'}
  617. return self.__read_nev_header(ext_header_variants)
  618. def __read_nev_data(self, nev_data_masks, nev_data_types):
  619. """
  620. Extract nev data from a .nev file
  621. """
  622. filename = '.'.join([self._filenames['nev'], 'nev'])
  623. data_size = self.__nev_basic_header['bytes_in_data_packets']
  624. header_size = self.__nev_basic_header['bytes_in_headers']
  625. # read all raw data packets and markers
  626. dt0 = [
  627. ('timestamp', 'uint32'),
  628. ('packet_id', 'uint16'),
  629. ('value', 'S{0}'.format(data_size - 6))]
  630. raw_data = np.memmap(filename, mode='r', offset=header_size, dtype=dt0)
  631. masks = self.__nev_data_masks(raw_data['packet_id'])
  632. types = self.__nev_data_types(data_size)
  633. data = {}
  634. for k, v in nev_data_masks.items():
  635. data[k] = raw_data.view(types[k][nev_data_types[k]])[masks[k][v]]
  636. return data
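# raw_data.view(...) reinterprets every fixed-size packet with the
# packet-type specific dtype from __nev_data_types, and the boolean mask from
# __nev_data_masks then selects only the packets of that type, so
# data['Spikes'], data['NonNeural'], ... are structured views on the memmap.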
  637. def __read_nev_data_variant_a(self):
  638. """
  639. Extract nev data from a 2.1 & 2.2 .nev file
  640. """
  641. nev_data_masks = {
  642. 'NonNeural': 'a',
  643. 'Spikes': 'a'}
  644. nev_data_types = {
  645. 'NonNeural': 'a',
  646. 'Spikes': 'a'}
  647. return self.__read_nev_data(nev_data_masks, nev_data_types)
  648. def __read_nev_data_variant_b(self):
  649. """
  650. Extract nev data from a 2.3 .nev file
  651. """
  652. nev_data_masks = {
  653. 'NonNeural': 'a',
  654. 'Spikes': 'b',
  655. 'Comments': 'a',
  656. 'VideoSync': 'a',
  657. 'TrackingEvents': 'a',
  658. 'ButtonTrigger': 'a',
  659. 'ConfigEvent': 'a'}
  660. nev_data_types = {
  661. 'NonNeural': 'b',
  662. 'Spikes': 'a',
  663. 'Comments': 'a',
  664. 'VideoSync': 'a',
  665. 'TrackingEvents': 'a',
  666. 'ButtonTrigger': 'a',
  667. 'ConfigEvent': 'a'}
  668. return self.__read_nev_data(nev_data_masks, nev_data_types)
  669. def __nev_ext_header_types(self):
  670. """
  671. Defines extended header types for different .nev file specifications.
  672. """
  673. nev_ext_header_types = {
  674. b'NEUEVWAV': {
  675. # Version>=2.1
  676. 'a': [
  677. ('packet_id', 'S8'),
  678. ('electrode_id', 'uint16'),
  679. ('physical_connector', 'uint8'),
  680. ('connector_pin', 'uint8'),
  681. ('digitization_factor', 'uint16'),
  682. ('energy_threshold', 'uint16'),
  683. ('hi_threshold', 'int16'),
  684. ('lo_threshold', 'int16'),
  685. ('nb_sorted_units', 'uint8'),
  686. # number of bytes per waveform sample
  687. ('bytes_per_waveform', 'uint8'),
  688. ('unused', 'S10')],
  689. # Version>=2.3
  690. 'b': [
  691. ('packet_id', 'S8'),
  692. ('electrode_id', 'uint16'),
  693. ('physical_connector', 'uint8'),
  694. ('connector_pin', 'uint8'),
  695. ('digitization_factor', 'uint16'),
  696. ('energy_threshold', 'uint16'),
  697. ('hi_threshold', 'int16'),
  698. ('lo_threshold', 'int16'),
  699. ('nb_sorted_units', 'uint8'),
  700. # number of bytes per waveform sample
  701. ('bytes_per_waveform', 'uint8'),
  702. # number of samples for each waveform
  703. ('spike_width', 'uint16'),
  704. ('unused', 'S8')]},
  705. b'ARRAYNME': {
  706. 'a': [
  707. ('packet_id', 'S8'),
  708. ('electrode_array_name', 'S24')]},
  709. b'ECOMMENT': {
  710. 'a': [
  711. ('packet_id', 'S8'),
  712. ('extra_comment', 'S24')]},
  713. b'CCOMMENT': {
  714. 'a': [
  715. ('packet_id', 'S8'),
  716. ('continued_comment', 'S24')]},
  717. b'MAPFILE': {
  718. 'a': [
  719. ('packet_id', 'S8'),
  720. ('mapFile', 'S24')]},
  721. b'NEUEVLBL': {
  722. 'a': [
  723. ('packet_id', 'S8'),
  724. ('electrode_id', 'uint16'),
  725. # label of this electrode
  726. ('label', 'S16'),
  727. ('unused', 'S6')]},
  728. b'NEUEVFLT': {
  729. 'a': [
  730. ('packet_id', 'S8'),
  731. ('electrode_id', 'uint16'),
  732. ('hi_freq_corner', 'uint32'),
  733. ('hi_freq_order', 'uint32'),
  734. # 0=None 1=Butterworth
  735. ('hi_freq_type', 'uint16'),
  736. ('lo_freq_corner', 'uint32'),
  737. ('lo_freq_order', 'uint32'),
  738. # 0=None 1=Butterworth
  739. ('lo_freq_type', 'uint16'),
  740. ('unused', 'S2')]},
  741. b'DIGLABEL': {
  742. 'a': [
  743. ('packet_id', 'S8'),
  744. # Read name of digital
  745. ('label', 'S16'),
  746. # 0=serial, 1=parallel
  747. ('mode', 'uint8'),
  748. ('unused', 'S7')]},
  749. b'NSASEXEV': {
  750. 'a': [
  751. ('packet_id', 'S8'),
  752. # Read frequency of periodic packet generation
  753. ('frequency', 'uint16'),
  754. # Read if digital input triggers events
  755. ('digital_input_config', 'uint8'),
  756. # Read if analog input triggers events
  757. ('analog_channel_1_config', 'uint8'),
  758. ('analog_channel_1_edge_detec_val', 'uint16'),
  759. ('analog_channel_2_config', 'uint8'),
  760. ('analog_channel_2_edge_detec_val', 'uint16'),
  761. ('analog_channel_3_config', 'uint8'),
  762. ('analog_channel_3_edge_detec_val', 'uint16'),
  763. ('analog_channel_4_config', 'uint8'),
  764. ('analog_channel_4_edge_detec_val', 'uint16'),
  765. ('analog_channel_5_config', 'uint8'),
  766. ('analog_channel_5_edge_detec_val', 'uint16'),
  767. ('unused', 'S6')]},
  768. b'VIDEOSYN': {
  769. 'a': [
  770. ('packet_id', 'S8'),
  771. ('video_source_id', 'uint16'),
  772. ('video_source', 'S16'),
  773. ('frame_rate', 'float32'),
  774. ('unused', 'S2')]},
  775. b'TRACKOBJ': {
  776. 'a': [
  777. ('packet_id', 'S8'),
  778. ('trackable_type', 'uint16'),
  779. ('trackable_id', 'uint16'),
  780. ('point_count', 'uint16'),
  781. ('video_source', 'S16'),
  782. ('unused', 'S2')]}}
  783. return nev_ext_header_types
  784. def __nev_data_masks(self, packet_ids):
  785. """
  786. Defines data masks for different .nev file specifications depending on
  787. the given packet identifiers.
  788. """
  789. __nev_data_masks = {
  790. 'NonNeural': {
  791. 'a': (packet_ids == 0)},
  792. 'Spikes': {
  793. # Version 2.1 & 2.2
  794. 'a': (0 < packet_ids) & (packet_ids <= 255),
  795. # Version>=2.3
  796. 'b': (0 < packet_ids) & (packet_ids <= 2048)},
  797. 'Comments': {
  798. 'a': (packet_ids == 0xFFFF)},
  799. 'VideoSync': {
  800. 'a': (packet_ids == 0xFFFE)},
  801. 'TrackingEvents': {
  802. 'a': (packet_ids == 0xFFFD)},
  803. 'ButtonTrigger': {
  804. 'a': (packet_ids == 0xFFFC)},
  805. 'ConfigEvent': {
  806. 'a': (packet_ids == 0xFFFB)}}
  807. return __nev_data_masks
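# Packet-id layout encoded above: 0 marks non-neural (digital/analog input)
# packets; 1-255 (spec 2.1/2.2) or 1-2048 (spec >= 2.3) are spike packets
# keyed by electrode id; 0xFFFB-0xFFFF mark config, button-trigger, tracking,
# video-sync and comment packets, respectively.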
  808. def __nev_data_types(self, data_size):
  809. """
  810. Defines data types for different .nev file specifications depending on
  811. the given packet identifiers.
  812. """
  813. __nev_data_types = {
  814. 'NonNeural': {
  815. # Version 2.1 & 2.2
  816. 'a': [
  817. ('timestamp', 'uint32'),
  818. ('packet_id', 'uint16'),
  819. ('packet_insertion_reason', 'uint8'),
  820. ('reserved', 'uint8'),
  821. ('digital_input', 'uint16'),
  822. ('analog_input_channel_1', 'int16'),
  823. ('analog_input_channel_2', 'int16'),
  824. ('analog_input_channel_3', 'int16'),
  825. ('analog_input_channel_4', 'int16'),
  826. ('analog_input_channel_5', 'int16'),
  827. ('unused', 'S{0}'.format(data_size - 20))],
  828. # Version>=2.3
  829. 'b': [
  830. ('timestamp', 'uint32'),
  831. ('packet_id', 'uint16'),
  832. ('packet_insertion_reason', 'uint8'),
  833. ('reserved', 'uint8'),
  834. ('digital_input', 'uint16'),
  835. ('unused', 'S{0}'.format(data_size - 10))]},
  836. 'Spikes': {
  837. 'a': [
  838. ('timestamp', 'uint32'),
  839. ('packet_id', 'uint16'),
  840. ('unit_class_nb', 'uint8'),
  841. ('reserved', 'uint8'),
  842. ('waveform', 'S{0}'.format(data_size - 8))]},
  843. 'Comments': {
  844. 'a': [
  845. ('timestamp', 'uint32'),
  846. ('packet_id', 'uint16'),
  847. ('char_set', 'uint8'),
  848. ('flag', 'uint8'),
  849. ('data', 'uint32'),
  850. ('comment', 'S{0}'.format(data_size - 12))]},
  851. 'VideoSync': {
  852. 'a': [
  853. ('timestamp', 'uint32'),
  854. ('packet_id', 'uint16'),
  855. ('video_file_nb', 'uint16'),
  856. ('video_frame_nb', 'uint32'),
  857. ('video_elapsed_time', 'uint32'),
  858. ('video_source_id', 'uint32'),
  859. ('unused', 'int8', (data_size - 20,))]},
  860. 'TrackingEvents': {
  861. 'a': [
  862. ('timestamp', 'uint32'),
  863. ('packet_id', 'uint16'),
  864. ('parent_id', 'uint16'),
  865. ('node_id', 'uint16'),
  866. ('node_count', 'uint16'),
  867. ('point_count', 'uint16'),
  868. ('tracking_points', 'uint16', ((data_size - 14) // 2,))]},
  869. 'ButtonTrigger': {
  870. 'a': [
  871. ('timestamp', 'uint32'),
  872. ('packet_id', 'uint16'),
  873. ('trigger_type', 'uint16'),
  874. ('unused', 'int8', (data_size - 8,))]},
  875. 'ConfigEvent': {
  876. 'a': [
  877. ('timestamp', 'uint32'),
  878. ('packet_id', 'uint16'),
  879. ('config_change_type', 'uint16'),
  880. ('config_changed', 'S{0}'.format(data_size - 8))]}}
  881. return __nev_data_types
  882. def __nev_params(self, param_name):
  883. """
  884. Returns wanted nev parameter.
  885. """
  886. nev_parameters = {
  887. 'bytes_in_data_packets':
  888. self.__nev_basic_header['bytes_in_data_packets'],
  889. 'rec_datetime': datetime.datetime(
  890. year=self.__nev_basic_header['year'],
  891. month=self.__nev_basic_header['month'],
  892. day=self.__nev_basic_header['day'],
  893. hour=self.__nev_basic_header['hour'],
  894. minute=self.__nev_basic_header['minute'],
  895. second=self.__nev_basic_header['second'],
  896. microsecond=self.__nev_basic_header['millisecond']),
  897. 'max_res': self.__nev_basic_header['timestamp_resolution'],
  898. 'channel_ids': self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  899. 'channel_labels': self.__channel_labels[self.__nev_spec](),
  900. 'event_unit': pq.CompoundUnit("1.0/{0} * s".format(
  901. self.__nev_basic_header['timestamp_resolution'])),
  902. 'nb_units': dict(zip(
  903. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  904. self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units'])),
  905. 'digitization_factor': dict(zip(
  906. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  907. self.__nev_ext_header[b'NEUEVWAV']['digitization_factor'])),
  908. 'data_size': self.__nev_basic_header['bytes_in_data_packets'],
  909. 'waveform_size': self.__waveform_size[self.__nev_spec](),
  910. 'waveform_dtypes': self.__get_waveforms_dtype(),
  911. 'waveform_sampling_rate':
  912. self.__nev_basic_header['sample_resolution'] * pq.Hz,
  913. 'waveform_time_unit': pq.CompoundUnit("1.0/{0} * s".format(
  914. self.__nev_basic_header['sample_resolution'])),
  915. 'waveform_unit': pq.uV}
  916. return nev_parameters[param_name]
  917. def __get_file_size(self, filename):
  918. """
  919. Returns the file size in bytes for the given file.
  920. """
  921. filebuf = open(filename, 'rb')
  922. filebuf.seek(0, os.SEEK_END)
  923. file_size = filebuf.tell()
  924. filebuf.close()
  925. return file_size
  926. def __get_min_time(self):
  927. """
  928. Returns the smallest time that can be determined from the recording for
  929. use as the lower bound n in an interval [n,m).
  930. """
  931. tp = []
  932. if self._avail_files['nev']:
  933. tp.extend(self.__get_nev_rec_times()[0])
  934. for nsx_i in self._avail_nsx:
  935. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[0])
  936. return min(tp)
  937. def __get_max_time(self):
  938. """
  939. Returns the largest time that can be determined from the recording for
  940. use as the upper bound m in an interval [n,m).
  941. """
  942. tp = []
  943. if self._avail_files['nev']:
  944. tp.extend(self.__get_nev_rec_times()[1])
  945. for nsx_i in self._avail_nsx:
  946. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[1])
  947. return max(tp)
  948. def __get_nev_rec_times(self):
  949. """
  950. Extracts minimum and maximum time points from a nev file.
  951. """
  952. filename = '.'.join([self._filenames['nev'], 'nev'])
  953. dt = [('timestamp', 'uint32')]
  954. offset = \
  955. self.__get_file_size(filename) - \
  956. self.__nev_params('bytes_in_data_packets')
  957. last_data_packet = np.memmap(
  958. filename, mode='r', offset=offset, dtype=dt)[0]
  959. n_starts = [0 * self.__nev_params('event_unit')]
  960. n_stops = [
  961. last_data_packet['timestamp'] * self.__nev_params('event_unit')]
  962. return n_starts, n_stops
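# Timestamps are expressed in the nev 'event_unit', i.e. counts of
# 1 / timestamp_resolution seconds; e.g. with a resolution of 30000 Hz, a
# timestamp of 90000 corresponds to 3 s after recording onset.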
  963. def __get_nsx_rec_times_variant_a(self, nsx_nb):
  964. """
  965. Extracts minimum and maximum time points from a 2.1 nsx file.
  966. """
  967. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  968. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  969. 'time_unit', nsx_nb)
  970. highest_res = self.__nev_params('event_unit')
  971. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  972. 'bytes_in_headers', nsx_nb)
  973. nb_data_points = int(
  974. (self.__get_file_size(filename) - bytes_in_headers)
  975. / (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1)
  976. # add n_start
  977. n_starts = [(0 * t_unit).rescale(highest_res)]
  978. # add n_stop
  979. n_stops = [(nb_data_points * t_unit).rescale(highest_res)]
  980. return n_starts, n_stops
  981. def __get_nsx_rec_times_variant_b(self, nsx_nb):
  982. """
  983. Extracts minimum and maximum time points from a 2.2 or 2.3 nsx file.
  984. """
  985. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  986. 'time_unit', nsx_nb)
  987. highest_res = self.__nev_params('event_unit')
  988. n_starts = []
  989. n_stops = []
  990. # add n_start and n_stop for all data blocks
  991. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  992. ts0 = self.__nsx_data_header[nsx_nb][data_bl]['timestamp']
  993. nbdp = self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points']
  994. # add n_start
  995. start = ts0 * t_unit
  996. n_starts.append(start.rescale(highest_res))
  997. # add n_stop
  998. stop = start + nbdp * t_unit
  999. n_stops.append(stop.rescale(highest_res))
  1000. return sorted(n_starts), sorted(n_stops)
  1001. def __get_waveforms_dtype(self):
  1002. """
  1003. Extracts the actual waveform dtype set for each channel.
  1004. """
  1005. # Blackrock code giving the appropriate dtype
  1006. conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
  1007. # get all electrode ids from nev ext header
  1008. all_el_ids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1009. # get the dtype of waveform (this is stupidly complicated)
  1010. if self.__is_set(
  1011. np.array(self.__nev_basic_header['additionnal_flags']), 0):
  1012. dtype_waveforms = dict((k, 'int16') for k in all_el_ids)
  1013. else:
  1014. # extract bytes per waveform
  1015. waveform_bytes = \
  1016. self.__nev_ext_header[b'NEUEVWAV']['bytes_per_waveform']
  1017. # extract dtype for waveforms for each electrode
  1018. dtype_waveforms = dict((ch, conv[int(nb)]) for ch, nb in zip(all_el_ids, waveform_bytes))
  1019. return dtype_waveforms
  1020. def __get_channel_labels_variant_a(self):
  1021. """
  1022. Returns labels for all channels for file spec 2.1
  1023. """
  1024. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1025. labels = []
  1026. for elid in elids:
  1027. if elid < 129:
  1028. labels.append('chan%i' % elid)
  1029. else:
  1030. labels.append('ainp%i' % (elid - 129 + 1))
  1031. return dict(zip(elids, labels))
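# Example: electrode ids [1, 2, 129] map to
# {1: 'chan1', 2: 'chan2', 129: 'ainp1'}.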
  1032. def __get_channel_labels_variant_b(self):
  1033. """
  1034. Returns labels for all channels for file spec 2.2 and 2.3
  1035. """
  1036. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1037. labels = self.__nev_ext_header[b'NEUEVLBL']['label']
  1038. return dict(zip(elids, labels)) if len(labels) > 0 else None
  1039. def __get_waveform_size_variant_a(self):
  1040. """
  1041. Returns waveform sizes for all channels for file spec 2.1 and 2.2
  1042. """
  1043. wf_dtypes = self.__get_waveforms_dtype()
  1044. nb_bytes_wf = self.__nev_basic_header['bytes_in_data_packets'] - 8
  1045. wf_sizes = dict([
  1046. (ch, int(nb_bytes_wf / np.dtype(dt).itemsize)) for ch, dt in
  1047. wf_dtypes.items()])
  1048. return wf_sizes
  1049. def __get_waveform_size_variant_b(self):
  1050. """
  1051. Returns waveform sizes for all channels for file spec 2.3
  1052. """
  1053. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1054. spike_widths = self.__nev_ext_header[b'NEUEVWAV']['spike_width']
  1055. return dict(zip(elids, spike_widths))
  1056. def __get_left_sweep_waveforms(self):
  1057. """
  1058. Returns left sweep of waveforms for each channel. Left sweep is defined
  1059. as the time from the beginning of the waveform to the trigger time of
  1060. the corresponding spike.
  1061. """
  1062. # TODO: Double check if this is the actual setting for Blackrock
  1063. wf_t_unit = self.__nev_params('waveform_time_unit')
  1064. all_ch = self.__nev_params('channel_ids')
  1065. # TODO: Double check if this is the correct assumption (10 samples)
  1066. # default value: threshold crossing after 10 samples of waveform
  1067. wf_left_sweep = dict([(ch, 10 * wf_t_unit) for ch in all_ch])
  1068. # non-default: threshold crossing at center of waveform
  1069. # wf_size = self.__nev_params('waveform_size')
  1070. # wf_left_sweep = dict(
  1071. # [(ch, (wf_size[ch] / 2) * wf_t_unit) for ch in all_ch])
  1072. return wf_left_sweep
  1073. def __get_nsx_param_variant_a(self, param_name, nsx_nb):
  1074. """
  1075. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1076. 2.1.
  1077. """
  1078. # Here, min/max_analog_val and min/max_digital_val are not available in
  1079. # the nsx, so that we must estimate these parameters from the
  1080. # digitization factor of the nev (information by Kian Torab, Blackrock
  1081. # Microsystems). Here dig_factor=max_analog_val/max_digital_val. We set
  1082. # max_digital_val to 1000, and max_analog_val=dig_factor. dig_factor is
  1083. # given in nV by definition, so the units turn out to be uV.
  1084. labels = []
  1085. dig_factor = []
  1086. for elid in self.__nsx_ext_header[nsx_nb]['electrode_id']:
  1087. if self._avail_files['nev']:
  1088. # This is a workaround for the DigitalFactor overflow in NEV
  1089. # files recorded with buggy Cerebus system.
  1090. # Fix taken from: NPMK toolbox by Blackrock,
  1091. # file openNEV, line 464,
  1092. # git rev. d0a25eac902704a3a29fa5dfd3aed0744f4733ed
  1093. df = self.__nev_params('digitization_factor')[elid]
  1094. if df == 21516:
  1095. df = 152592.547
  1096. dig_factor.append(df)
  1097. else:
  1098. dig_factor.append(None)
  1099. if elid < 129:
  1100. labels.append('chan%i' % elid)
  1101. else:
  1102. labels.append('ainp%i' % (elid - 129 + 1))
  1103. nsx_parameters = {
  1104. 'labels': labels,
  1105. 'units': np.array(
  1106. [b'uV']
  1107. * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1108. 'min_analog_val': -1 * np.array(dig_factor),
  1109. 'max_analog_val': np.array(dig_factor),
  1110. 'min_digital_val': np.array(
  1111. [-1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1112. 'max_digital_val': np.array(
  1113. [1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1114. 'timestamp_resolution': 30000,
  1115. 'bytes_in_headers':
  1116. self.__nsx_basic_header[nsx_nb].dtype.itemsize
  1117. + self.__nsx_ext_header[nsx_nb].dtype.itemsize
  1118. * self.__nsx_basic_header[nsx_nb]['channel_count'],
  1119. 'sampling_rate':
  1120. 30000 / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1121. 'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
  1122. 30000 / self.__nsx_basic_header[nsx_nb]['period']))}
  1123. return nsx_parameters[param_name]
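# Example of the resulting spec 2.1 scaling (illustrative value): a
# digitization_factor of 250 (nV per digital step) gives max_analog_val = 250
# and max_digital_val = 1000, i.e. 250 / 1000 = 0.25 uV per digital unit.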
  1124. def __get_nsx_param_variant_b(self, param_name, nsx_nb):
  1125. """
  1126. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1127. 2.2 and 2.3.
  1128. """
  1129. nsx_parameters = {
  1130. 'labels':
  1131. self.__nsx_ext_header[nsx_nb]['electrode_label'],
  1132. 'units':
  1133. self.__nsx_ext_header[nsx_nb]['units'],
  1134. 'min_analog_val':
  1135. self.__nsx_ext_header[nsx_nb]['min_analog_val'],
  1136. 'max_analog_val':
  1137. self.__nsx_ext_header[nsx_nb]['max_analog_val'],
  1138. 'min_digital_val':
  1139. self.__nsx_ext_header[nsx_nb]['min_digital_val'],
  1140. 'max_digital_val':
  1141. self.__nsx_ext_header[nsx_nb]['max_digital_val'],
  1142. 'timestamp_resolution':
  1143. self.__nsx_basic_header[nsx_nb]['timestamp_resolution'],
  1144. 'bytes_in_headers':
  1145. self.__nsx_basic_header[nsx_nb]['bytes_in_headers'],
  1146. 'sampling_rate':
  1147. self.__nsx_basic_header[nsx_nb]['timestamp_resolution']
  1148. / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1149. 'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
  1150. self.__nsx_basic_header[nsx_nb]['timestamp_resolution']
  1151. / self.__nsx_basic_header[nsx_nb]['period']))}
  1152. return nsx_parameters[param_name]
  1153. def __get_nsx_databl_param_variant_a(
  1154. self, param_name, nsx_nb, n_start=None, n_stop=None):
  1155. """
  1156. Returns data block parameter (param_name) for a given nsx (nsx_nb) for
  1157. file spec 2.1. Arg 'n_start' should not be specified! It is only set
  1158. for compatibility reasons with higher file spec.
  1159. """
  1160. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  1161. t_starts, t_stops = \
  1162. self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
  1163. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1164. 'bytes_in_headers', nsx_nb)
  1165. # extract parameters from nsx basic extended and data header
  1166. data_parameters = {
  1167. 'nb_data_points': int(
  1168. (self.__get_file_size(filename) - bytes_in_headers)
  1169. / (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1),
  1170. 'databl_idx': 1,
  1171. 'databl_t_start': t_starts[0],
  1172. 'databl_t_stop': t_stops[0]}
  1173. return data_parameters[param_name]

    def __get_nsx_databl_param_variant_b(
            self, param_name, nsx_nb, n_start, n_stop):
        """
        Returns data block parameter (param_name) for a given nsx (nsx_nb) and
        a requested time range (n_start, n_stop) for file spec 2.2 and 2.3.
        """
        t_starts, t_stops = \
            self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)

        # data header
        for d_bl in self.__nsx_data_header[nsx_nb].keys():
            # from "data header" with corresponding t_start and t_stop
            data_parameters = {
                'nb_data_points':
                    self.__nsx_data_header[nsx_nb][d_bl]['nb_data_points'],
                'databl_idx': d_bl,
                'databl_t_start': t_starts[d_bl - 1],
                'databl_t_stop': t_stops[d_bl - 1]}

            if t_starts[d_bl - 1] <= n_start < n_stop <= t_stops[d_bl - 1]:
                return data_parameters[param_name]
            elif n_start < t_starts[d_bl - 1] < n_stop <= t_stops[d_bl - 1]:
                self._print_verbose(
                    "User n_start ({0}) is smaller than the corresponding "
                    "t_start of the available ns{1} datablock "
                    "({2}).".format(n_start, nsx_nb, t_starts[d_bl - 1]))
                return data_parameters[param_name]
            elif t_starts[d_bl - 1] <= n_start < t_stops[d_bl - 1] < n_stop:
                self._print_verbose(
                    "User n_stop ({0}) is larger than the corresponding "
                    "t_stop of the available ns{1} datablock "
                    "({2}).".format(n_stop, nsx_nb, t_stops[d_bl - 1]))
                return data_parameters[param_name]
            elif n_start < t_starts[d_bl - 1] < t_stops[d_bl - 1] < n_stop:
                self._print_verbose(
                    "User n_start ({0}) is smaller than the corresponding "
                    "t_start and user n_stop ({1}) is larger than the "
                    "corresponding t_stop of the available ns{2} datablock "
                    "({3}).".format(
                        n_start, n_stop, nsx_nb,
                        (t_starts[d_bl - 1], t_stops[d_bl - 1])))
                return data_parameters[param_name]
            else:
                continue

        raise ValueError(
            "The requested n_start and n_stop lie outside the t_start and "
            "t_stop of every available ns%i datablock" % nsx_nb)

    def __get_nonneural_evtypes_variant_a(self, data):
        """
        Defines event types and the necessary parameters to extract them from
        a 2.1 and 2.2 nev file.
        """
        # TODO: add annotations of nev ext header (NSASEXEX) to event types

        # digital events
        event_types = {
            'digital_input_port': {
                'name': 'digital_input_port',
                'field': 'digital_input',
                'mask': self.__is_set(data['packet_insertion_reason'], 0),
                'desc': "Events of the digital input port"},
            'serial_input_port': {
                'name': 'serial_input_port',
                'field': 'digital_input',
                'mask':
                    self.__is_set(data['packet_insertion_reason'], 0)
                    & self.__is_set(data['packet_insertion_reason'], 7),
                'desc': "Events of the serial input port"}}

        # analog input events via threshold crossings
        for ch in range(5):
            event_types.update({
                'analog_input_channel_{0}'.format(ch + 1): {
                    'name': 'analog_input_channel_{0}'.format(ch + 1),
                    'field': 'analog_input_channel_{0}'.format(ch + 1),
                    'mask': self.__is_set(
                        data['packet_insertion_reason'], ch + 1),
                    'desc': "Values of analog input channel {0} in mV "
                            "(+/- 5000)".format(ch + 1)}})

        # TODO: define field and desc
        event_types.update({
            'periodic_sampling_events': {
                'name': 'periodic_sampling_events',
                'field': 'digital_input',
                'mask': self.__is_set(data['packet_insertion_reason'], 6),
                'desc': 'Periodic sampling event of a certain frequency'}})

        return event_types

    def __get_nonneural_evtypes_variant_b(self, data):
        """
        Defines event types and the necessary parameters to extract them from
        a 2.3 nev file.
        """
        # digital events
        event_types = {
            'digital_input_port': {
                'name': 'digital_input_port',
                'field': 'digital_input',
                'mask': self.__is_set(data['packet_insertion_reason'], 0),
                'desc': "Events of the digital input port"},
            'serial_input_port': {
                'name': 'serial_input_port',
                'field': 'digital_input',
                'mask':
                    self.__is_set(data['packet_insertion_reason'], 0)
                    & self.__is_set(data['packet_insertion_reason'], 7),
                'desc': "Events of the serial input port"}}

        return event_types

    def __get_unit_classification(self, un_id):
        """
        Returns the Blackrock unit classification of an online spike sorting
        for the given unit id (un_id).
        """
        # Blackrock unit classification
        if un_id == 0:
            return 'unclassified'
        elif 1 <= un_id <= 16:
            return '{0}'.format(un_id)
        elif 17 <= un_id <= 244:
            raise ValueError(
                "Unit id {0} is not used by daq system".format(un_id))
        elif un_id == 255:
            return 'noise'
        else:
            raise ValueError("Unit id {0} cannot be classified".format(un_id))
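
    # Editorial note (illustrative): __get_unit_classification(0) returns
    # 'unclassified', __get_unit_classification(3) returns '3', and
    # __get_unit_classification(255) returns 'noise'; ids 17-244 are reserved
    # by the acquisition system and raise a ValueError.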

    def __is_set(self, flag, pos):
        """
        Checks if bit is set at the given position for flag. If flag is an
        array, an array will be returned.
        """
        return flag & (1 << pos) > 0
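
    # Editorial note (illustrative): for a packet_insertion_reason value of
    # 0b10000001 (decimal 129), __is_set(129, 0) and __is_set(129, 7) are
    # both True, so such a packet matches the 'serial_input_port' mask built
    # in __get_nonneural_evtypes_variant_a/_b above.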

    def __transform_nsx_to_load(self, nsx_to_load):
        """
        Transforms the input argument nsx_to_load to a list of integers.
        """
        if hasattr(nsx_to_load, "__len__") and len(nsx_to_load) == 0:
            nsx_to_load = None

        if isinstance(nsx_to_load, int):
            nsx_to_load = [nsx_to_load]

        if isinstance(nsx_to_load, str):
            if nsx_to_load.lower() == 'none':
                nsx_to_load = None
            elif nsx_to_load.lower() == 'all':
                nsx_to_load = self._avail_nsx
            else:
                raise ValueError("Invalid specification of nsx_to_load.")

        if nsx_to_load:
            for nsx_nb in nsx_to_load:
                if not self._avail_files['ns' + str(nsx_nb)]:
                    raise ValueError("ns%i is not available" % nsx_nb)

        return nsx_to_load
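
    # Editorial note (illustrative): __transform_nsx_to_load(5) returns [5],
    # 'all' returns self._avail_nsx, and 'none', None or an empty list all
    # return None; requesting an nsX file that is not part of the file set
    # raises a ValueError.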

    def __transform_channels(self, channels, nsx_to_load):
        """
        Transforms the input argument channels to a list of integers.
        """
        all_channels = []

        nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
        if nsx_to_load is not None:
            for nsx_nb in nsx_to_load:
                all_channels.extend(
                    self.__nsx_ext_header[nsx_nb]['electrode_id'].astype(int))

        elec_id = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
        all_channels.extend(elec_id.astype(int))
        all_channels = np.unique(all_channels).tolist()

        if hasattr(channels, "__len__") and len(channels) == 0:
            channels = None

        if isinstance(channels, int):
            channels = [channels]

        if isinstance(channels, str):
            if channels.lower() == 'none':
                channels = None
            elif channels.lower() == 'all':
                channels = all_channels
            else:
                raise ValueError("Invalid channel specification.")

        if channels:
            if len(set(all_channels) & set(channels)) < len(channels):
                self._print_verbose(
                    "Ignoring unknown channel ID(s) specified in channels.")
            # Make sure all channels are valid and contain no duplicates
            channels = list(set(all_channels).intersection(set(channels)))
        else:
            self._print_verbose("No channel is specified, therefore no "
                                "time series or unit data are loaded.")

        return channels

    def __transform_units(self, units, channels):
        """
        Transforms the input argument units to a dictionary, where keys
        (channels) are int, and values (units) are lists of integers.
        """
        if isinstance(units, dict):
            for ch, u in units.items():
                if ch not in channels:
                    self._print_verbose(
                        "Units contain a channel id which is not listed in "
                        "channels")
                if isinstance(u, int):
                    units[ch] = [u]
                if hasattr(u, '__len__') and len(u) == 0:
                    units[ch] = None
                if isinstance(u, str):
                    if u.lower() == 'none':
                        units[ch] = None
                    elif u.lower() == 'all':
                        units[ch] = list(range(17))
                        units[ch].append(255)
                    else:
                        raise ValueError("Invalid unit specification.")
        else:
            if hasattr(units, "__len__") and len(units) == 0:
                units = None

            if isinstance(units, str):
                if units.lower() == 'none':
                    units = None
                elif units.lower() == 'all':
                    units = list(range(17))
                    units.append(255)
                else:
                    raise ValueError("Invalid unit specification.")

            if isinstance(units, int):
                units = [units]

            if (channels is None) and (units is not None):
                raise ValueError(
                    'At least one channel needs to be loaded to load units')

            if units:
                units = dict(zip(channels, [units] * len(channels)))

        if units is None:
            self._print_verbose("No units are specified, therefore no "
                                "unit or spiketrain is loaded.")

        return units
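
    # Editorial note (illustrative): with channels=[1, 2],
    # __transform_units({1: 5, 2: 'all'}, [1, 2]) returns
    # {1: [5], 2: list(range(17)) + [255]}, while
    # __transform_units('all', [1, 2]) assigns the full unit id list
    # (0-16 plus 255 for noise) to both channels.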

    def __transform_times(self, n, default_n):
        """
        Transforms the input argument n_start or n_stop (n) to a list of
        quantities. In case n is None, it is set to the given default value
        (default_n).
        """
        highest_res = self.__nev_params('event_unit')

        if isinstance(n, pq.Quantity):
            n = [n.rescale(highest_res)]
        elif hasattr(n, "__len__"):
            n = [tp.rescale(highest_res) if tp is not None
                 else default_n for tp in n]
        elif n is None:
            n = [default_n]
        else:
            raise ValueError('Invalid specification of n_start/n_stop.')

        return n

    def __merge_time_ranges(
            self, user_n_starts, user_n_stops, nsx_to_load):
        """
        Merges, after validation, the user-specified n_starts and n_stops with
        the intrinsically given n_starts and n_stops (from, e.g., recording
        pauses) of the file set.

        Final n_starts and n_stops are chosen, so that the time range of each
        resulting segment is set to the best meaningful maximum. This means
        that the duration of the signals stored in the segments might be
        smaller than the actually set duration of the segment.
        """
        # define the highest time resolution
        # (for accurate manipulations of the time settings)
        max_time = self.__get_max_time()
        min_time = self.__get_min_time()
        highest_res = self.__nev_params('event_unit')

        user_n_starts = self.__transform_times(
            user_n_starts, min_time)
        user_n_stops = self.__transform_times(
            user_n_stops, max_time)

        # check if user provided as many n_starts as n_stops
        if len(user_n_starts) != len(user_n_stops):
            raise ValueError("n_starts and n_stops must be of equal length")

        # if necessary clip user n_starts and n_stops to the time range of
        # the file set, and drop entries lying completely outside of it
        start_stop_id = 0
        while start_stop_id < len(user_n_starts):
            if user_n_starts[start_stop_id] < min_time:
                self._print_verbose(
                    "Entry of n_start '{}' is smaller than min time of the "
                    "file set: n_start set to min time of file set"
                    "".format(user_n_starts[start_stop_id]))
                user_n_starts[start_stop_id] = min_time

            if user_n_stops[start_stop_id] > max_time:
                self._print_verbose(
                    "Entry of n_stop '{}' is larger than max time of the "
                    "file set: n_stop set to max time of file set"
                    "".format(user_n_stops[start_stop_id]))
                user_n_stops[start_stop_id] = max_time

            if (user_n_stops[start_stop_id] < min_time
                    or user_n_starts[start_stop_id] > max_time):
                user_n_stops.pop(start_stop_id)
                user_n_starts.pop(start_stop_id)
                self._print_verbose(
                    "Entry of n_start is larger than max time or entry of "
                    "n_stop is smaller than min time of the "
                    "file set: n_start and n_stop are ignored")
                continue

            start_stop_id += 1
        # get intrinsic time settings of nsx files (incl. rec pauses)
        n_starts_files = []
        n_stops_files = []
        if nsx_to_load is not None:
            for nsx_nb in nsx_to_load:
                start_stop = \
                    self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
                n_starts_files.append(start_stop[0])
                n_stops_files.append(start_stop[1])

        # reducing n_starts from wanted nsx files to minima
        # (keep recording pause if it occurs)
        if len(n_starts_files) > 0:
            if np.shape(n_starts_files)[1] > 1:
                n_starts_files = [
                    tp * highest_res for tp in np.min(n_starts_files, axis=1)]
            else:
                n_starts_files = [
                    tp * highest_res for tp in np.min(n_starts_files, axis=0)]

        # reducing n_stops from wanted nsx files to maxima
        # (keep recording pause if it occurs)
        if len(n_stops_files) > 0:
            if np.shape(n_stops_files)[1] > 1:
                n_stops_files = [
                    tp * highest_res for tp in np.max(n_stops_files, axis=1)]
            else:
                n_stops_files = [
                    tp * highest_res for tp in np.max(n_stops_files, axis=0)]
        # merge user time settings with intrinsic nsx time settings
        n_starts = []
        n_stops = []
        for start, stop in zip(user_n_starts, user_n_stops):
            # check if start and stop of user create a positive time interval
            if not start < stop:
                raise ValueError(
                    "t(i) in n_starts has to be smaller than t(i) in n_stops")

            # Reduce n_starts_files to given intervals of user & add start
            if len(n_starts_files) > 0:
                mask = (n_starts_files > start) & (n_starts_files < stop)
                red_n_starts_files = np.array(n_starts_files)[mask]
                merged_n_starts = [start] + [
                    tp * highest_res for tp in red_n_starts_files]
            else:
                merged_n_starts = [start]

            # Reduce n_stops_files to given intervals of user & add stop
            if len(n_stops_files) > 0:
                mask = (n_stops_files > start) & (n_stops_files < stop)
                red_n_stops_files = np.array(n_stops_files)[mask]
                merged_n_stops = [
                    tp * highest_res for tp in red_n_stops_files] + [stop]
            else:
                merged_n_stops = [stop]

            # Define combined user and file n_starts and n_stops
            # case one: equal number of starts and stops
            if len(merged_n_starts) == len(merged_n_stops):
                if len(merged_n_starts) + len(merged_n_stops) == 2:
                    n_starts.extend(merged_n_starts)
                    n_stops.extend(merged_n_stops)
                if len(merged_n_starts) + len(merged_n_stops) > 2:
                    merged_n_starts.remove(merged_n_starts[1])
                    n_starts.extend(merged_n_starts)
                    merged_n_stops.remove(merged_n_stops[-2])
                    n_stops.extend(merged_n_stops)
            # case two: more stops than starts
            elif len(merged_n_starts) < len(merged_n_stops):
                n_starts.extend(merged_n_starts)
                merged_n_stops.remove(merged_n_stops[-2])
                n_stops.extend(merged_n_stops)
            # case three: more starts than stops
            elif len(merged_n_starts) > len(merged_n_stops):
                merged_n_starts.remove(merged_n_starts[1])
                n_starts.extend(merged_n_starts)
                n_stops.extend(merged_n_stops)

        if len(n_starts) > len(user_n_starts) and \
                len(n_stops) > len(user_n_stops):
            self._print_verbose(
                "Additional recording pauses were detected. There will be "
                "more segments than the user expects.")

        return n_starts, n_stops
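
    # Editorial note (illustrative): suppose the user requests a single range
    # of 0-100 s but the nsX files contain a recording pause, i.e. intrinsic
    # blocks covering 0-40 s and 60-100 s. The merge above then yields
    # n_starts = [0 s, 60 s] and n_stops = [40 s, 100 s], i.e. two segments
    # instead of the single one the user asked for (hence the verbose warning
    # about additional recording pauses).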

    def __read_event(self, n_start, n_stop, data, ev_dict, lazy=False):
        """
        Creates an event for non-neural experimental events in nev data.
        """
        event_unit = self.__nev_params('event_unit')

        if lazy:
            times = []
            labels = np.array([], dtype='S')
        else:
            times = data['timestamp'][ev_dict['mask']] * event_unit
            labels = data[ev_dict['field']][ev_dict['mask']].astype(str)

        # mask for given time interval
        mask = (times >= n_start) & (times < n_stop)

        if np.sum(mask) > 0:
            ev = Event(
                times=times[mask].astype(float),
                labels=labels[mask],
                name=ev_dict['name'],
                description=ev_dict['desc'])
            if lazy:
                ev.lazy_shape = np.sum(mask)
        else:
            ev = None

        return ev

    def __read_spiketrain(
            self, n_start, n_stop, spikes, channel_id, unit_id,
            load_waveforms=False, scaling='raw', lazy=False):
        """
        Creates spiketrains for Spikes in nev data.
        """
        event_unit = self.__nev_params('event_unit')

        # define a name for spiketrain
        # (unique identifier: 1000 * elid + unit_nb)
        name = "Unit {0}".format(1000 * channel_id + unit_id)

        # define description for spiketrain
        desc = 'SpikeTrain from channel: {0}, unit: {1}'.format(
            channel_id, self.__get_unit_classification(unit_id))

        # get spike times for given time interval
        if not lazy:
            times = spikes['timestamp'] * event_unit
            mask = (times >= n_start) & (times <= n_stop)
            times = times[mask].astype(float)
        else:
            times = np.array([]) * event_unit

        st = SpikeTrain(
            times=times,
            name=name,
            description=desc,
            file_origin='.'.join([self._filenames['nev'], 'nev']),
            t_start=n_start,
            t_stop=n_stop)

        if lazy:
            st.lazy_shape = np.shape(times)

        # load waveforms if requested
        if load_waveforms and not lazy:
            wf_dtype = self.__nev_params('waveform_dtypes')[channel_id]
            wf_size = self.__nev_params('waveform_size')[channel_id]

            waveforms = spikes['waveform'].flatten().view(wf_dtype)
            waveforms = waveforms.reshape(int(spikes.size), 1, int(wf_size))

            if scaling == 'voltage':
                st.waveforms = (
                    waveforms[mask] * self.__nev_params('waveform_unit')
                    * self.__nev_params('digitization_factor')[channel_id]
                    / 1000.)
            elif scaling == 'raw':
                st.waveforms = waveforms[mask] * pq.dimensionless
            else:
                raise ValueError(
                    'Unknown option {0} for parameter scaling.'.format(
                        scaling))

            st.sampling_rate = self.__nev_params('waveform_sampling_rate')
            st.left_sweep = self.__get_left_sweep_waveforms()[channel_id]

        # add additional annotations
        st.annotate(
            unit_id=int(unit_id),
            channel_id=int(channel_id))

        return st

    def __read_analogsignal(
            self, n_start, n_stop, signal, channel_id, nsx_nb,
            scaling='raw', lazy=False):
        """
        Creates analogsignal for signal of channel in nsx data.
        """
        # TODO: The following part is extremely slow, since the memmaps for
        # the headers are created again and again. In particular, this makes
        # lazy loading slow as well. Solution would be to create header
        # memmaps up front.
        # get parameters
        sampling_rate = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'sampling_rate', nsx_nb)
        nsx_time_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'time_unit', nsx_nb)
        max_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'max_analog_val', nsx_nb)
        min_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'min_analog_val', nsx_nb)
        max_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'max_digital_val', nsx_nb)
        min_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'min_digital_val', nsx_nb)
        units = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'units', nsx_nb)
        labels = self.__nsx_params[self.__nsx_spec[nsx_nb]](
            'labels', nsx_nb)

        dbl_idx = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
            'databl_idx', nsx_nb, n_start, n_stop)
        t_start = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
            'databl_t_start', nsx_nb, n_start, n_stop)
        t_stop = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
            'databl_t_stop', nsx_nb, n_start, n_stop)

        elids_nsx = list(self.__nsx_ext_header[nsx_nb]['electrode_id'])
        if channel_id in elids_nsx:
            idx_ch = elids_nsx.index(channel_id)
        else:
            return None
        description = \
            "AnalogSignal from channel: {0}, label: {1}, nsx: {2}".format(
                channel_id, labels[idx_ch], nsx_nb)

        # TODO: Find a more time/memory efficient way to handle lazy loading
        data_times = np.arange(
            t_start.item(), t_stop.item(),
            self.__nsx_basic_header[nsx_nb]['period']) * t_start.units
        mask = (data_times >= n_start) & (data_times < n_stop)

        if lazy:
            lazy_shape = (np.sum(mask),)
            sig_ch = np.array([], dtype='float32')
            sig_unit = pq.dimensionless
            t_start = n_start.rescale('s')
        else:
            data_times = data_times[mask].astype(float)

            if scaling == 'voltage':
                if not self._avail_files['nev']:
                    raise ValueError(
                        'Cannot convert signals in filespec 2.1 nsX '
                        'files to voltage without nev file.')

                sig_ch = signal[dbl_idx][:, idx_ch][mask].astype('float32')

                # transform dig value to physical value
                sym_ana = (max_ana[idx_ch] == -min_ana[idx_ch])
                sym_dig = (max_dig[idx_ch] == -min_dig[idx_ch])
                if sym_ana and sym_dig:
                    sig_ch *= float(max_ana[idx_ch]) / float(max_dig[idx_ch])
                else:
                    # general case (same result as above for symmetric input)
                    sig_ch -= min_dig[idx_ch]
                    sig_ch *= float(max_ana[idx_ch] - min_ana[idx_ch]) / \
                        float(max_dig[idx_ch] - min_dig[idx_ch])
                    sig_ch += float(min_ana[idx_ch])
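                # Editorial note (illustrative, hypothetical header values):
                # with min_dig = -32768, max_dig = 32767, min_ana = -8192 and
                # max_ana = 8191 (in uV), the general branch above maps a raw
                # sample of 16384 to
                # (16384 + 32768) * (8191 + 8192) / 65535 - 8192 ~= 4095 uV.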
                sig_unit = units[idx_ch].decode()
            elif scaling == 'raw':
                sig_ch = signal[dbl_idx][:, idx_ch][mask].astype(int)
                sig_unit = pq.dimensionless
            else:
                raise ValueError(
                    'Unknown option {0} for parameter '
                    'scaling.'.format(scaling))

            t_start = data_times[0].rescale(nsx_time_unit)

        anasig = AnalogSignal(
            signal=pq.Quantity(sig_ch, sig_unit, copy=False),
            sampling_rate=sampling_rate,
            t_start=t_start,
            name=labels[idx_ch],
            description=description,
            file_origin='.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb]))

        if lazy:
            anasig.lazy_shape = lazy_shape

        anasig.annotate(
            nsx=nsx_nb,
            channel_id=int(channel_id))

        return anasig

    def __read_unit(self, unit_id, channel_id):
        """
        Creates unit with unit id for given channel id.
        """
        # define a name for the unit
        # (unique identifier: 1000 * elid + unit_nb)
        name = "Unit {0}".format(1000 * channel_id + unit_id)

        # define description for the unit
        desc = 'Unit from channel: {0}, id: {1}'.format(
            channel_id, self.__get_unit_classification(unit_id))

        un = Unit(
            name=name,
            description=desc,
            file_origin='.'.join([self._filenames['nev'], 'nev']))

        # add additional annotations
        un.annotate(
            unit_id=int(unit_id),
            channel_id=int(channel_id))

        return un

    def __read_channelindex(
            self, channel_id, index=None, channel_units=None, cascade=True):
        """
        Returns a ChannelIndex with the given index for the given channels
        containing a neo.core.unit.Unit object list of the given units.
        """
        flt_type = {0: 'None', 1: 'Butterworth'}

        chidx = ChannelIndex(
            np.array([channel_id]),
            file_origin=self.filename)

        if index is not None:
            chidx.index = index
            chidx.name = "ChannelIndex {0}".format(chidx.index)
        else:
            chidx.name = "ChannelIndex"

        if self._avail_files['nev']:
            channel_labels = self.__nev_params('channel_labels')
            if channel_labels is not None:
                chidx.channel_names = np.array([channel_labels[channel_id]])
        chidx.channel_ids = np.array([channel_id])

        # additional annotations from nev
        if channel_id in self.__nev_ext_header[b'NEUEVWAV']['electrode_id']:
            get_idx = list(
                self.__nev_ext_header[b'NEUEVWAV']['electrode_id']).index(
                    channel_id)
            chidx.annotate(
                connector_ID=self.__nev_ext_header[
                    b'NEUEVWAV']['physical_connector'][get_idx],
                connector_pinID=self.__nev_ext_header[
                    b'NEUEVWAV']['connector_pin'][get_idx],
                nev_dig_factor=self.__nev_ext_header[
                    b'NEUEVWAV']['digitization_factor'][get_idx],
                nev_energy_threshold=self.__nev_ext_header[
                    b'NEUEVWAV']['energy_threshold'][get_idx] * pq.uV,
                nev_hi_threshold=self.__nev_ext_header[
                    b'NEUEVWAV']['hi_threshold'][get_idx] * pq.uV,
                nev_lo_threshold=self.__nev_ext_header[
                    b'NEUEVWAV']['lo_threshold'][get_idx] * pq.uV,
                nb_sorted_units=self.__nev_ext_header[
                    b'NEUEVWAV']['nb_sorted_units'][get_idx],
                waveform_size=self.__waveform_size[self.__nev_spec](
                )[channel_id] * self.__nev_params('waveform_time_unit'))
        # additional annotations from nev (only for file_spec > 2.1)
        if self.__nev_spec in ['2.2', '2.3']:
            get_idx = list(
                self.__nev_ext_header[
                    b'NEUEVFLT']['electrode_id']).index(
                        channel_id)
            # filter type codes (extracted from blackrock manual)
            chidx.annotate(
                nev_hi_freq_corner=self.__nev_ext_header[b'NEUEVFLT'][
                    'hi_freq_corner'][get_idx]
                / 1000. * pq.Hz,
                nev_hi_freq_order=self.__nev_ext_header[b'NEUEVFLT'][
                    'hi_freq_order'][get_idx],
                nev_hi_freq_type=flt_type[self.__nev_ext_header[
                    b'NEUEVFLT']['hi_freq_type'][get_idx]],
                nev_lo_freq_corner=self.__nev_ext_header[
                    b'NEUEVFLT']['lo_freq_corner'][get_idx]
                / 1000. * pq.Hz,
                nev_lo_freq_order=self.__nev_ext_header[
                    b'NEUEVFLT']['lo_freq_order'][get_idx],
                nev_lo_freq_type=flt_type[self.__nev_ext_header[
                    b'NEUEVFLT']['lo_freq_type'][get_idx]])
        # additional information about the LFP signal
        if self.__nev_spec in ['2.2', '2.3'] and self.__nsx_ext_header:
            # It does not matter which nsX file to ask for this info
            k = list(self.__nsx_ext_header.keys())[0]
            if channel_id in self.__nsx_ext_header[k]['electrode_id']:
                get_idx = list(
                    self.__nsx_ext_header[k]['electrode_id']).index(
                        channel_id)
                chidx.annotate(
                    nsx_hi_freq_corner=self.__nsx_ext_header[k][
                        'hi_freq_corner'][get_idx] / 1000. * pq.Hz,
                    nsx_lo_freq_corner=self.__nsx_ext_header[k][
                        'lo_freq_corner'][get_idx] / 1000. * pq.Hz,
                    nsx_hi_freq_order=self.__nsx_ext_header[k][
                        'hi_freq_order'][get_idx],
                    nsx_lo_freq_order=self.__nsx_ext_header[k][
                        'lo_freq_order'][get_idx],
                    nsx_hi_freq_type=flt_type[
                        self.__nsx_ext_header[k]['hi_freq_type'][get_idx]],
                    nsx_lo_freq_type=flt_type[
                        self.__nsx_ext_header[k]['lo_freq_type'][get_idx]])

        chidx.description = \
            "Container for units, grouping the analogsignals of one " \
            "recording channel across segments."
        if not cascade:
            return chidx

        if self._avail_files['nev']:
            # read nev data
            nev_data = self.__nev_data_reader[self.__nev_spec]()

            if channel_units is not None:
                # extract first data for channel
                ch_mask = (nev_data['Spikes']['packet_id'] == channel_id)
                data_ch = nev_data['Spikes'][ch_mask]

                for un_id in channel_units:
                    if un_id in np.unique(data_ch['unit_class_nb']):
                        un = self.__read_unit(
                            unit_id=un_id, channel_id=channel_id)
                        chidx.units.append(un)

        chidx.create_many_to_one_relationship()

        return chidx

    def read_segment(
            self, n_start, n_stop, name=None, description=None, index=None,
            nsx_to_load='none', channels='none', units='none',
            load_waveforms=False, load_events=False, scaling='raw',
            lazy=False, cascade=True):
        """
        Returns an annotated neo.core.segment.Segment.

        Args:
            n_start (Quantity):
                Start time of maximum time range of signals contained in this
                segment.
            n_stop (Quantity):
                Stop time of maximum time range of signals contained in this
                segment.
            name (None, string):
                If None, name is set to default, otherwise it is set to user
                input.
            description (None, string):
                If None, description is set to default, otherwise it is set
                to user input.
            index (None, int):
                If not None, index of segment is set to user index.
            nsx_to_load (int, list, str):
                ID(s) of nsx file(s) from which to load data, e.g., if set to
                5 only data from the ns5 file are loaded. If 'none' or empty
                list, no nsx files and therefore no analog signals are
                loaded. If 'all', data from all available nsx are loaded.
            channels (int, list, str):
                Channel id(s) from which to load data. If 'none' or empty
                list, no channels and therefore no analog signals or
                spiketrains are loaded. If 'all', all available channels are
                loaded.
            units (int, list, str, dict):
                ID(s) of unit(s) to load. If 'none' or empty list, no units
                and therefore no spiketrains are loaded. If 'all', all
                available units are loaded. If dict, the above can be
                specified individually for each channel (keys), e.g.,
                {1: 5, 2: 'all'} loads unit 5 from channel 1 and all units
                from channel 2.
            load_waveforms (boolean):
                If True, waveforms are attached to all loaded spiketrains.
            load_events (boolean):
                If True, all recorded events are loaded.
            scaling (str):
                Determines whether time series of individual
                electrodes/channels are returned as AnalogSignals containing
                raw integer samples ('raw'), or scaled to arrays of floats
                representing voltage ('voltage'). Note that for file
                specification 2.1 and lower, the option 'voltage' requires a
                nev file to be present.
            lazy (boolean):
                If True, only the shape of the data is loaded.
            cascade (boolean):
                If False, only the segment without children is returned.

        Returns:
            Segment (neo.Segment):
                Returns the specified segment. See documentation of
                `read_block()` for a full list of annotations of all child
                objects.
        """
        # Make sure that input args are transformed into correct instances
        nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
        channels = self.__transform_channels(channels, nsx_to_load)
        units = self.__transform_units(units, channels)

        seg = Segment(file_origin=self.filename)

        # set user defined annotations if they were provided
        if index is None:
            seg.index = 0
        else:
            seg.index = index
        if name is None:
            seg.name = "Segment {0}".format(seg.index)
        else:
            seg.name = name
        if description is None:
            seg.description = "Segment containing data from t_min to t_max."
        else:
            seg.description = description

        if not cascade:
            return seg
        if self._avail_files['nev']:
            # filename = self._filenames['nev'] + '.nev'

            # annotate segment according to file headers
            seg.rec_datetime = datetime.datetime(
                year=self.__nev_basic_header['year'],
                month=self.__nev_basic_header['month'],
                day=self.__nev_basic_header['day'],
                hour=self.__nev_basic_header['hour'],
                minute=self.__nev_basic_header['minute'],
                second=self.__nev_basic_header['second'],
                microsecond=self.__nev_basic_header['millisecond'])

            # read nev data
            nev_data = self.__nev_data_reader[self.__nev_spec]()

            # read non-neural experimental events
            if load_events:
                ev_dict = self.__nonneural_evtypes[self.__nev_spec](
                    nev_data['NonNeural'])
                for ev_type in ev_dict.keys():
                    ev = self.__read_event(
                        n_start=n_start,
                        n_stop=n_stop,
                        data=nev_data['NonNeural'],
                        ev_dict=ev_dict[ev_type],
                        lazy=lazy)
                    if ev is not None:
                        seg.events.append(ev)

                # TODO: not yet implemented (only avail in nev_spec 2.3)
                # videosync events
                # trackingevents events
                # buttontrigger events
                # configevent events
            # get spiketrains
            if units is not None:
                not_existing_units = []
                for ch_id in units.keys():
                    # extract first data for channel
                    ch_mask = (nev_data['Spikes']['packet_id'] == ch_id)
                    data_ch = nev_data['Spikes'][ch_mask]
                    if units[ch_id] is not None:
                        for un_id in units[ch_id]:
                            if un_id in np.unique(data_ch['unit_class_nb']):
                                # then extract data for unit, if it exists
                                un_mask = (data_ch['unit_class_nb'] == un_id)
                                data_un = data_ch[un_mask]

                                st = self.__read_spiketrain(
                                    n_start=n_start,
                                    n_stop=n_stop,
                                    spikes=data_un,
                                    channel_id=ch_id,
                                    unit_id=un_id,
                                    load_waveforms=load_waveforms,
                                    scaling=scaling,
                                    lazy=lazy)

                                seg.spiketrains.append(st)
                            else:
                                not_existing_units.append(un_id)

                        if not_existing_units:
                            self._print_verbose(
                                "Units {0} on channel {1} do not "
                                "exist".format(not_existing_units, ch_id))
                    else:
                        self._print_verbose(
                            "There are no units specified for channel "
                            "{0}".format(ch_id))
        if nsx_to_load is not None:
            for nsx_nb in nsx_to_load:
                # read nsx data
                nsx_data = \
                    self.__nsx_data_reader[self.__nsx_spec[nsx_nb]](nsx_nb)

                # read Analogsignals
                for ch_id in channels:
                    anasig = self.__read_analogsignal(
                        n_start=n_start,
                        n_stop=n_stop,
                        signal=nsx_data,
                        channel_id=ch_id,
                        nsx_nb=nsx_nb,
                        scaling=scaling,
                        lazy=lazy)

                    if anasig is not None:
                        seg.analogsignals.append(anasig)

        # TODO: not yet implemented
        # if self._avail_files['sif']:
        #     sif_header = self._read_sif(self._filenames['sif'] + '.sif')

        # TODO: not yet implemented
        # if self._avail_files['ccf']:
        #     ccf_header = self._read_sif(self._filenames['ccf'] + '.ccf')

        seg.create_many_to_one_relationship()

        return seg

    def read_block(
            self, index=None, name=None, description=None, nsx_to_load='none',
            n_starts=None, n_stops=None, channels='none', units='none',
            load_waveforms=False, load_events=False, scaling='raw',
            lazy=False, cascade=True):
        """
        Returns an annotated neo.core.block.Block.

        Args:
            index (None, int):
                If not None, index of block is set to user input.
            name (None, str):
                If None, name is set to default, otherwise it is set to user
                input.
            description (None, str):
                If None, description is set to default, otherwise it is set
                to user input.
            nsx_to_load (int, list, str):
                ID(s) of nsx file(s) from which to load data, e.g., if set to
                5 only data from the ns5 file are loaded. If 'none' or empty
                list, no nsx files and therefore no analog signals are
                loaded. If 'all', data from all available nsx are loaded.
            n_starts (None, Quantity, list):
                Start times for data in each segment. Number of entries must
                be equal to length of n_stops. If None, intrinsic recording
                start times of the file set are used.
            n_stops (None, Quantity, list):
                Stop times for data in each segment. Number of entries must
                be equal to length of n_starts. If None, intrinsic recording
                stop times of the file set are used.
            channels (int, list, str):
                Channel id(s) from which to load data. If 'none' or empty
                list, no channels and therefore no analog signals or
                spiketrains are loaded. If 'all', all available channels are
                loaded.
            units (int, list, str, dict):
                ID(s) of unit(s) to load. If 'none' or empty list, no units
                and therefore no spiketrains are loaded. If 'all', all
                available units are loaded. If dict, the above can be
                specified individually for each channel (keys), e.g.,
                {1: 5, 2: 'all'} loads unit 5 from channel 1 and all units
                from channel 2.
            load_waveforms (boolean):
                If True, waveforms are attached to all loaded spiketrains.
            load_events (boolean):
                If True, all recorded events are loaded.
            scaling (str):
                Determines whether time series of individual
                electrodes/channels are returned as AnalogSignals containing
                raw integer samples ('raw'), or scaled to arrays of floats
                representing voltage ('voltage'). Note that for file
                specification 2.1 and lower, the option 'voltage' requires a
                nev file to be present.
            lazy (bool):
                If True, only the shape of the data is loaded.
            cascade (bool or "lazy"):
                If False, only the block without children is returned.
        Returns:
            Block (neo.Block):
                Block linking all loaded Neo objects.

                Block annotations:
                    avail_file_set (list):
                        List of extensions of all available files for the
                        given recording.
                    avail_nsx (list of int):
                        List of integers specifying the .nsX files available,
                        e.g., [2, 5] indicates that an ns2 and an ns5 file
                        are available.
                    avail_nev (bool):
                        True if a .nev file is available.
                    avail_ccf (bool):
                        True if a .ccf file is available.
                    avail_sif (bool):
                        True if a .sif file is available.
                    rec_pauses (bool):
                        True if the session contains a recording pause (i.e.,
                        multiple segments).
                    nb_segments (int):
                        Number of segments created after merging recording
                        times specified by the user with the intrinsic ones
                        of the file set.

                Segment annotations:
                    None.

                ChannelIndex annotations:
                    waveform_size (Quantity):
                        Length of time used to save spike waveforms (in units
                        of 1/30000 s).
                    nev_hi_freq_corner (Quantity),
                    nev_lo_freq_corner (Quantity),
                    nev_hi_freq_order (int), nev_lo_freq_order (int),
                    nev_hi_freq_type (str), nev_lo_freq_type (str),
                    nev_hi_threshold, nev_lo_threshold,
                    nev_energy_threshold (Quantity):
                        Indicates parameters of spike detection.
                    nsx_hi_freq_corner (Quantity),
                    nsx_lo_freq_corner (Quantity),
                    nsx_hi_freq_order (int), nsx_lo_freq_order (int),
                    nsx_hi_freq_type (str), nsx_lo_freq_type (str):
                        Indicates parameters of the filtered signal in one of
                        the files ns1-ns5 (ns6, if available, is not
                        filtered).
                    nev_dig_factor (int):
                        Digitization factor in microvolts of the nev file,
                        used to convert raw samples to volt.
                    connector_ID, connector_pinID (int):
                        ID of connector and pin on the connector where the
                        channel was recorded from.
                    nb_sorted_units (int):
                        Number of sorted units on this channel (noise, mua
                        and sua).
                Unit annotations:
                    unit_id (int):
                        ID of the unit.
                    channel_id (int):
                        Channel ID (Blackrock ID) from which the unit was
                        loaded (equiv. to the single list entry in the
                        attribute channel_ids of the ChannelIndex parent).

                AnalogSignal annotations:
                    nsx (int):
                        nsX file the signal was loaded from, e.g., 5
                        indicates the .ns5 file.
                    channel_id (int):
                        Channel ID (Blackrock ID) from which the signal was
                        loaded.

                SpikeTrain annotations:
                    unit_id (int):
                        ID of the unit from which the spikes were recorded.
                    channel_id (int):
                        Channel ID (Blackrock ID) from which the spikes were
                        loaded.

                Event annotations:
                    The resulting Block contains one Event object with the
                    name `digital_input_port`. It contains all digitally
                    recorded events, with the event code coded in the labels
                    of the Event. The Event object contains no further
                    annotation.
        """
        # Make sure that input args are transformed into correct instances
        nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
        channels = self.__transform_channels(channels, nsx_to_load)
        units = self.__transform_units(units, channels)

        # Create block
        bl = Block(file_origin=self.filename)

        # set user defined annotations if they were provided
        if index is not None:
            bl.index = index
        if name is None:
            bl.name = "Blackrock Data Block"
        else:
            bl.name = name
        if description is None:
            bl.description = "Block of data from Blackrock file set."
        else:
            bl.description = description
        if self._avail_files['nev']:
            bl.rec_datetime = self.__nev_params('rec_datetime')

        bl.annotate(
            avail_file_set=[k for k, v in self._avail_files.items() if v])
        bl.annotate(avail_nsx=self._avail_nsx)
        bl.annotate(avail_nev=self._avail_files['nev'])
        bl.annotate(avail_sif=self._avail_files['sif'])
        bl.annotate(avail_ccf=self._avail_files['ccf'])
        bl.annotate(rec_pauses=False)

        # Test n_starts and n_stops user requirements and combine them if
        # possible with file internal n_starts and n_stops from rec pauses.
        n_starts, n_stops = \
            self.__merge_time_ranges(n_starts, n_stops, nsx_to_load)

        bl.annotate(nb_segments=len(n_starts))

        if not cascade:
            return bl
        # read segments
        for seg_idx, (n_start, n_stop) in enumerate(zip(n_starts, n_stops)):
            seg = self.read_segment(
                n_start=n_start,
                n_stop=n_stop,
                index=seg_idx,
                nsx_to_load=nsx_to_load,
                channels=channels,
                units=units,
                load_waveforms=load_waveforms,
                load_events=load_events,
                scaling=scaling,
                lazy=lazy,
                cascade=cascade)

            bl.segments.append(seg)

        # read channelindexes
        if channels:
            for ch_id in channels:
                if units and ch_id in units.keys():
                    ch_units = units[ch_id]
                else:
                    ch_units = None

                chidx = self.__read_channelindex(
                    channel_id=ch_id,
                    index=0,
                    channel_units=ch_units,
                    cascade=cascade)

                for seg in bl.segments:
                    if ch_units:
                        for un in chidx.units:
                            sts = seg.filter(
                                targdict={'name': un.name},
                                objects='SpikeTrain')
                            for st in sts:
                                un.spiketrains.append(st)

                    anasigs = seg.filter(
                        targdict={'channel_id': ch_id},
                        objects='AnalogSignal')
                    for anasig in anasigs:
                        chidx.analogsignals.append(anasig)

                bl.channel_indexes.append(chidx)

        bl.create_many_to_one_relationship()

        return bl

    def __str__(self):
        """
        Prints summary of the Blackrock data file set.
        """
        output = "\nFile Origins for Blackrock File Set\n" \
                 "====================================\n"
        for ftype in self._filenames.keys():
            output += ftype + ':' + self._filenames[ftype] + '\n'

        if self._avail_files['nev']:
            output += "\nEvent Parameters (NEV)\n" \
                      "====================================\n" \
                      "Timestamp resolution (Hz): " + \
                      str(self.__nev_basic_header['timestamp_resolution']) + \
                      "\nWaveform resolution (Hz): " + \
                      str(self.__nev_basic_header['sample_resolution'])

            if b'NEUEVWAV' in self.__nev_ext_header.keys():
                avail_el = \
                    self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
                con = \
                    self.__nev_ext_header[b'NEUEVWAV']['physical_connector']
                pin = \
                    self.__nev_ext_header[b'NEUEVWAV']['connector_pin']
                nb_units = \
                    self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units']

                output += "\n\nAvailable electrode IDs:\n" \
                          "====================================\n"
                for i, el in enumerate(avail_el):
                    output += "Electrode ID %i: " % el

                    channel_labels = self.__nev_params('channel_labels')
                    if channel_labels is not None:
                        output += "label %s: " % channel_labels[el]

                    output += "connector: %i, " % con[i]
                    output += "pin: %i, " % pin[i]
                    output += 'nb_units: %i\n' % nb_units[i]

        for nsx_nb in self._avail_nsx:
            analog_res = self.__nsx_params[self.__nsx_spec[nsx_nb]](
                'sampling_rate', nsx_nb)
            avail_el = [
                el for el in self.__nsx_ext_header[nsx_nb]['electrode_id']]

            output += "\nAnalog Parameters (NS" \
                + str(nsx_nb) + ")\n===================================="
            output += "\nResolution (Hz): %i" % analog_res
            output += "\nAvailable channel IDs: " + \
                      ", ".join(["%i" % a for a in avail_el]) + "\n"

        return output