blackrockio.py

  1. # -*- coding: utf-8 -*-
  2. """
  3. Module for reading data from files in the Blackrock format.
  4. This work is based on:
  5. * Chris Rodgers - first version
  6. * Michael Denker, Lyuba Zehl - second version
  7. * Samuel Garcia - third version
  8. * Lyuba Zehl, Michael Denker - fourth version
  9. This IO supports reading only.
  10. This IO is able to read:
  11. * the nev file which contains spikes
  12. * ns1, ns2, .., ns6 files that contain signals at different sampling rates
  13. This IO can handle the following Blackrock file specifications:
  14. * 2.1
  15. * 2.2
  16. * 2.3
  17. The neural data channels are 1 - 128.
  18. The analog inputs are 129 - 144. (129 - 137 AC coupled, 138 - 144 DC coupled)
  19. "nev": "spike- and event-data; 30000 Hz",
  20. "ns1": "analog data: 500 Hz",
  21. "ns2": "analog data: 1000 Hz",
  22. "ns3": "analog data: 2000 Hz",
  23. "ns4": "analog data: 10000 Hz",
  24. "ns5": "analog data: 30000 Hz",
  25. "ns6": "analog data: 30000 Hz (no digital filter)"
  26. TODO:
  27. * videosync events (file spec 2.3)
  28. * tracking events (file spec 2.3)
  29. * buttontrigger events (file spec 2.3)
  30. * config events (file spec 2.3)
  31. * check left sweep settings of Blackrock
  32. * check nsx offsets (file spec 2.1)
  33. * add info of nev ext header (NSASEXEV) to non-neural events
  34. (file spec 2.1 and 2.2)
  35. * read sif file information
  36. * read ccf file information
  37. * fix reading of periodic sampling events (non-neural event type)
  38. (file spec 2.1 and 2.2)
  39. """
  40. from __future__ import division
  41. import datetime
  42. import os
  43. import re
  44. import numpy as np
  45. import quantities as pq
  46. import neo
  47. from neo.io.baseio import BaseIO
  48. from neo.core import (Block, Segment, SpikeTrain, Unit, Event,
  49. ChannelIndex, AnalogSignal)
  50. if __name__ == '__main__':
  51. pass
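# Illustrative usage (a minimal sketch; 'myfile' is an assumed example base
# name, not shipped test data). Given a recording saved as myfile.ns2 and
# myfile.nev, the IO is constructed from the common base name, and data are
# then read with the read_block()/read_segment() methods of this class:
#
#     >>> from neo.io import BlackrockIO
#     >>> io = BlackrockIO('myfile', verbose=True)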
  52. class BlackrockIO(BaseIO):
  53. """
  54. Class for reading data in from a file set recorded by the Blackrock
  55. (Cerebus) recording system.
  56. Upon initialization, the class is linked to the available set of Blackrock
  57. files. Data can be read as a neo Block or neo Segment object using the
  58. read_block or read_segment function, respectively.
  59. Note: This routine will handle files according to specification 2.1, 2.2,
  60. and 2.3. Recording pauses that may occur in file specifications 2.2 and
  61. 2.3 are automatically extracted and the data set is split into different
  62. segments.
  63. Inherits from:
  64. neo.io.BaseIO
  65. The Blackrock data format consists not of a single file, but a set of
  66. different files. This constructor associates itself with a set of files
  67. that constitute a common data set. By default, all files belonging to
  68. the file set have the same base name, but different extensions.
  69. However, by using the override parameters, individual filenames can
  70. be set.
  71. Args:
  72. filename (string):
  73. File name (without extension) of the set of Blackrock files to
  74. associate with. Any .nsX or .nev, .sif, or .ccf extensions are
  75. ignored when parsing this parameter.
  76. nsx_override (string):
  77. File name of the .nsX files (without extension). If None,
  78. filename is used.
  79. Default: None.
  80. nev_override (string):
  81. File name of the .nev file (without extension). If None,
  82. filename is used.
  83. Default: None.
  84. sif_override (string):
  85. File name of the .sif file (without extension). If None,
  86. filename is used.
  87. Default: None.
  88. ccf_override (string):
  89. File name of the .ccf file (without extension). If None,
  90. filename is used.
  91. Default: None.
  92. verbose (boolean):
  93. If True, the class will output additional diagnostic
  94. information on stdout.
  95. Default: False
  96. Returns:
  97. -
  98. Examples:
  99. >>> a = BlackrockIO('myfile')
  100. Loads a set of files consisting of myfile.ns1, ...,
  101. myfile.ns6, and myfile.nev
  102. >>> b = BlackrockIO('myfile', nev_override='sorted')
  103. Loads the analog data from the set of files myfile.ns1, ...,
  104. myfile.ns6, but reads spike/event data from sorted.nev
  105. """
  106. # Class variables demonstrating capabilities of this IO
  107. is_readable = True
  108. is_writable = False
  109. # This IO can only manipulate continuous data, spikes, and events
  110. supported_objects = [
  111. Block, Segment, Event, AnalogSignal, SpikeTrain, Unit, ChannelIndex]
  112. readable_objects = [Block, Segment]
  113. writeable_objects = []
  114. has_header = False
  115. is_streameable = False
  116. read_params = {
  117. neo.Block: [
  118. ('nsx_to_load', {
  119. 'value': 'none',
  120. 'label': "List of nsx files (ids, int) to read."}),
  121. ('n_starts', {
  122. 'value': None,
  123. 'label': "List of n_start points (Quantity) to create "
  124. "segments from."}),
  125. ('n_stops', {
  126. 'value': None,
  127. 'label': "List of n_stop points (Quantity) to create "
  128. "segments from."}),
  129. ('channels', {
  130. 'value': 'none',
  131. 'label': "List of channels (ids, int) to load data from."}),
  132. ('units', {
  133. 'value': 'none',
  134. 'label': "Dictionary for units (values, list of int) to load "
  135. "for each channel (key, int)."}),
  136. ('load_waveforms', {
  137. 'value': False,
  138. 'label': "States if waveforms should be loaded and attached "
  139. "to spiketrain"}),
  140. ('load_events', {
  141. 'value': False,
  142. 'label': "States if events should be loaded."})],
  143. neo.Segment: [
  144. ('n_start', {
  145. 'label': "Start time point (Quantity) for segment"}),
  146. ('n_stop', {
  147. 'label': "Stop time point (Quantity) for segment"}),
  148. ('nsx_to_load', {
  149. 'value': 'none',
  150. 'label': "List of nsx files (ids, int) to read."}),
  151. ('channels', {
  152. 'value': 'none',
  153. 'label': "List of channels (ids, int) to load data from."}),
  154. ('units', {
  155. 'value': 'none',
  156. 'label': "Dictionary for units (values, list of int) to load "
  157. "for each channel (key, int)."}),
  158. ('load_waveforms', {
  159. 'value': False,
  160. 'label': "States if waveforms should be loaded and attached "
  161. "to spiketrain"}),
  162. ('load_events', {
  163. 'value': False,
  164. 'label': "States if events should be loaded."})]}
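# The read_params above document the keyword arguments available when reading
# a Block or Segment. A hedged sketch of a corresponding call, continuing the
# construction example near the top of this module (keyword names are taken
# from read_params; the actual read_block signature defined later in this
# module may accept additional arguments):
#
#     >>> blk = io.read_block(nsx_to_load=[2], channels=[1, 2, 3],
#     ...                     units={1: [1, 2]}, load_waveforms=True,
#     ...                     load_events=True)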
  165. write_params = {}
  166. name = 'Blackrock IO'
  167. description = "This IO reads .nev/.nsX files of the Blackrock " + \
  168. "(Cerebus) recording system."
  169. # The possible file extensions of the Cerebus system and their content:
  170. # ns1: contains analog data; sampled at 500 Hz (+ digital filters)
  171. # ns2: contains analog data; sampled at 1000 Hz (+ digital filters)
  172. # ns3: contains analog data; sampled at 2000 Hz (+ digital filters)
  173. # ns4: contains analog data; sampled at 10000 Hz (+ digital filters)
  174. # ns5: contains analog data; sampled at 30000 Hz (+ digital filters)
  175. # ns6: contains analog data; sampled at 30000 Hz (no digital filters)
  176. # nev: contains spike- and event-data; sampled at 30000 Hz
  177. # sif: contains institution and patient info (XML)
  178. # ccf: contains Cerebus configurations
  179. extensions = ['ns' + str(_) for _ in range(1, 7)]
  180. extensions.extend(['nev', 'sif', 'ccf'])
  181. mode = 'file'
  182. def __init__(self, filename, nsx_override=None, nev_override=None,
  183. sif_override=None, ccf_override=None, verbose=False):
  184. """
  185. Initialize the BlackrockIO class.
  186. """
  187. BaseIO.__init__(self)
  188. self._verbose = verbose  # controls diagnostic output (see _print_verbose)
  189. self.__verbose_messages = []  # avoids repeating identical verbose messages
  190. # remove any known extension from the base filename
  191. self.filename = filename
  192. for ext in self.extensions:
  193. self.filename = re.sub(os.path.extsep + ext + '$', '', self.filename)
  194. # remove extensions from overrides
  195. self._filenames = {}
  196. if nsx_override:
  197. self._filenames['nsx'] = re.sub(
  198. os.path.extsep + 'ns[1-6]$', '', nsx_override)
  199. else:
  200. self._filenames['nsx'] = self.filename
  201. if nev_override:
  202. self._filenames['nev'] = re.sub(
  203. os.path.extsep + 'nev$', '', nev_override)
  204. else:
  205. self._filenames['nev'] = self.filename
  206. if sif_override:
  207. self._filenames['sif'] = re.sub(
  208. os.path.extsep + 'sif$', '', sif_override)
  209. else:
  210. self._filenames['sif'] = self.filename
  211. if ccf_override:
  212. self._filenames['ccf'] = re.sub(
  213. os.path.extsep + 'ccf$', '', ccf_override)
  214. else:
  215. self._filenames['ccf'] = self.filename
  216. # check which files are available
  217. self._avail_files = dict.fromkeys(self.extensions, False)
  218. self._avail_nsx = []
  219. for ext in self.extensions:
  220. if ext.startswith('ns'):
  221. file2check = ''.join(
  222. [self._filenames['nsx'], os.path.extsep, ext])
  223. else:
  224. file2check = ''.join(
  225. [self._filenames[ext], os.path.extsep, ext])
  226. if os.path.exists(file2check):
  227. self._print_verbose("Found " + file2check + ".")
  228. self._avail_files[ext] = True
  229. if ext.startswith('ns'):
  230. self._avail_nsx.append(int(ext[-1]))
  231. # These dictionaries are used internally to map the file specification
  232. # revision of the nsx and nev files to one of the reading routines
  233. self.__nsx_header_reader = {
  234. '2.1': self.__read_nsx_header_variant_a,
  235. '2.2': self.__read_nsx_header_variant_b,
  236. '2.3': self.__read_nsx_header_variant_b}
  237. self.__nsx_dataheader_reader = {
  238. '2.1': self.__read_nsx_dataheader_variant_a,
  239. '2.2': self.__read_nsx_dataheader_variant_b,
  240. '2.3': self.__read_nsx_dataheader_variant_b}
  241. self.__nsx_data_reader = {
  242. '2.1': self.__read_nsx_data_variant_a,
  243. '2.2': self.__read_nsx_data_variant_b,
  244. '2.3': self.__read_nsx_data_variant_b}
  245. self.__nev_header_reader = {
  246. '2.1': self.__read_nev_header_variant_a,
  247. '2.2': self.__read_nev_header_variant_b,
  248. '2.3': self.__read_nev_header_variant_c}
  249. self.__nev_data_reader = {
  250. '2.1': self.__read_nev_data_variant_a,
  251. '2.2': self.__read_nev_data_variant_a,
  252. '2.3': self.__read_nev_data_variant_b}
  253. self.__nsx_params = {
  254. '2.1': self.__get_nsx_param_variant_a,
  255. '2.2': self.__get_nsx_param_variant_b,
  256. '2.3': self.__get_nsx_param_variant_b}
  257. self.__nsx_databl_param = {
  258. '2.1': self.__get_nsx_databl_param_variant_a,
  259. '2.2': self.__get_nsx_databl_param_variant_b,
  260. '2.3': self.__get_nsx_databl_param_variant_b}
  261. self.__waveform_size = {
  262. '2.1': self.__get_waveform_size_variant_a,
  263. '2.2': self.__get_waveform_size_variant_a,
  264. '2.3': self.__get_waveform_size_variant_b}
  265. self.__channel_labels = {
  266. '2.1': self.__get_channel_labels_variant_a,
  267. '2.2': self.__get_channel_labels_variant_b,
  268. '2.3': self.__get_channel_labels_variant_b}
  269. self.__nsx_rec_times = {
  270. '2.1': self.__get_nsx_rec_times_variant_a,
  271. '2.2': self.__get_nsx_rec_times_variant_b,
  272. '2.3': self.__get_nsx_rec_times_variant_b}
  273. self.__nonneural_evtypes = {
  274. '2.1': self.__get_nonneural_evtypes_variant_a,
  275. '2.2': self.__get_nonneural_evtypes_variant_a,
  276. '2.3': self.__get_nonneural_evtypes_variant_b}
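# Note: each dictionary above maps a file specification string to the concrete
# reading routine for that revision, so the rest of the class can stay
# spec-agnostic. For example, for a 2.3 nsx file,
# self.__nsx_header_reader[self.__nsx_spec[nsx_nb]](nsx_nb) dispatches to
# __read_nsx_header_variant_b, while a 2.1 file dispatches to variant_a.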
  277. # Load file spec and headers of available nev file
  278. if self._avail_files['nev']:
  279. # read nev file specification
  280. self.__nev_spec = self.__extract_nev_file_spec()
  281. self._print_verbose('Specification Version ' + self.__nev_spec)
  282. # read nev headers
  283. self.__nev_basic_header, self.__nev_ext_header = \
  284. self.__nev_header_reader[self.__nev_spec]()
  285. # Load file spec and headers of available nsx files
  286. self.__nsx_spec = {}
  287. self.__nsx_basic_header = {}
  288. self.__nsx_ext_header = {}
  289. self.__nsx_data_header = {}
  290. for nsx_nb in self._avail_nsx:
  291. # read nsx file specification
  292. self.__nsx_spec[nsx_nb] = self.__extract_nsx_file_spec(nsx_nb)
  293. # read nsx headers
  294. self.__nsx_basic_header[nsx_nb], self.__nsx_ext_header[nsx_nb] = \
  295. self.__nsx_header_reader[self.__nsx_spec[nsx_nb]](nsx_nb)
  296. # Read nsx data header(s) for nsx
  297. self.__nsx_data_header[nsx_nb] = self.__nsx_dataheader_reader[
  298. self.__nsx_spec[nsx_nb]](nsx_nb)
  299. def _print_verbose(self, text):
  300. """
  301. Print a verbose diagnostic message (string).
  302. """
  303. if self._verbose:
  304. if text not in self.__verbose_messages:
  305. self.__verbose_messages.append(text)
  306. print(str(self.__class__.__name__) + ': ' + text)
  307. def __extract_nsx_file_spec(self, nsx_nb):
  308. """
  309. Extract file specification from an .nsx file.
  310. """
  311. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  312. # Header structure of files specification 2.2 and higher. For files 2.1
  313. # and lower, the entries ver_major and ver_minor are not supported.
  314. dt0 = [
  315. ('file_id', 'S8'),
  316. ('ver_major', 'uint8'),
  317. ('ver_minor', 'uint8')]
  318. nsx_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  319. if nsx_file_id['file_id'].decode() == 'NEURALSG':
  320. spec = '2.1'
  321. elif nsx_file_id['file_id'].decode() == 'NEURALCD':
  322. spec = '{0}.{1}'.format(
  323. nsx_file_id['ver_major'], nsx_file_id['ver_minor'])
  324. else:
  325. raise IOError('Unsupported NSX file type.')
  326. return spec
  327. def __extract_nev_file_spec(self):
  328. """
  329. Extract file specification from an .nev file.
  330. """
  331. filename = '.'.join([self._filenames['nev'], 'nev'])
  332. # Header structure of files specification 2.2 and higher. For files 2.1
  333. # and lower, the entries ver_major and ver_minor are not supported.
  334. dt0 = [
  335. ('file_id', 'S8'),
  336. ('ver_major', 'uint8'),
  337. ('ver_minor', 'uint8')]
  338. nev_file_id = np.fromfile(filename, count=1, dtype=dt0)[0]
  339. if nev_file_id['file_id'].decode() == 'NEURALEV':
  340. spec = '{0}.{1}'.format(
  341. nev_file_id['ver_major'], nev_file_id['ver_minor'])
  342. else:
  343. raise IOError('NEV file type {0} is not supported'.format(
  344. nev_file_id['file_id']))
  345. return spec
  346. def __read_nsx_header_variant_a(self, nsx_nb):
  347. """
  348. Extract nsx header information from a 2.1 .nsx file
  349. """
  350. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  351. # basic header (file_id: NEURALSG)
  352. dt0 = [
  353. ('file_id', 'S8'),
  354. # label of the sampling group (e.g. "1kS/s" or "LFP Low")
  355. ('label', 'S16'),
  356. # number of 1/30000 seconds between data points
  357. # (e.g., if sampling rate "1 kS/s", period equals "30")
  358. ('period', 'uint32'),
  359. ('channel_count', 'uint32')]
  360. nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  361. # "extended" header (last field of file_id: NEURALSG)
  362. # (to facilitate compatibility with higher file specs)
  363. offset_dt0 = np.dtype(dt0).itemsize
  364. shape = nsx_basic_header['channel_count']
  365. # originally called channel_id in Blackrock user manual
  366. # (to facilitate compatibility with higher file specs)
  367. dt1 = [('electrode_id', 'uint32')]
  368. nsx_ext_header = np.memmap(
  369. filename, shape=shape, offset=offset_dt0, dtype=dt1)
  370. return nsx_basic_header, nsx_ext_header
  371. def __read_nsx_header_variant_b(self, nsx_nb):
  372. """
  373. Extract nsx header information from a 2.2 or 2.3 .nsx file
  374. """
  375. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  376. # basic header (file_id: NEURALCD)
  377. dt0 = [
  378. ('file_id', 'S8'),
  379. # file specification split into major and minor version number
  380. ('ver_major', 'uint8'),
  381. ('ver_minor', 'uint8'),
  382. # bytes of basic & extended header
  383. ('bytes_in_headers', 'uint32'),
  384. # label of the sampling group (e.g., "1 kS/s" or "LFP low")
  385. ('label', 'S16'),
  386. ('comment', 'S256'),
  387. ('period', 'uint32'),
  388. ('timestamp_resolution', 'uint32'),
  389. # time origin: 2-byte uint16 values for ...
  390. ('year', 'uint16'),
  391. ('month', 'uint16'),
  392. ('weekday', 'uint16'),
  393. ('day', 'uint16'),
  394. ('hour', 'uint16'),
  395. ('minute', 'uint16'),
  396. ('second', 'uint16'),
  397. ('millisecond', 'uint16'),
  398. # number of channels; matches the number of extended headers
  399. ('channel_count', 'uint32')]
  400. nsx_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  401. # extended header (type: CC)
  402. offset_dt0 = np.dtype(dt0).itemsize
  403. shape = nsx_basic_header['channel_count']
  404. dt1 = [
  405. ('type', 'S2'),
  406. ('electrode_id', 'uint16'),
  407. ('electrode_label', 'S16'),
  408. # used front-end amplifier bank (e.g., A, B, C, D)
  409. ('physical_connector', 'uint8'),
  410. # used connector pin (e.g., 1-37 on bank A, B, C or D)
  411. ('connector_pin', 'uint8'),
  412. # digital and analog value ranges of the signal
  413. ('min_digital_val', 'int16'),
  414. ('max_digital_val', 'int16'),
  415. ('min_analog_val', 'int16'),
  416. ('max_analog_val', 'int16'),
  417. # units of the analog range values ("mV" or "uV")
  418. ('units', 'S16'),
  419. # filter settings used to create nsx from source signal
  420. ('hi_freq_corner', 'uint32'),
  421. ('hi_freq_order', 'uint32'),
  422. ('hi_freq_type', 'uint16'), # 0=None, 1=Butterworth
  423. ('lo_freq_corner', 'uint32'),
  424. ('lo_freq_order', 'uint32'),
  425. ('lo_freq_type', 'uint16')] # 0=None, 1=Butterworth
  426. nsx_ext_header = np.memmap(
  427. filename, shape=shape, offset=offset_dt0, dtype=dt1)
  428. return nsx_basic_header, nsx_ext_header
  429. def __read_nsx_dataheader(self, nsx_nb, offset):
  430. """
  431. Reads data header following the given offset of an nsx file.
  432. """
  433. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  434. # dtypes data header
  435. dt2 = [
  436. ('header', 'uint8'),
  437. ('timestamp', 'uint32'),
  438. ('nb_data_points', 'uint32')]
  439. return np.memmap(filename, dtype=dt2, shape=1, offset=offset)[0]
  440. def __read_nsx_dataheader_variant_a(
  441. self, nsx_nb, filesize=None, offset=None):
  442. """
  443. Returns None for the nsx data header of file spec 2.1. Introduced to
  444. facilitate compatibility with higher file spec.
  445. """
  446. return None
  447. def __read_nsx_dataheader_variant_b(
  448. self, nsx_nb, filesize=None, offset=None):
  449. """
  450. Reads the nsx data header for each data block following the offset of
  451. file spec 2.2 and 2.3.
  452. """
  453. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  454. filesize = self.__get_file_size(filename)
  455. data_header = {}
  456. index = 0
  457. if offset is None:
  458. offset = self.__nsx_basic_header[nsx_nb]['bytes_in_headers']
  459. while offset < filesize:
  460. index += 1
  461. dh = self.__read_nsx_dataheader(nsx_nb, offset)
  462. data_header[index] = {
  463. 'header': dh['header'],
  464. 'timestamp': dh['timestamp'],
  465. 'nb_data_points': dh['nb_data_points'],
  466. 'offset_to_data_block': offset + dh.dtype.itemsize}
  467. # data size = number of data points * (2bytes * number of channels)
  468. # use of `int` avoids overflow problem
  469. data_size = int(dh['nb_data_points']) * \
  470. int(self.__nsx_basic_header[nsx_nb]['channel_count']) * 2
  471. # define new offset (to possible next data block)
  472. offset = data_header[index]['offset_to_data_block'] + data_size
  473. return data_header
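# The returned data_header is a dict indexed by data block number (1-based),
# with one entry per data block (blocks arise from recording pauses), e.g.
# (values below are placeholders for illustration, not real data):
#
#     {1: {'header': 1, 'timestamp': 0, 'nb_data_points': 300000,
#          'offset_to_data_block': 9194},
#      2: {...}}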
  474. def __read_nsx_data_variant_a(self, nsx_nb):
  475. """
  476. Extract nsx data from a 2.1 .nsx file
  477. """
  478. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  479. # get shape of data
  480. shape = (
  481. self.__nsx_databl_param['2.1']('nb_data_points', nsx_nb),
  482. self.__nsx_basic_header[nsx_nb]['channel_count'])
  483. offset = self.__nsx_params['2.1']('bytes_in_headers', nsx_nb)
  484. # read nsx data
  485. # store as dict for compatibility with higher file specs
  486. data = {1: np.memmap(
  487. filename, dtype='int16', shape=shape, offset=offset)}
  488. return data
  489. def __read_nsx_data_variant_b(self, nsx_nb):
  490. """
  491. Extract nsx data (blocks) from a 2.2 or 2.3 .nsx file. Blocks can arise
  492. if the recording was paused by the user.
  493. """
  494. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  495. data = {}
  496. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  497. # get shape and offset of data
  498. shape = (
  499. self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points'],
  500. self.__nsx_basic_header[nsx_nb]['channel_count'])
  501. offset = \
  502. self.__nsx_data_header[nsx_nb][data_bl]['offset_to_data_block']
  503. # read data
  504. data[data_bl] = np.memmap(
  505. filename, dtype='int16', shape=shape, offset=offset)
  506. return data
  507. def __read_nev_header(self, ext_header_variants):
  508. """
  509. Extract nev header information from a .nev file (all file specs).
  510. """
  511. filename = '.'.join([self._filenames['nev'], 'nev'])
  512. # basic header
  513. dt0 = [
  514. # Set to "NEURALEV"
  515. ('file_type_id', 'S8'),
  516. ('ver_major', 'uint8'),
  517. ('ver_minor', 'uint8'),
  518. # Flags
  519. ('additionnal_flags', 'uint16'),
  520. # File index of first data sample
  521. ('bytes_in_headers', 'uint32'),
  522. # Number of bytes per data packet (sample)
  523. ('bytes_in_data_packets', 'uint32'),
  524. # Time resolution of time stamps in Hz
  525. ('timestamp_resolution', 'uint32'),
  526. # Sampling frequency of waveforms in Hz
  527. ('sample_resolution', 'uint32'),
  528. ('year', 'uint16'),
  529. ('month', 'uint16'),
  530. ('weekday', 'uint16'),
  531. ('day', 'uint16'),
  532. ('hour', 'uint16'),
  533. ('minute', 'uint16'),
  534. ('second', 'uint16'),
  535. ('millisecond', 'uint16'),
  536. ('application_to_create_file', 'S32'),
  537. ('comment_field', 'S256'),
  538. # Number of extended headers
  539. ('nb_ext_headers', 'uint32')]
  540. nev_basic_header = np.fromfile(filename, count=1, dtype=dt0)[0]
  541. # extended header
  542. # the extended header consists of N blocks, each with an 8-byte packet id
  543. # followed by 24 data bytes; the data bytes depend on the packet id and
  544. # need to be converted case by case
  545. shape = nev_basic_header['nb_ext_headers']
  546. offset_dt0 = np.dtype(dt0).itemsize
  547. # This is the common structure of the beginning of extended headers
  548. dt1 = [
  549. ('packet_id', 'S8'),
  550. ('info_field', 'S24')]
  551. raw_ext_header = np.memmap(
  552. filename, offset=offset_dt0, dtype=dt1, shape=shape)
  553. nev_ext_header = {}
  554. for packet_id in ext_header_variants.keys():
  555. mask = (raw_ext_header['packet_id'] == packet_id)
  556. dt2 = self.__nev_ext_header_types()[packet_id][
  557. ext_header_variants[packet_id]]
  558. nev_ext_header[packet_id] = raw_ext_header.view(dt2)[mask]
  559. return nev_basic_header, nev_ext_header
  560. def __read_nev_header_variant_a(self):
  561. """
  562. Extract nev header information from a 2.1 .nev file
  563. """
  564. ext_header_variants = {
  565. b'NEUEVWAV': 'a',
  566. b'ARRAYNME': 'a',
  567. b'ECOMMENT': 'a',
  568. b'CCOMMENT': 'a',
  569. b'MAPFILE': 'a',
  570. b'NSASEXEV': 'a'}
  571. return self.__read_nev_header(ext_header_variants)
  572. def __read_nev_header_variant_b(self):
  573. """
  574. Extract nev header information from a 2.2 .nev file
  575. """
  576. ext_header_variants = {
  577. b'NEUEVWAV': 'b',
  578. b'ARRAYNME': 'a',
  579. b'ECOMMENT': 'a',
  580. b'CCOMMENT': 'a',
  581. b'MAPFILE': 'a',
  582. b'NEUEVLBL': 'a',
  583. b'NEUEVFLT': 'a',
  584. b'DIGLABEL': 'a',
  585. b'NSASEXEV': 'a'}
  586. return self.__read_nev_header(ext_header_variants)
  587. def __read_nev_header_variant_c(self):
  588. """
  589. Extract nev header information from a 2.3 .nev file
  590. """
  591. ext_header_variants = {
  592. b'NEUEVWAV': 'b',
  593. b'ARRAYNME': 'a',
  594. b'ECOMMENT': 'a',
  595. b'CCOMMENT': 'a',
  596. b'MAPFILE': 'a',
  597. b'NEUEVLBL': 'a',
  598. b'NEUEVFLT': 'a',
  599. b'DIGLABEL': 'a',
  600. b'VIDEOSYN': 'a',
  601. b'TRACKOBJ': 'a'}
  602. return self.__read_nev_header(ext_header_variants)
  603. def __read_nev_data(self, nev_data_masks, nev_data_types):
  604. """
  605. Extract nev data from a .nev file (all file specs).
  606. """
  607. filename = '.'.join([self._filenames['nev'], 'nev'])
  608. data_size = self.__nev_basic_header['bytes_in_data_packets']
  609. header_size = self.__nev_basic_header['bytes_in_headers']
  610. # read all raw data packets and markers
  611. dt0 = [
  612. ('timestamp', 'uint32'),
  613. ('packet_id', 'uint16'),
  614. ('value', 'S{0}'.format(data_size - 6))]
  615. raw_data = np.memmap(filename, offset=header_size, dtype=dt0)
  616. masks = self.__nev_data_masks(raw_data['packet_id'])
  617. types = self.__nev_data_types(data_size)
  618. data = {}
  619. for k, v in nev_data_masks.items():
  620. data[k] = raw_data.view(types[k][nev_data_types[k]])[masks[k][v]]
  621. return data
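# Sketch of how masks and types combine in __read_nev_data above (for
# illustration only): for a 2.3 file, spike packets are selected with
# masks['Spikes']['b'], i.e. 0 < packet_id <= 2048, and re-interpreted with
# types['Spikes']['a'], so the bytes following the 8-byte packet prefix
# (timestamp, packet_id, unit_class_nb, reserved) are exposed as the raw
# 'waveform' field of each spike packet.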
  622. def __read_nev_data_variant_a(self):
  623. """
  624. Extract nev data from a 2.1 & 2.2 .nev file
  625. """
  626. nev_data_masks = {
  627. 'NonNeural': 'a',
  628. 'Spikes': 'a'}
  629. nev_data_types = {
  630. 'NonNeural': 'a',
  631. 'Spikes': 'a'}
  632. return self.__read_nev_data(nev_data_masks, nev_data_types)
  633. def __read_nev_data_variant_b(self):
  634. """
  635. Extract nev data from a 2.3 .nev file
  636. """
  637. nev_data_masks = {
  638. 'NonNeural': 'a',
  639. 'Spikes': 'b',
  640. 'Comments': 'a',
  641. 'VideoSync': 'a',
  642. 'TrackingEvents': 'a',
  643. 'ButtonTrigger': 'a',
  644. 'ConfigEvent': 'a'}
  645. nev_data_types = {
  646. 'NonNeural': 'b',
  647. 'Spikes': 'a',
  648. 'Comments': 'a',
  649. 'VideoSync': 'a',
  650. 'TrackingEvents': 'a',
  651. 'ButtonTrigger': 'a',
  652. 'ConfigEvent': 'a'}
  653. return self.__read_nev_data(nev_data_masks, nev_data_types)
  654. def __nev_ext_header_types(self):
  655. """
  656. Defines extended header types for different .nev file specifications.
  657. """
  658. nev_ext_header_types = {
  659. b'NEUEVWAV': {
  660. # Version>=2.1
  661. 'a': [
  662. ('packet_id', 'S8'),
  663. ('electrode_id', 'uint16'),
  664. ('physical_connector', 'uint8'),
  665. ('connector_pin', 'uint8'),
  666. ('digitization_factor', 'uint16'),
  667. ('energy_threshold', 'uint16'),
  668. ('hi_threshold', 'int16'),
  669. ('lo_threshold', 'int16'),
  670. ('nb_sorted_units', 'uint8'),
  671. # number of bytes per waveform sample
  672. ('bytes_per_waveform', 'uint8'),
  673. ('unused', 'S10')],
  674. # Version>=2.3
  675. 'b': [
  676. ('packet_id', 'S8'),
  677. ('electrode_id', 'uint16'),
  678. ('physical_connector', 'uint8'),
  679. ('connector_pin', 'uint8'),
  680. ('digitization_factor', 'uint16'),
  681. ('energy_threshold', 'uint16'),
  682. ('hi_threshold', 'int16'),
  683. ('lo_threshold', 'int16'),
  684. ('nb_sorted_units', 'uint8'),
  685. # number of bytes per waveform sample
  686. ('bytes_per_waveform', 'uint8'),
  687. # number of samples for each waveform
  688. ('spike_width', 'uint16'),
  689. ('unused', 'S8')]},
  690. b'ARRAYNME': {
  691. 'a': [
  692. ('packet_id', 'S8'),
  693. ('electrode_array_name', 'S24')]},
  694. b'ECOMMENT': {
  695. 'a': [
  696. ('packet_id', 'S8'),
  697. ('extra_comment', 'S24')]},
  698. b'CCOMMENT': {
  699. 'a': [
  700. ('packet_id', 'S8'),
  701. ('continued_comment', 'S24')]},
  702. b'MAPFILE': {
  703. 'a': [
  704. ('packet_id', 'S8'),
  705. ('mapFile', 'S24')]},
  706. b'NEUEVLBL': {
  707. 'a': [
  708. ('packet_id', 'S8'),
  709. ('electrode_id', 'uint16'),
  710. # label of this electrode
  711. ('label', 'S16'),
  712. ('unused', 'S6')]},
  713. b'NEUEVFLT': {
  714. 'a': [
  715. ('packet_id', 'S8'),
  716. ('electrode_id', 'uint16'),
  717. ('hi_freq_corner', 'uint32'),
  718. ('hi_freq_order', 'uint32'),
  719. # 0=None 1=Butterworth
  720. ('hi_freq_type', 'uint16'),
  721. ('lo_freq_corner', 'uint32'),
  722. ('lo_freq_order', 'uint32'),
  723. # 0=None 1=Butterworth
  724. ('lo_freq_type', 'uint16'),
  725. ('unused', 'S2')]},
  726. b'DIGLABEL': {
  727. 'a': [
  728. ('packet_id', 'S8'),
  729. # name of the digital channel (serial or parallel port)
  730. ('label', 'S16'),
  731. # 0=serial, 1=parallel
  732. ('mode', 'uint8'),
  733. ('unused', 'S7')]},
  734. b'NSASEXEV': {
  735. 'a': [
  736. ('packet_id', 'S8'),
  737. # Read frequency of periodic packet generation
  738. ('frequency', 'uint16'),
  739. # Read if digital input triggers events
  740. ('digital_input_config', 'uint8'),
  741. # Read if analog input triggers events
  742. ('analog_channel_1_config', 'uint8'),
  743. ('analog_channel_1_edge_detec_val', 'uint16'),
  744. ('analog_channel_2_config', 'uint8'),
  745. ('analog_channel_2_edge_detec_val', 'uint16'),
  746. ('analog_channel_3_config', 'uint8'),
  747. ('analog_channel_3_edge_detec_val', 'uint16'),
  748. ('analog_channel_4_config', 'uint8'),
  749. ('analog_channel_4_edge_detec_val', 'uint16'),
  750. ('analog_channel_5_config', 'uint8'),
  751. ('analog_channel_5_edge_detec_val', 'uint16'),
  752. ('unused', 'S6')]},
  753. b'VIDEOSYN': {
  754. 'a': [
  755. ('packet_id', 'S8'),
  756. ('video_source_id', 'uint16'),
  757. ('video_source', 'S16'),
  758. ('frame_rate', 'float32'),
  759. ('unused', 'S2')]},
  760. b'TRACKOBJ': {
  761. 'a': [
  762. ('packet_id', 'S8'),
  763. ('trackable_type', 'uint16'),
  764. ('trackable_id', 'uint16'),
  765. ('point_count', 'uint16'),
  766. ('video_source', 'S16'),
  767. ('unused', 'S2')]}}
  768. return nev_ext_header_types
  769. def __nev_data_masks(self, packet_ids):
  770. """
  771. Defines data masks for different .nev file specifications depending on
  772. the given packet identifiers.
  773. """
  774. __nev_data_masks = {
  775. 'NonNeural': {
  776. 'a': (packet_ids == 0)},
  777. 'Spikes': {
  778. # Version 2.1 & 2.2
  779. 'a': (0 < packet_ids) & (packet_ids <= 255),
  780. # Version>=2.3
  781. 'b': (0 < packet_ids) & (packet_ids <= 2048)},
  782. 'Comments': {
  783. 'a': (packet_ids == 0xFFFF)},
  784. 'VideoSync': {
  785. 'a': (packet_ids == 0xFFFE)},
  786. 'TrackingEvents': {
  787. 'a': (packet_ids == 0xFFFD)},
  788. 'ButtonTrigger': {
  789. 'a': (packet_ids == 0xFFFC)},
  790. 'ConfigEvent': {
  791. 'a': (packet_ids == 0xFFFB)}}
  792. return __nev_data_masks
  793. def __nev_data_types(self, data_size):
  794. """
  795. Defines data types for different .nev file specifications depending on
  796. the given packet identifiers.
  797. """
  798. __nev_data_types = {
  799. 'NonNeural': {
  800. # Version 2.1 & 2.2
  801. 'a': [
  802. ('timestamp', 'uint32'),
  803. ('packet_id', 'uint16'),
  804. ('packet_insertion_reason', 'uint8'),
  805. ('reserved', 'uint8'),
  806. ('digital_input', 'uint16'),
  807. ('analog_input_channel_1', 'int16'),
  808. ('analog_input_channel_2', 'int16'),
  809. ('analog_input_channel_3', 'int16'),
  810. ('analog_input_channel_4', 'int16'),
  811. ('analog_input_channel_5', 'int16'),
  812. ('unused', 'S{0}'.format(data_size - 20))],
  813. # Version>=2.3
  814. 'b': [
  815. ('timestamp', 'uint32'),
  816. ('packet_id', 'uint16'),
  817. ('packet_insertion_reason', 'uint8'),
  818. ('reserved', 'uint8'),
  819. ('digital_input', 'uint16'),
  820. ('unused', 'S{0}'.format(data_size - 10))]},
  821. 'Spikes': {
  822. 'a': [
  823. ('timestamp', 'uint32'),
  824. ('packet_id', 'uint16'),
  825. ('unit_class_nb', 'uint8'),
  826. ('reserved', 'uint8'),
  827. ('waveform', 'S{0}'.format(data_size - 8))]},
  828. 'Comments': {
  829. 'a': [
  830. ('timestamp', 'uint32'),
  831. ('packet_id', 'uint16'),
  832. ('char_set', 'uint8'),
  833. ('flag', 'uint8'),
  834. ('data', 'uint32'),
  835. ('comment', 'S{0}'.format(data_size - 12))]},
  836. 'VideoSync': {
  837. 'a': [
  838. ('timestamp', 'uint32'),
  839. ('packet_id', 'uint16'),
  840. ('video_file_nb', 'uint16'),
  841. ('video_frame_nb', 'uint32'),
  842. ('video_elapsed_time', 'uint32'),
  843. ('video_source_id', 'uint32'),
  844. ('unused', 'int8', (data_size - 20, ))]},
  845. 'TrackingEvents': {
  846. 'a': [
  847. ('timestamp', 'uint32'),
  848. ('packet_id', 'uint16'),
  849. ('parent_id', 'uint16'),
  850. ('node_id', 'uint16'),
  851. ('node_count', 'uint16'),
  852. ('point_count', 'uint16'),
  853. ('tracking_points', 'uint16', ((data_size - 14) // 2, ))]},
  854. 'ButtonTrigger': {
  855. 'a': [
  856. ('timestamp', 'uint32'),
  857. ('packet_id', 'uint16'),
  858. ('trigger_type', 'uint16'),
  859. ('unused', 'int8', (data_size - 8, ))]},
  860. 'ConfigEvent': {
  861. 'a': [
  862. ('timestamp', 'uint32'),
  863. ('packet_id', 'uint16'),
  864. ('config_change_type', 'uint16'),
  865. ('config_changed', 'S{0}'.format(data_size - 8))]}}
  866. return __nev_data_types
  867. def __nev_params(self, param_name):
  868. """
  869. Returns the requested nev parameter.
  870. """
  871. nev_parameters = {
  872. 'bytes_in_data_packets':
  873. self.__nev_basic_header['bytes_in_data_packets'],
  874. 'rec_datetime': datetime.datetime(
  875. year=self.__nev_basic_header['year'],
  876. month=self.__nev_basic_header['month'],
  877. day=self.__nev_basic_header['day'],
  878. hour=self.__nev_basic_header['hour'],
  879. minute=self.__nev_basic_header['minute'],
  880. second=self.__nev_basic_header['second'],
  881. microsecond=self.__nev_basic_header['millisecond']),
  882. 'max_res': self.__nev_basic_header['timestamp_resolution'],
  883. 'channel_ids': self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  884. 'channel_labels': self.__channel_labels[self.__nev_spec](),
  885. 'event_unit': pq.CompoundUnit("1.0/{0} * s".format(
  886. self.__nev_basic_header['timestamp_resolution'])),
  887. 'nb_units': dict(zip(
  888. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  889. self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units'])),
  890. 'digitization_factor': dict(zip(
  891. self.__nev_ext_header[b'NEUEVWAV']['electrode_id'],
  892. self.__nev_ext_header[b'NEUEVWAV']['digitization_factor'])),
  893. 'data_size': self.__nev_basic_header['bytes_in_data_packets'],
  894. 'waveform_size': self.__waveform_size[self.__nev_spec](),
  895. 'waveform_dtypes': self.__get_waveforms_dtype(),
  896. 'waveform_sampling_rate':
  897. self.__nev_basic_header['sample_resolution'] * pq.Hz,
  898. 'waveform_time_unit': pq.CompoundUnit("1.0/{0} * s".format(
  899. self.__nev_basic_header['sample_resolution'])),
  900. 'waveform_unit': pq.uV}
  901. return nev_parameters[param_name]
  902. def __get_file_size(self, filename):
  903. """
  904. Returns the file size in bytes for the given file.
  905. """
  906. filebuf = open(filename, 'rb')
  907. filebuf.seek(0, os.SEEK_END)
  908. file_size = filebuf.tell()
  909. filebuf.close()
  910. return file_size
  911. def __get_min_time(self):
  912. """
  913. Returns the smallest time that can be determined from the recording for
  914. use as the lower bound n in an interval [n,m).
  915. """
  916. tp = []
  917. if self._avail_files['nev']:
  918. tp.extend(self.__get_nev_rec_times()[0])
  919. for nsx_i in self._avail_nsx:
  920. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[0])
  921. return min(tp)
  922. def __get_max_time(self):
  923. """
  924. Returns the largest time that can be determined from the recording for
  925. use as the upper bound m in an interval [n,m).
  926. """
  927. tp = []
  928. if self._avail_files['nev']:
  929. tp.extend(self.__get_nev_rec_times()[1])
  930. for nsx_i in self._avail_nsx:
  931. tp.extend(self.__nsx_rec_times[self.__nsx_spec[nsx_i]](nsx_i)[1])
  932. return max(tp)
  933. def __get_nev_rec_times(self):
  934. """
  935. Extracts minimum and maximum time points from a nev file.
  936. """
  937. filename = '.'.join([self._filenames['nev'], 'nev'])
  938. dt = [('timestamp', 'uint32')]
  939. offset = \
  940. self.__get_file_size(filename) - \
  941. self.__nev_params('bytes_in_data_packets')
  942. last_data_packet = np.memmap(filename, offset=offset, dtype=dt)[0]
  943. n_starts = [0 * self.__nev_params('event_unit')]
  944. n_stops = [
  945. last_data_packet['timestamp'] * self.__nev_params('event_unit')]
  946. return n_starts, n_stops
  947. def __get_nsx_rec_times_variant_a(self, nsx_nb):
  948. """
  949. Extracts minimum and maximum time points from a 2.1 nsx file.
  950. """
  951. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  952. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  953. 'time_unit', nsx_nb)
  954. highest_res = self.__nev_params('event_unit')
  955. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  956. 'bytes_in_headers', nsx_nb)
  957. nb_data_points = int(
  958. (self.__get_file_size(filename) - bytes_in_headers) /
  959. (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1)
  960. # add n_start
  961. n_starts = [(0 * t_unit).rescale(highest_res)]
  962. # add n_stop
  963. n_stops = [(nb_data_points * t_unit).rescale(highest_res)]
  964. return n_starts, n_stops
  965. def __get_nsx_rec_times_variant_b(self, nsx_nb):
  966. """
  967. Extracts minimum and maximum time points from a 2.2 or 2.3 nsx file.
  968. """
  969. t_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  970. 'time_unit', nsx_nb)
  971. highest_res = self.__nev_params('event_unit')
  972. n_starts = []
  973. n_stops = []
  974. # add n-start and n_stop for all data blocks
  975. for data_bl in self.__nsx_data_header[nsx_nb].keys():
  976. ts0 = self.__nsx_data_header[nsx_nb][data_bl]['timestamp']
  977. nbdp = self.__nsx_data_header[nsx_nb][data_bl]['nb_data_points']
  978. # add n_start
  979. start = ts0 * t_unit
  980. n_starts.append(start.rescale(highest_res))
  981. # add n_stop
  982. stop = start + nbdp * t_unit
  983. n_stops.append(stop.rescale(highest_res))
  984. return sorted(n_starts), sorted(n_stops)
  985. def __get_waveforms_dtype(self):
  986. """
  987. Extracts the actual waveform dtype set for each channel.
  988. """
  989. # Blackrock code giving the appropriate dtype
  990. conv = {0: 'int8', 1: 'int8', 2: 'int16', 4: 'int32'}
  991. # get all electrode ids from nev ext header
  992. all_el_ids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  993. # get the dtype of waveform (this is stupidly complicated)
  994. if self.__is_set(
  995. np.array(self.__nev_basic_header['additionnal_flags']), 0):
  996. dtype_waveforms = dict((k, 'int16') for k in all_el_ids)
  997. else:
  998. # extract bytes per waveform
  999. waveform_bytes = \
  1000. self.__nev_ext_header[b'NEUEVWAV']['bytes_per_waveform']
  1001. # extract dtype of waveforms for each electrode
  1002. dtype_waveforms = dict((el, conv[b]) for el, b in zip(all_el_ids, waveform_bytes))
  1003. return dtype_waveforms
  1004. def __get_channel_labels_variant_a(self):
  1005. """
  1006. Returns labels for all channels for file spec 2.1
  1007. """
  1008. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1009. labels = []
  1010. for elid in elids:
  1011. if elid < 129:
  1012. labels.append('chan%i' % elid)
  1013. else:
  1014. labels.append('ainp%i' % (elid - 129 + 1))
  1015. return dict(zip(elids, labels))
  1016. def __get_channel_labels_variant_b(self):
  1017. """
  1018. Returns labels for all channels for file spec 2.2 and 2.3
  1019. """
  1020. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1021. labels = self.__nev_ext_header[b'NEUEVLBL']['label']
  1022. return dict(zip(elids, labels)) if len(labels) > 0 else None
  1023. def __get_waveform_size_variant_a(self):
  1024. """
  1025. Returns waveform sizes for all channels for file spec 2.1 and 2.2
  1026. """
  1027. wf_dtypes = self.__get_waveforms_dtype()
  1028. nb_bytes_wf = self.__nev_basic_header['bytes_in_data_packets'] - 8
  1029. wf_sizes = dict([
  1030. (ch, int(nb_bytes_wf / np.dtype(dt).itemsize)) for ch, dt in
  1031. wf_dtypes.items()])
  1032. return wf_sizes
  1033. def __get_waveform_size_variant_b(self):
  1034. """
  1035. Returns waveform sizes for all channels for file spec 2.3
  1036. """
  1037. elids = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1038. spike_widths = self.__nev_ext_header[b'NEUEVWAV']['spike_width']
  1039. return dict(zip(elids, spike_widths))
  1040. def __get_left_sweep_waveforms(self):
  1041. """
  1042. Returns left sweep of waveforms for each channel. Left sweep is defined
  1043. as the time from the beginning of the waveform to the trigger time of
  1044. the corresponding spike.
  1045. """
  1046. # TODO: Double check if this is the actual setting for Blackrock
  1047. wf_t_unit = self.__nev_params('waveform_time_unit')
  1048. all_ch = self.__nev_params('channel_ids')
  1049. # TODO: Double check if this is the correct assumption (10 samples)
  1050. # default value: threshold crossing after 10 samples of waveform
  1051. wf_left_sweep = dict([(ch, 10 * wf_t_unit) for ch in all_ch])
  1052. # non-default: threshold crossing at center of waveform
  1053. # wf_size = self.__nev_params('waveform_size')
  1054. # wf_left_sweep = dict(
  1055. # [(ch, (wf_size[ch] / 2) * wf_t_unit) for ch in all_ch])
  1056. return wf_left_sweep
  1057. def __get_nsx_param_variant_a(self, param_name, nsx_nb):
  1058. """
  1059. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1060. 2.1.
  1061. """
  1062. # Here, min/max_analog_val and min/max_digital_val are not available in
  1063. # the nsx, so that we must estimate these parameters from the
  1064. # digitization factor of the nev (information by Kian Torab, Blackrock
  1065. # Microsystems). Here dig_factor=max_analog_val/max_digital_val. We set
  1066. # max_digital_val to 1000, and max_analog_val=dig_factor. dig_factor is
  1067. # given in nV by definition, so the units turn out to be uV.
  1068. labels = []
  1069. dig_factor = []
  1070. for elid in self.__nsx_ext_header[nsx_nb]['electrode_id']:
  1071. if self._avail_files['nev']:
  1072. # This is a workaround for the DigitalFactor overflow in NEV
  1073. # files recorded with buggy Cerebus system.
  1074. # Fix taken from: NMPK toolbox by Blackrock,
  1075. # file openNEV, line 464,
  1076. # git rev. d0a25eac902704a3a29fa5dfd3aed0744f4733ed
  1077. df = self.__nev_params('digitization_factor')[elid]
  1078. if df == 21516:
  1079. df = 152592.547
  1080. dig_factor.append(df)
  1081. else:
  1082. dig_factor.append(None)
  1083. if elid < 129:
  1084. labels.append('chan%i' % elid)
  1085. else:
  1086. labels.append('ainp%i' % (elid - 129 + 1))
  1087. nsx_parameters = {
  1088. 'labels': labels,
  1089. 'units': np.array(
  1090. ['uV'] *
  1091. self.__nsx_basic_header[nsx_nb]['channel_count']),
  1092. 'min_analog_val': -1 * np.array(dig_factor),
  1093. 'max_analog_val': np.array(dig_factor),
  1094. 'min_digital_val': np.array(
  1095. [-1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1096. 'max_digital_val': np.array(
  1097. [1000] * self.__nsx_basic_header[nsx_nb]['channel_count']),
  1098. 'timestamp_resolution': 30000,
  1099. 'bytes_in_headers':
  1100. self.__nsx_basic_header[nsx_nb].dtype.itemsize +
  1101. self.__nsx_ext_header[nsx_nb].dtype.itemsize *
  1102. self.__nsx_basic_header[nsx_nb]['channel_count'],
  1103. 'sampling_rate':
  1104. 30000 / self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1105. 'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
  1106. 30000 / self.__nsx_basic_header[nsx_nb]['period']))}
  1107. return nsx_parameters[param_name]
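# Worked example of the scaling convention above (a digitization factor of
# 250 nV/LSB is an illustrative value, not taken from a real header): with
# max_digital_val = 1000 and max_analog_val = dig_factor = 250 in 'uV' units,
# a raw sample of 400 corresponds to 400 * 250 / 1000 = 100 uV (equivalently
# 400 * 250 nV), i.e. the digitization factor directly gives nV per raw LSB.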
  1108. def __get_nsx_param_variant_b(self, param_name, nsx_nb):
  1109. """
  1110. Returns parameter (param_name) for a given nsx (nsx_nb) for file spec
  1111. 2.2 and 2.3.
  1112. """
  1113. nsx_parameters = {
  1114. 'labels':
  1115. self.__nsx_ext_header[nsx_nb]['electrode_label'],
  1116. 'units':
  1117. self.__nsx_ext_header[nsx_nb]['units'],
  1118. 'min_analog_val':
  1119. self.__nsx_ext_header[nsx_nb]['min_analog_val'],
  1120. 'max_analog_val':
  1121. self.__nsx_ext_header[nsx_nb]['max_analog_val'],
  1122. 'min_digital_val':
  1123. self.__nsx_ext_header[nsx_nb]['min_digital_val'],
  1124. 'max_digital_val':
  1125. self.__nsx_ext_header[nsx_nb]['max_digital_val'],
  1126. 'timestamp_resolution':
  1127. self.__nsx_basic_header[nsx_nb]['timestamp_resolution'],
  1128. 'bytes_in_headers':
  1129. self.__nsx_basic_header[nsx_nb]['bytes_in_headers'],
  1130. 'sampling_rate':
  1131. self.__nsx_basic_header[nsx_nb]['timestamp_resolution'] /
  1132. self.__nsx_basic_header[nsx_nb]['period'] * pq.Hz,
  1133. 'time_unit': pq.CompoundUnit("1.0/{0}*s".format(
  1134. self.__nsx_basic_header[nsx_nb]['timestamp_resolution'] /
  1135. self.__nsx_basic_header[nsx_nb]['period']))}
  1136. return nsx_parameters[param_name]
  1137. def __get_nsx_databl_param_variant_a(
  1138. self, param_name, nsx_nb, n_start=None, n_stop=None):
  1139. """
  1140. Returns data block parameter (param_name) for a given nsx (nsx_nb) for
  1141. file spec 2.1. Arg 'n_start' should not be specified! It is only set
  1142. for compatibility reasons with higher file spec.
  1143. """
  1144. filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
  1145. t_starts, t_stops = \
  1146. self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
  1147. bytes_in_headers = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1148. 'bytes_in_headers', nsx_nb)
  1149. # extract parameters from nsx basic extended and data header
  1150. data_parameters = {
  1151. 'nb_data_points': int(
  1152. (self.__get_file_size(filename) - bytes_in_headers) /
  1153. (2 * self.__nsx_basic_header[nsx_nb]['channel_count']) - 1),
  1154. 'databl_idx': 1,
  1155. 'databl_t_start': t_starts[0],
  1156. 'databl_t_stop': t_stops[0]}
  1157. return data_parameters[param_name]
  1158. def __get_nsx_databl_param_variant_b(
  1159. self, param_name, nsx_nb, n_start, n_stop):
  1160. """
  1161. Returns data block parameter (param_name) for a given nsx (nsx_nb) with
  1162. a wanted n_start for file spec 2.2 and 2.3.
  1163. """
  1164. t_starts, t_stops = \
  1165. self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
  1166. # data header
  1167. for d_bl in self.__nsx_data_header[nsx_nb].keys():
  1168. # from "data header" with corresponding t_start and t_stop
  1169. data_parameters = {
  1170. 'nb_data_points':
  1171. self.__nsx_data_header[nsx_nb][d_bl]['nb_data_points'],
  1172. 'databl_idx': d_bl,
  1173. 'databl_t_start': t_starts[d_bl - 1],
  1174. 'databl_t_stop': t_stops[d_bl - 1]}
  1175. if t_starts[d_bl - 1] <= n_start < n_stop <= t_stops[d_bl - 1]:
  1176. return data_parameters[param_name]
  1177. elif n_start < t_starts[d_bl - 1] < n_stop <= t_stops[d_bl - 1]:
  1178. self._print_verbose(
  1179. "User n_start ({0}) is smaller than the corresponding "
  1180. "t_start of the available ns{1} datablock "
  1181. "({2}).".format(n_start, nsx_nb, t_starts[d_bl - 1]))
  1182. return data_parameters[param_name]
  1183. elif t_starts[d_bl - 1] <= n_start < t_stops[d_bl - 1] < n_stop:
  1184. self._print_verbose(
  1185. "User n_stop ({0}) is larger than the corresponding "
  1186. "t_stop of the available ns{1} datablock "
  1187. "({2}).".format(n_stop, nsx_nb, t_stops[d_bl - 1]))
  1188. return data_parameters[param_name]
  1189. elif n_start < t_starts[d_bl - 1] < t_stops[d_bl - 1] < n_stop:
  1190. self._print_verbose(
  1191. "User n_start ({0}) is smaller than the corresponding "
  1192. "t_start and user n_stop ({1}) is larger than the "
  1193. "corresponding t_stop of the available ns{2} datablock "
  1194. "({3}).".format(
  1195. n_start, n_stop, nsx_nb,
  1196. (t_starts[d_bl - 1], t_stops[d_bl - 1])))
  1197. return data_parameters[param_name]
  1198. else:
  1199. continue
  1200. raise ValueError(
  1201. "User n_start and n_stop are all smaller or larger than the "
  1202. "t_start and t_stops of all available ns%i datablocks" % nsx_nb)
  1203. def __get_nonneural_evtypes_variant_a(self, data):
  1204. """
  1205. Defines event types and the necessary parameters to extract them from
  1206. a 2.1 and 2.2 nev file.
  1207. """
1208. # TODO: add annotations of nev ext header (NSASEXEV) to event types
  1209. # digital events
  1210. event_types = {
  1211. 'digital_input_port': {
  1212. 'name': 'digital_input_port',
  1213. 'field': 'digital_input',
  1214. 'mask': self.__is_set(data['packet_insertion_reason'], 0),
  1215. 'desc': "Events of the digital input port"},
  1216. 'serial_input_port': {
  1217. 'name': 'serial_input_port',
  1218. 'field': 'digital_input',
  1219. 'mask':
  1220. self.__is_set(data['packet_insertion_reason'], 0) &
  1221. self.__is_set(data['packet_insertion_reason'], 7),
  1222. 'desc': "Events of the serial input port"}}
  1223. # analog input events via threshold crossings
  1224. for ch in range(5):
  1225. event_types.update({
  1226. 'analog_input_channel_{0}'.format(ch + 1): {
  1227. 'name': 'analog_input_channel_{0}'.format(ch + 1),
  1228. 'field': 'analog_input_channel_{0}'.format(ch + 1),
  1229. 'mask': self.__is_set(
  1230. data['packet_insertion_reason'], ch + 1),
  1231. 'desc': "Values of analog input channel {0} in mV "
  1232. "(+/- 5000)".format(ch + 1)}})
  1233. # TODO: define field and desc
  1234. event_types.update({
  1235. 'periodic_sampling_events': {
  1236. 'name': 'periodic_sampling_events',
  1237. 'field': 'digital_input',
  1238. 'mask': self.__is_set(data['packet_insertion_reason'], 6),
  1239. 'desc': 'Periodic sampling event of a certain frequency'}})
  1240. return event_types
  1241. def __get_nonneural_evtypes_variant_b(self, data):
  1242. """
  1243. Defines event types and the necessary parameters to extract them from
  1244. a 2.3 nev file.
  1245. """
  1246. # digital events
  1247. event_types = {
  1248. 'digital_input_port': {
  1249. 'name': 'digital_input_port',
  1250. 'field': 'digital_input',
  1251. 'mask': self.__is_set(data['packet_insertion_reason'], 0),
  1252. 'desc': "Events of the digital input port"},
  1253. 'serial_input_port': {
  1254. 'name': 'serial_input_port',
  1255. 'field': 'digital_input',
  1256. 'mask':
  1257. self.__is_set(data['packet_insertion_reason'], 0) &
  1258. self.__is_set(data['packet_insertion_reason'], 7),
  1259. 'desc': "Events of the serial input port"}}
  1260. return event_types
  1261. def __get_unit_classification(self, un_id):
  1262. """
  1263. Returns the Blackrock unit classification of an online spike sorting
  1264. for the given unit id (un_id).
  1265. """
  1266. # Blackrock unit classification
  1267. if un_id == 0:
  1268. return 'unclassified'
  1269. elif 1 <= un_id <= 16:
  1270. return '{0}'.format(un_id)
  1271. elif 17 <= un_id <= 244:
  1272. raise ValueError(
  1273. "Unit id {0} is not used by daq system".format(un_id))
  1274. elif un_id == 255:
  1275. return 'noise'
  1276. else:
  1277. raise ValueError("Unit id {0} cannot be classified".format(un_id))
  1278. def __is_set(self, flag, pos):
  1279. """
  1280. Checks if bit is set at the given position for flag. If flag is an
  1281. array, an array will be returned.
  1282. """
  1283. return flag & (1 << pos) > 0
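# Minimal sketch of the bit test above (np is numpy, imported by this
# module); bit 0 flags digital input events, bit 7 (together with bit 0)
# flags the serial input port:
#     flags = np.array([0b00000001, 0b10000001, 0b01000000])
#     flags & (1 << 0) > 0    # -> array([ True,  True, False])
#     flags & (1 << 7) > 0    # -> array([False,  True, False])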
  1284. def __transform_nsx_to_load(self, nsx_to_load):
  1285. """
  1286. Transforms the input argument nsx_to_load to a list of integers.
  1287. """
  1288. if hasattr(nsx_to_load, "__len__") and len(nsx_to_load) == 0:
  1289. nsx_to_load = None
  1290. if isinstance(nsx_to_load, int):
  1291. nsx_to_load = [nsx_to_load]
  1292. if isinstance(nsx_to_load, str):
  1293. if nsx_to_load.lower() == 'none':
  1294. nsx_to_load = None
  1295. elif nsx_to_load.lower() == 'all':
  1296. nsx_to_load = self._avail_nsx
  1297. else:
  1298. raise ValueError("Invalid specification of nsx_to_load.")
  1299. if nsx_to_load:
  1300. for nsx_nb in nsx_to_load:
  1301. if not self._avail_files['ns' + str(nsx_nb)]:
  1302. raise ValueError("ns%i is not available" % nsx_nb)
  1303. return nsx_to_load
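# Illustrative sketch (assuming ns2 and ns5 files are available):
#     5       -> [5]
#     'all'   -> [2, 5]
#     'none'  -> None
#     []      -> None
# An unavailable nsX id raises a ValueError.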
  1304. def __transform_channels(self, channels, nsx_to_load):
  1305. """
  1306. Transforms the input argument channels to a list of integers.
  1307. """
  1308. all_channels = []
  1309. nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
  1310. if nsx_to_load is not None:
  1311. for nsx_nb in nsx_to_load:
  1312. all_channels.extend(
  1313. self.__nsx_ext_header[nsx_nb]['electrode_id'].astype(int))
  1314. elec_id = self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  1315. all_channels.extend(elec_id.astype(int))
  1316. all_channels = np.unique(all_channels).tolist()
  1317. if hasattr(channels, "__len__") and len(channels) == 0:
  1318. channels = None
  1319. if isinstance(channels, int):
  1320. channels = [channels]
  1321. if isinstance(channels, str):
  1322. if channels.lower() == 'none':
  1323. channels = None
  1324. elif channels.lower() == 'all':
  1325. channels = all_channels
  1326. else:
  1327. raise ValueError("Invalid channel specification.")
  1328. if channels:
  1329. if len(set(all_channels) & set(channels)) < len(channels):
  1330. self._print_verbose(
  1331. "Ignoring unknown channel ID(s) specified in in channels.")
  1332. # Make sure, all channels are valid and contain no duplicates
  1333. channels = list(set(all_channels).intersection(set(channels)))
  1334. else:
  1335. self._print_verbose("No channel is specified, therefore no "
  1336. "time series and unit data is loaded.")
  1337. return channels
  1338. def __transform_units(self, units, channels):
  1339. """
1340. Transforms the input argument units to a dictionary, where keys
  1341. (channels) are int, and values (units) are lists of integers.
  1342. """
  1343. if isinstance(units, dict):
  1344. for ch, u in units.items():
  1345. if ch not in channels:
  1346. self._print_verbose(
  1347. "Units contain a channel id which is not listed in "
  1348. "channels")
  1349. if isinstance(u, int):
  1350. units[ch] = [u]
  1351. if hasattr(u, '__len__') and len(u) == 0:
  1352. units[ch] = None
  1353. if isinstance(u, str):
  1354. if u.lower() == 'none':
  1355. units[ch] = None
  1356. elif u.lower() == 'all':
  1357. units[ch] = list(range(17))
  1358. units[ch].append(255)
  1359. else:
  1360. raise ValueError("Invalid unit specification.")
  1361. else:
  1362. if hasattr(units, "__len__") and len(units) == 0:
  1363. units = None
  1364. if isinstance(units, str):
  1365. if units.lower() == 'none':
  1366. units = None
  1367. elif units.lower() == 'all':
  1368. units = list(range(17))
  1369. units.append(255)
  1370. else:
  1371. raise ValueError("Invalid unit specification.")
  1372. if isinstance(units, int):
  1373. units = [units]
  1374. if (channels is None) and (units is not None):
  1375. raise ValueError(
  1376. 'At least one channel needs to be loaded to load units')
  1377. if units:
  1378. units = dict(zip(channels, [units] * len(channels)))
  1379. if units is None:
  1380. self._print_verbose("No units are specified, therefore no "
  1381. "unit or spiketrain is loaded.")
  1382. return units
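# Illustrative sketch (assumed inputs) with channels=[1, 2]:
#     units='all'          -> {1: [0, 1, ..., 16, 255], 2: [0, 1, ..., 16, 255]}
#     units={1: 5, 2: []}  -> {1: [5], 2: None}
# where 255 is the Blackrock code for the noise unit.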
  1383. def __transform_times(self, n, default_n):
  1384. """
  1385. Transforms the input argument n_start or n_stop (n) to a list of
  1386. quantities. In case n is None, it is set to a default value provided by
  1387. the given function (default_n).
  1388. """
  1389. highest_res = self.__nev_params('event_unit')
  1390. if isinstance(n, pq.Quantity):
  1391. n = [n.rescale(highest_res)]
  1392. elif hasattr(n, "__len__"):
  1393. n = [tp.rescale(highest_res) if tp is not None
  1394. else default_n for tp in n]
  1395. elif n is None:
  1396. n = [default_n]
  1397. else:
  1398. raise ValueError('Invalid specification of n_start/n_stop.')
  1399. return n
  1400. def __merge_time_ranges(
  1401. self, user_n_starts, user_n_stops, nsx_to_load):
  1402. """
1403. Merges, after validation, the user-specified n_starts and n_stops with
1404. the intrinsic n_starts and n_stops of the file set (e.g., from
1405. recording pauses).
1406. Final n_starts and n_stops are chosen such that the time range of each
1407. resulting segment covers the largest meaningful span. This means that
1408. the duration of the signals stored in a segment may be smaller than the
1409. duration set for that segment.
  1410. """
1411. # define the highest time resolution
  1412. # (for accurate manipulations of the time settings)
  1413. highest_res = self.__nev_params('event_unit')
  1414. user_n_starts = self.__transform_times(
  1415. user_n_starts, self.__get_min_time())
  1416. user_n_stops = self.__transform_times(
  1417. user_n_stops, self.__get_max_time())
  1418. # check if user provided as many n_starts as n_stops
  1419. if len(user_n_starts) != len(user_n_stops):
  1420. raise ValueError("n_starts and n_stops must be of equal length")
1421. # if necessary, clip user n_starts and n_stops to the time range of the file set
  1422. if user_n_starts[0] < self.__get_min_time():
  1423. user_n_starts[0] = self.__get_min_time()
  1424. self._print_verbose(
  1425. "First entry of n_start is smaller than min time of the file "
  1426. "set: n_start[0] set to min time of file set")
  1427. if user_n_starts[-1] > self.__get_max_time():
  1428. user_n_starts = user_n_starts[:-1]
  1429. user_n_stops = user_n_stops[:-1]
  1430. self._print_verbose(
  1431. "Last entry of n_start is larger than max time of the file "
  1432. "set: last n_start and n_stop entry are excluded")
  1433. if user_n_stops[-1] > self.__get_max_time():
  1434. user_n_stops[-1] = self.__get_max_time()
  1435. self._print_verbose(
  1436. "Last entry of n_stop is larger than max time of the file "
  1437. "set: n_stop[-1] set to max time of file set")
  1438. # get intrinsic time settings of nsx files (incl. rec pauses)
  1439. n_starts_files = []
  1440. n_stops_files = []
  1441. if nsx_to_load is not None:
  1442. for nsx_nb in nsx_to_load:
  1443. start_stop = \
  1444. self.__nsx_rec_times[self.__nsx_spec[nsx_nb]](nsx_nb)
  1445. n_starts_files.append(start_stop[0])
  1446. n_stops_files.append(start_stop[1])
  1447. # reducing n_starts from wanted nsx files to minima
  1448. # (keep recording pause if it occurs)
  1449. if len(n_starts_files) > 0:
  1450. if np.shape(n_starts_files)[1] > 1:
  1451. n_starts_files = [
  1452. tp * highest_res for tp in np.min(n_starts_files, axis=1)]
  1453. else:
  1454. n_starts_files = [
  1455. tp * highest_res for tp in np.min(n_starts_files, axis=0)]
1456. # reducing n_stops from wanted nsx files to maxima
  1457. # (keep recording pause if it occurs)
  1458. if len(n_stops_files) > 0:
  1459. if np.shape(n_stops_files)[1] > 1:
  1460. n_stops_files = [
  1461. tp * highest_res for tp in np.max(n_stops_files, axis=1)]
  1462. else:
  1463. n_stops_files = [
  1464. tp * highest_res for tp in np.max(n_stops_files, axis=0)]
  1465. # merge user time settings with intrinsic nsx time settings
  1466. n_starts = []
  1467. n_stops = []
  1468. for start, stop in zip(user_n_starts, user_n_stops):
  1469. # check if start and stop of user create a positive time interval
  1470. if not start < stop:
  1471. raise ValueError(
  1472. "t(i) in n_starts has to be smaller than t(i) in n_stops")
  1473. # Reduce n_starts_files to given intervals of user & add start
  1474. if len(n_starts_files) > 0:
  1475. mask = (n_starts_files > start) & (n_starts_files < stop)
  1476. red_n_starts_files = np.array(n_starts_files)[mask]
  1477. merged_n_starts = [start] + [
  1478. tp * highest_res for tp in red_n_starts_files]
  1479. else:
  1480. merged_n_starts = [start]
  1481. # Reduce n_stops_files to given intervals of user & add stop
  1482. if len(n_stops_files) > 0:
  1483. mask = (n_stops_files > start) & (n_stops_files < stop)
  1484. red_n_stops_files = np.array(n_stops_files)[mask]
  1485. merged_n_stops = [
  1486. tp * highest_res for tp in red_n_stops_files] + [stop]
  1487. else:
  1488. merged_n_stops = [stop]
  1489. # Define combined user and file n_starts and n_stops
  1490. # case one:
  1491. if len(merged_n_starts) == len(merged_n_stops):
  1492. if len(merged_n_starts) + len(merged_n_stops) == 2:
  1493. n_starts.extend(merged_n_starts)
  1494. n_stops.extend(merged_n_stops)
  1495. if len(merged_n_starts) + len(merged_n_stops) > 2:
  1496. merged_n_starts.remove(merged_n_starts[1])
1497. n_starts.extend(merged_n_starts)
  1498. merged_n_stops.remove(merged_n_stops[-2])
  1499. n_stops.extend(merged_n_stops)
  1500. # case two:
  1501. elif len(merged_n_starts) < len(merged_n_stops):
  1502. n_starts.extend(merged_n_starts)
  1503. merged_n_stops.remove(merged_n_stops[-2])
  1504. n_stops.extend(merged_n_stops)
  1505. # case three:
  1506. elif len(merged_n_starts) > len(merged_n_stops):
  1507. merged_n_starts.remove(merged_n_starts[1])
  1508. n_starts.extend(merged_n_starts)
  1509. n_stops.extend(merged_n_stops)
  1510. if len(n_starts) > len(user_n_starts) and \
  1511. len(n_stops) > len(user_n_stops):
  1512. self._print_verbose(
  1513. "Additional recording pauses were detected. There will be "
  1514. "more segments than the user expects.")
  1515. return n_starts, n_stops
  1516. def __read_event(self, n_start, n_stop, data, ev_dict, lazy=False):
  1517. """
  1518. Creates an event for non-neural experimental events in nev data.
  1519. """
  1520. event_unit = self.__nev_params('event_unit')
  1521. if lazy:
  1522. times = []
  1523. labels = np.array([], dtype='S')
  1524. else:
  1525. times = data['timestamp'][ev_dict['mask']] * event_unit
  1526. labels = data[ev_dict['field']][ev_dict['mask']].astype(str)
  1527. # mask for given time interval
  1528. mask = (times >= n_start) & (times < n_stop)
  1529. if np.sum(mask) > 0:
  1530. ev = Event(
  1531. times=times[mask].astype(float),
  1532. labels=labels[mask],
  1533. name=ev_dict['name'],
  1534. description=ev_dict['desc'])
  1535. if lazy:
  1536. ev.lazy_shape = np.sum(mask)
  1537. else:
  1538. ev = None
  1539. return ev
  1540. def __read_spiketrain(
  1541. self, n_start, n_stop, spikes, channel_id, unit_id,
  1542. load_waveforms=False, scaling='raw', lazy=False):
  1543. """
  1544. Creates spiketrains for Spikes in nev data.
  1545. """
  1546. event_unit = self.__nev_params('event_unit')
  1547. # define a name for spiketrain
  1548. # (unique identifier: 1000 * elid + unit_nb)
  1549. name = "Unit {0}".format(1000 * channel_id + unit_id)
  1550. # define description for spiketrain
  1551. desc = 'SpikeTrain from channel: {0}, unit: {1}'.format(
  1552. channel_id, self.__get_unit_classification(unit_id))
  1553. # get spike times for given time interval
  1554. if not lazy:
  1555. times = spikes['timestamp'] * event_unit
  1556. mask = (times >= n_start) & (times < n_stop)
  1557. times = times[mask].astype(float)
  1558. else:
  1559. times = np.array([]) * event_unit
  1560. st = SpikeTrain(
  1561. times=times,
  1562. name=name,
  1563. description=desc,
  1564. file_origin='.'.join([self._filenames['nev'], 'nev']),
  1565. t_start=n_start,
  1566. t_stop=n_stop)
  1567. if lazy:
  1568. st.lazy_shape = np.shape(times)
  1569. # load waveforms if requested
  1570. if load_waveforms and not lazy:
  1571. wf_dtype = self.__nev_params('waveform_dtypes')[channel_id]
  1572. wf_size = self.__nev_params('waveform_size')[channel_id]
  1573. waveforms = spikes['waveform'].flatten().view(wf_dtype)
  1574. waveforms = waveforms.reshape(int(spikes.size), 1, int(wf_size))
  1575. if scaling == 'voltage':
  1576. st.waveforms = (
  1577. waveforms[mask] * self.__nev_params('waveform_unit') *
  1578. self.__nev_params('digitization_factor')[channel_id] /
  1579. 1000.)
  1580. elif scaling == 'raw':
  1581. st.waveforms = waveforms[mask] * pq.dimensionless
  1582. else:
  1583. raise ValueError(
1584. 'Unknown option {0} for parameter scaling.'.format(scaling))
  1585. st.sampling_rate = self.__nev_params('waveform_sampling_rate')
  1586. st.left_sweep = self.__get_left_sweep_waveforms()[channel_id]
  1587. # add additional annotations
  1588. st.annotate(
  1589. unit_id=int(unit_id),
  1590. channel_id=int(channel_id))
  1591. return st
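# Illustrative sketch (assumed IDs): channel_id=1 and unit_id=5 yield a
# SpikeTrain named "Unit 1005" (1000 * channel_id + unit_id) with the
# description "SpikeTrain from channel: 1, unit: 5".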
  1592. def __read_analogsignal(
  1593. self, n_start, n_stop, signal, channel_id, nsx_nb,
  1594. scaling='raw', lazy=False):
  1595. """
  1596. Creates analogsignal for signal of channel in nsx data.
  1597. """
  1598. # TODO: The following part is extremely slow, since the memmaps for the
  1599. # headers are created again and again. In particular, this makes lazy
  1600. # loading slow as well. Solution would be to create header memmaps up
  1601. # front.
  1602. # get parameters
  1603. sampling_rate = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1604. 'sampling_rate', nsx_nb)
  1605. nsx_time_unit = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1606. 'time_unit', nsx_nb)
  1607. max_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1608. 'max_analog_val', nsx_nb)
  1609. min_ana = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1610. 'min_analog_val', nsx_nb)
  1611. max_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1612. 'max_digital_val', nsx_nb)
  1613. min_dig = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1614. 'min_digital_val', nsx_nb)
  1615. units = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1616. 'units', nsx_nb)
  1617. labels = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  1618. 'labels', nsx_nb)
  1619. dbl_idx = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
  1620. 'databl_idx', nsx_nb, n_start, n_stop)
  1621. t_start = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
  1622. 'databl_t_start', nsx_nb, n_start, n_stop)
  1623. t_stop = self.__nsx_databl_param[self.__nsx_spec[nsx_nb]](
  1624. 'databl_t_stop', nsx_nb, n_start, n_stop)
  1625. elids_nsx = list(self.__nsx_ext_header[nsx_nb]['electrode_id'])
  1626. if channel_id in elids_nsx:
  1627. idx_ch = elids_nsx.index(channel_id)
  1628. else:
  1629. return None
  1630. description = \
  1631. "AnalogSignal from channel: {0}, label: {1}, nsx: {2}".format(
  1632. channel_id, labels[idx_ch], nsx_nb)
  1633. # TODO: Find a more time/memory efficient way to handle lazy loading
  1634. data_times = np.arange(
  1635. t_start.item(), t_stop.item(),
  1636. self.__nsx_basic_header[nsx_nb]['period']) * t_start.units
  1637. mask = (data_times >= n_start) & (data_times < n_stop)
  1638. if lazy:
  1639. lazy_shape = (np.sum(mask), )
  1640. sig_ch = np.array([], dtype='float32')
  1641. sig_unit = pq.dimensionless
  1642. t_start = n_start.rescale('s')
  1643. else:
  1644. data_times = data_times[mask].astype(float)
  1645. if scaling == 'voltage':
  1646. if not self._avail_files['nev']:
  1647. raise ValueError(
  1648. 'Cannot convert signals in filespec 2.1 nsX '
  1649. 'files to voltage without nev file.')
  1650. sig_ch = signal[dbl_idx][:, idx_ch][mask].astype('float32')
  1651. # transform dig value to physical value
  1652. sym_ana = (max_ana[idx_ch] == -min_ana[idx_ch])
  1653. sym_dig = (max_dig[idx_ch] == -min_dig[idx_ch])
  1654. if sym_ana and sym_dig:
  1655. sig_ch *= float(max_ana[idx_ch]) / float(max_dig[idx_ch])
  1656. else:
  1657. # general case (same result as above for symmetric input)
  1658. sig_ch -= min_dig[idx_ch]
  1659. sig_ch *= float(max_ana[idx_ch] - min_ana[idx_ch]) / \
  1660. float(max_dig[idx_ch] - min_dig[idx_ch])
  1661. sig_ch += float(min_ana[idx_ch])
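# The general branch above applies the linear digital-to-analog mapping
#     physical = (digital - min_dig) * (max_ana - min_ana)
#                / (max_dig - min_dig) + min_ana,
# which reduces to physical = digital * max_ana / max_dig when both the
# digital and the analog range are symmetric around zero.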
  1662. sig_unit = units[idx_ch].decode()
  1663. elif scaling == 'raw':
  1664. sig_ch = signal[dbl_idx][:, idx_ch][mask].astype(int)
  1665. sig_unit = pq.dimensionless
  1666. else:
  1667. raise ValueError(
1668. 'Unknown option {0} for parameter '
  1669. 'scaling.'.format(scaling))
  1670. t_start = data_times[0].rescale(nsx_time_unit)
  1671. anasig = AnalogSignal(
  1672. signal=pq.Quantity(sig_ch, sig_unit, copy=False),
  1673. sampling_rate=sampling_rate,
  1674. t_start=t_start,
  1675. name=labels[idx_ch],
  1676. description=description,
  1677. file_origin='.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb]))
  1678. if lazy:
  1679. anasig.lazy_shape = lazy_shape
  1680. anasig.annotate(
  1681. nsx=nsx_nb,
  1682. channel_id=int(channel_id))
  1683. return anasig
  1684. def __read_unit(self, unit_id, channel_id):
  1685. """
  1686. Creates unit with unit id for given channel id.
  1687. """
  1688. # define a name for spiketrain
  1689. # (unique identifier: 1000 * elid + unit_nb)
  1690. name = "Unit {0}".format(1000 * channel_id + unit_id)
  1691. # define description for spiketrain
  1692. desc = 'Unit from channel: {0}, id: {1}'.format(
  1693. channel_id, self.__get_unit_classification(unit_id))
  1694. un = Unit(
  1695. name=name,
  1696. description=desc,
  1697. file_origin='.'.join([self._filenames['nev'], 'nev']))
  1698. # add additional annotations
  1699. un.annotate(
  1700. unit_id=int(unit_id),
  1701. channel_id=int(channel_id))
  1702. return un
  1703. def __read_channelindex(
  1704. self, channel_id, index=None, channel_units=None, cascade=True):
  1705. """
1706. Returns a ChannelIndex with the given index for the given channel,
1707. containing a list of neo.core.unit.Unit objects for the given units.
  1708. """
  1709. flt_type = {0: 'None', 1: 'Butterworth'}
  1710. chidx = ChannelIndex(
  1711. np.array([channel_id]),
  1712. file_origin=self.filename)
  1713. if index is not None:
  1714. chidx.index = index
  1715. chidx.name = "ChannelIndex {0}".format(chidx.index)
  1716. else:
  1717. chidx.name = "ChannelIndex"
  1718. if self._avail_files['nev']:
  1719. channel_labels = self.__nev_params('channel_labels')
  1720. if channel_labels is not None:
  1721. chidx.channel_names = np.array([channel_labels[channel_id]])
  1722. chidx.channel_ids = np.array([channel_id])
  1723. # additional annotations from nev
  1724. if channel_id in self.__nev_ext_header[b'NEUEVWAV']['electrode_id']:
  1725. get_idx = list(
  1726. self.__nev_ext_header[b'NEUEVWAV']['electrode_id']).index(
  1727. channel_id)
  1728. chidx.annotate(
  1729. connector_ID=self.__nev_ext_header[
  1730. b'NEUEVWAV']['physical_connector'][get_idx],
  1731. connector_pinID=self.__nev_ext_header[
  1732. b'NEUEVWAV']['connector_pin'][get_idx],
  1733. nev_dig_factor=self.__nev_ext_header[
  1734. b'NEUEVWAV']['digitization_factor'][get_idx],
  1735. nev_energy_threshold=self.__nev_ext_header[
  1736. b'NEUEVWAV']['energy_threshold'][get_idx] * pq.uV,
  1737. nev_hi_threshold=self.__nev_ext_header[
  1738. b'NEUEVWAV']['hi_threshold'][get_idx] * pq.uV,
  1739. nev_lo_threshold=self.__nev_ext_header[
  1740. b'NEUEVWAV']['lo_threshold'][get_idx] * pq.uV,
  1741. nb_sorted_units=self.__nev_ext_header[
  1742. b'NEUEVWAV']['nb_sorted_units'][get_idx],
  1743. waveform_size=self.__waveform_size[self.__nev_spec](
  1744. )[channel_id] * self.__nev_params('waveform_time_unit'))
  1745. # additional annotations from nev (only for file_spec > 2.1)
  1746. if self.__nev_spec in ['2.2', '2.3']:
  1747. get_idx = list(
  1748. self.__nev_ext_header[
  1749. b'NEUEVFLT']['electrode_id']).index(
  1750. channel_id)
  1751. # filter type codes (extracted from blackrock manual)
  1752. chidx.annotate(
  1753. nev_hi_freq_corner=self.__nev_ext_header[b'NEUEVFLT'][
  1754. 'hi_freq_corner'][get_idx] /
  1755. 1000. * pq.Hz,
  1756. nev_hi_freq_order=self.__nev_ext_header[b'NEUEVFLT'][
  1757. 'hi_freq_order'][get_idx],
  1758. nev_hi_freq_type=flt_type[self.__nev_ext_header[
  1759. b'NEUEVFLT']['hi_freq_type'][get_idx]],
  1760. nev_lo_freq_corner=self.__nev_ext_header[
  1761. b'NEUEVFLT']['lo_freq_corner'][get_idx] /
  1762. 1000. * pq.Hz,
  1763. nev_lo_freq_order=self.__nev_ext_header[
  1764. b'NEUEVFLT']['lo_freq_order'][get_idx],
  1765. nev_lo_freq_type=flt_type[self.__nev_ext_header[
  1766. b'NEUEVFLT']['lo_freq_type'][get_idx]])
  1767. # additional information about the LFP signal
  1768. if self.__nev_spec in ['2.2', '2.3'] and self.__nsx_ext_header:
  1769. # It does not matter which nsX file to ask for this info
1770. k = list(self.__nsx_ext_header.keys())[0]
  1771. if channel_id in self.__nsx_ext_header[k]['electrode_id']:
  1772. get_idx = list(
  1773. self.__nsx_ext_header[k]['electrode_id']).index(
  1774. channel_id)
  1775. chidx.annotate(
  1776. nsx_hi_freq_corner=self.__nsx_ext_header[k][
  1777. 'hi_freq_corner'][get_idx] / 1000. * pq.Hz,
  1778. nsx_lo_freq_corner=self.__nsx_ext_header[k][
  1779. 'lo_freq_corner'][get_idx] / 1000. * pq.Hz,
  1780. nsx_hi_freq_order=self.__nsx_ext_header[k][
  1781. 'hi_freq_order'][get_idx],
  1782. nsx_lo_freq_order=self.__nsx_ext_header[k][
  1783. 'lo_freq_order'][get_idx],
  1784. nsx_hi_freq_type=flt_type[
  1785. self.__nsx_ext_header[k]['hi_freq_type'][get_idx]],
  1786. nsx_lo_freq_type=flt_type[
1787. self.__nsx_ext_header[k]['lo_freq_type'][get_idx]])
  1788. chidx.description = \
  1789. "Container for units and groups analogsignals of one recording " \
  1790. "channel across segments."
  1791. if not cascade:
  1792. return chidx
  1793. if self._avail_files['nev']:
  1794. # read nev data
  1795. nev_data = self.__nev_data_reader[self.__nev_spec]()
  1796. if channel_units is not None:
1797. # first, extract the data for this channel
  1798. ch_mask = (nev_data['Spikes']['packet_id'] == channel_id)
  1799. data_ch = nev_data['Spikes'][ch_mask]
  1800. for un_id in channel_units:
  1801. if un_id in np.unique(data_ch['unit_class_nb']):
  1802. un = self.__read_unit(
  1803. unit_id=un_id, channel_id=channel_id)
  1804. chidx.units.append(un)
  1805. chidx.create_many_to_one_relationship()
  1806. return chidx
  1807. def read_segment(
  1808. self, n_start, n_stop, name=None, description=None, index=None,
  1809. nsx_to_load='none', channels='none', units='none',
  1810. load_waveforms=False, load_events=False, scaling='raw',
  1811. lazy=False, cascade=True):
  1812. """
  1813. Returns an annotated neo.core.segment.Segment.
  1814. Args:
  1815. n_start (Quantity):
  1816. Start time of maximum time range of signals contained in this
  1817. segment.
  1818. n_stop (Quantity):
  1819. Stop time of maximum time range of signals contained in this
  1820. segment.
  1821. name (None, string):
  1822. If None, name is set to default, otherwise it is set to user
  1823. input.
  1824. description (None, string):
  1825. If None, description is set to default, otherwise it is set to
  1826. user input.
  1827. index (None, int):
  1828. If not None, index of segment is set to user index.
  1829. nsx_to_load (int, list, str):
  1830. ID(s) of nsx file(s) from which to load data, e.g., if set to
  1831. 5 only data from the ns5 file are loaded. If 'none' or empty
  1832. list, no nsx files and therefore no analog signals are loaded.
  1833. If 'all', data from all available nsx are loaded.
  1834. channels (int, list, str):
  1835. Channel id(s) from which to load data. If 'none' or empty list,
  1836. no channels and therefore no analog signal or spiketrains are
  1837. loaded. If 'all', all available channels are loaded.
  1838. units (int, list, str, dict):
  1839. ID(s) of unit(s) to load. If 'none' or empty list, no units and
  1840. therefore no spiketrains are loaded. If 'all', all available
  1841. units are loaded. If dict, the above can be specified
  1842. individually for each channel (keys), e.g. {1: 5, 2: 'all'}
  1843. loads unit 5 from channel 1 and all units from channel 2.
  1844. load_waveforms (boolean):
  1845. If True, waveforms are attached to all loaded spiketrains.
  1846. load_events (boolean):
  1847. If True, all recorded events are loaded.
  1848. scaling (str):
  1849. Determines whether time series of individual
  1850. electrodes/channels are returned as AnalogSignals containing
  1851. raw integer samples ('raw'), or scaled to arrays of floats
  1852. representing voltage ('voltage'). Note that for file
  1853. specification 2.1 and lower, the option 'voltage' requires a
  1854. nev file to be present.
  1855. lazy (boolean):
  1856. If True, only the shape of the data is loaded.
  1857. cascade (boolean):
1858. If False, only the segment without children is returned.
  1859. Returns:
  1860. Segment (neo.Segment):
  1861. Returns the specified segment. See documentation of
  1862. `read_block()` for a full list of annotations of all child
  1863. objects.
  1864. """
  1865. # Make sure that input args are transformed into correct instances
  1866. nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
  1867. channels = self.__transform_channels(channels, nsx_to_load)
  1868. units = self.__transform_units(units, channels)
  1869. seg = Segment(file_origin=self.filename)
  1870. # set user defined annotations if they were provided
  1871. if index is None:
  1872. seg.index = 0
  1873. else:
  1874. seg.index = index
  1875. if name is None:
  1876. seg.name = "Segment {0}".format(seg.index)
  1877. else:
  1878. seg.name = name
  1879. if description is None:
  1880. seg.description = "Segment containing data from t_min to t_max."
  1881. else:
  1882. seg.description = description
  1883. if not cascade:
  1884. return seg
  1885. if self._avail_files['nev']:
  1886. # filename = self._filenames['nev'] + '.nev'
  1887. # annotate segment according to file headers
  1888. seg.rec_datetime = datetime.datetime(
  1889. year=self.__nev_basic_header['year'],
  1890. month=self.__nev_basic_header['month'],
  1891. day=self.__nev_basic_header['day'],
  1892. hour=self.__nev_basic_header['hour'],
  1893. minute=self.__nev_basic_header['minute'],
  1894. second=self.__nev_basic_header['second'],
1895. microsecond=int(self.__nev_basic_header['millisecond']) * 1000)
  1896. # read nev data
  1897. nev_data = self.__nev_data_reader[self.__nev_spec]()
  1898. # read non-neural experimental events
  1899. if load_events:
  1900. ev_dict = self.__nonneural_evtypes[self.__nev_spec](
  1901. nev_data['NonNeural'])
  1902. for ev_type in ev_dict.keys():
  1903. ev = self.__read_event(
  1904. n_start=n_start,
  1905. n_stop=n_stop,
  1906. data=nev_data['NonNeural'],
  1907. ev_dict=ev_dict[ev_type],
  1908. lazy=lazy)
  1909. if ev is not None:
  1910. seg.events.append(ev)
  1911. # TODO: not yet implemented (only avail in nev_spec 2.3)
  1912. # videosync events
  1913. # trackingevents events
  1914. # buttontrigger events
  1915. # configevent events
  1916. # get spiketrain
  1917. if units is not None:
  1918. not_existing_units = []
  1919. for ch_id in units.keys():
1920. # first, extract the data for this channel
  1921. ch_mask = (nev_data['Spikes']['packet_id'] == ch_id)
  1922. data_ch = nev_data['Spikes'][ch_mask]
  1923. if units[ch_id] is not None:
  1924. for un_id in units[ch_id]:
  1925. if un_id in np.unique(data_ch['unit_class_nb']):
1926. # then extract the data for the unit, if it exists
  1927. un_mask = (data_ch['unit_class_nb'] == un_id)
  1928. data_un = data_ch[un_mask]
  1929. st = self.__read_spiketrain(
  1930. n_start=n_start,
  1931. n_stop=n_stop,
  1932. spikes=data_un,
  1933. channel_id=ch_id,
  1934. unit_id=un_id,
  1935. load_waveforms=load_waveforms,
  1936. scaling=scaling,
  1937. lazy=lazy)
  1938. seg.spiketrains.append(st)
  1939. else:
  1940. not_existing_units.append(un_id)
  1941. if not_existing_units:
  1942. self._print_verbose(
  1943. "Units {0} on channel {1} do not "
  1944. "exist".format(not_existing_units, ch_id))
  1945. else:
  1946. self._print_verbose(
  1947. "There are no units specified for channel "
  1948. "{0}".format(ch_id))
  1949. if nsx_to_load is not None:
  1950. for nsx_nb in nsx_to_load:
  1951. # read nsx data
  1952. nsx_data = \
  1953. self.__nsx_data_reader[self.__nsx_spec[nsx_nb]](nsx_nb)
  1954. # read Analogsignals
  1955. for ch_id in channels:
  1956. anasig = self.__read_analogsignal(
  1957. n_start=n_start,
  1958. n_stop=n_stop,
  1959. signal=nsx_data,
  1960. channel_id=ch_id,
  1961. nsx_nb=nsx_nb,
  1962. scaling=scaling,
  1963. lazy=lazy)
  1964. if anasig is not None:
  1965. seg.analogsignals.append(anasig)
  1966. # TODO: not yet implemented
  1967. # if self._avail_files['sif']:
  1968. # sif_header = self._read_sif(self._filenames['sif'] + '.sif')
  1969. # TODO: not yet implemented
  1970. # if self._avail_files['ccf']:
  1971. # ccf_header = self._read_sif(self._filenames['ccf'] + '.ccf')
  1972. seg.create_many_to_one_relationship()
  1973. return seg
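# Illustrative usage sketch (the file prefix 'myfile' and the chosen
# parameters are assumptions, not part of this module):
#     import quantities as pq
#     from neo.io import BlackrockIO
#     reader = BlackrockIO(filename='myfile')
#     seg = reader.read_segment(
#         n_start=0 * pq.s, n_stop=10 * pq.s,
#         nsx_to_load=5, channels='all', units='all',
#         load_events=True, scaling='raw')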
  1974. def read_block(
  1975. self, index=None, name=None, description=None, nsx_to_load='none',
  1976. n_starts=None, n_stops=None, channels='none', units='none',
  1977. load_waveforms=False, load_events=False, scaling='raw',
  1978. lazy=False, cascade=True):
  1979. """
  1980. Args:
  1981. index (None, int):
  1982. If not None, index of block is set to user input.
  1983. name (None, str):
  1984. If None, name is set to default, otherwise it is set to user
  1985. input.
  1986. description (None, str):
  1987. If None, description is set to default, otherwise it is set to
  1988. user input.
  1989. nsx_to_load (int, list, str):
  1990. ID(s) of nsx file(s) from which to load data, e.g., if set to
  1991. 5 only data from the ns5 file are loaded. If 'none' or empty
  1992. list, no nsx files and therefore no analog signals are loaded.
  1993. If 'all', data from all available nsx are loaded.
  1994. n_starts (None, Quantity, list):
  1995. Start times for data in each segment. Number of entries must be
  1996. equal to length of n_stops. If None, intrinsic recording start
  1997. times of files set are used.
  1998. n_stops (None, Quantity, list):
  1999. Stop times for data in each segment. Number of entries must be
  2000. equal to length of n_starts. If None, intrinsic recording stop
  2001. times of files set are used.
  2002. channels (int, list, str):
  2003. Channel id(s) from which to load data. If 'none' or empty list,
  2004. no channels and therefore no analog signal or spiketrains are
  2005. loaded. If 'all', all available channels are loaded.
  2006. units (int, list, str, dict):
  2007. ID(s) of unit(s) to load. If 'none' or empty list, no units and
  2008. therefore no spiketrains are loaded. If 'all', all available
  2009. units are loaded. If dict, the above can be specified
  2010. individually for each channel (keys), e.g. {1: 5, 2: 'all'}
  2011. loads unit 5 from channel 1 and all units from channel 2.
  2012. load_waveforms (boolean):
  2013. If True, waveforms are attached to all loaded spiketrains.
  2014. load_events (boolean):
  2015. If True, all recorded events are loaded.
  2016. scaling (str):
  2017. Determines whether time series of individual
  2018. electrodes/channels are returned as AnalogSignals containing
  2019. raw integer samples ('raw'), or scaled to arrays of floats
  2020. representing voltage ('voltage'). Note that for file
  2021. specification 2.1 and lower, the option 'voltage' requires a
  2022. nev file to be present.
  2023. lazy (bool):
  2024. If True, only the shape of the data is loaded.
  2025. cascade (bool or "lazy"):
2026. If False, only the block without children is returned.
  2027. Returns:
2028. Block (neo.Block):
  2029. Block linking all loaded Neo objects.
  2030. Block annotations:
  2031. avail_file_set (list):
  2032. List of extensions of all available files for the given
  2033. recording.
  2034. avail_nsx (list of int):
  2035. List of integers specifying the .nsX files available,
2036. e.g., [2, 5] indicates that an ns2 and an ns5 file are
  2037. available.
  2038. avail_nev (bool):
  2039. True if a .nev file is available.
  2040. avail_ccf (bool):
  2041. True if a .ccf file is available.
  2042. avail_sif (bool):
  2043. True if a .sif file is available.
  2044. rec_pauses (bool):
  2045. True if the session contains a recording pause (i.e.,
  2046. multiple segments).
  2047. nb_segments (int):
  2048. Number of segments created after merging recording
  2049. times specified by user with the intrinsic ones of the
  2050. file set.
  2051. Segment annotations:
  2052. None.
  2053. ChannelIndex annotations:
2054. waveform_size (Quantity):
  2055. Length of time used to save spike waveforms (in units
  2056. of 1/30000 s).
2057. nev_hi_freq_corner (Quantity),
2058. nev_lo_freq_corner (Quantity),
  2059. nev_hi_freq_order (int), nev_lo_freq_order (int),
  2060. nev_hi_freq_type (str), nev_lo_freq_type (str),
  2061. nev_hi_threshold, nev_lo_threshold,
2062. nev_energy_threshold (Quantity):
  2063. Indicates parameters of spike detection.
  2064. nsx_hi_freq_corner (Quantity),
  2065. nsx_lo_freq_corner (Quantity)
  2066. nsx_hi_freq_order (int), nsx_lo_freq_order (int),
  2067. nsx_hi_freq_type (str), nsx_lo_freq_type (str)
  2068. Indicates parameters of the filtered signal in one of
  2069. the files ns1-ns5 (ns6, if available, is not filtered).
  2070. nev_dig_factor (int):
  2071. Digitization factor in microvolts of the nev file, used
2072. to convert raw samples to volts.
  2073. connector_ID, connector_pinID (int):
  2074. ID of connector and pin on the connector where the
  2075. channel was recorded from.
  2076. nb_sorted_units (int):
  2077. Number of sorted units on this channel (noise, mua and
  2078. sua).
  2079. Unit annotations:
  2080. unit_id (int):
  2081. ID of the unit.
  2082. channel_id (int):
  2083. Channel ID (Blackrock ID) from which the unit was
  2084. loaded (equiv. to the single list entry in the
  2085. attribute channel_ids of ChannelIndex parent).
  2086. AnalogSignal annotations:
  2087. nsx (int):
  2088. nsX file the signal was loaded from, e.g., 5 indicates
  2089. the .ns5 file.
  2090. channel_id (int):
  2091. Channel ID (Blackrock ID) from which the signal was
  2092. loaded.
  2093. Spiketrain annotations:
  2094. unit_id (int):
  2095. ID of the unit from which the spikes were recorded.
  2096. channel_id (int):
  2097. Channel ID (Blackrock ID) from which the spikes were
  2098. loaded.
  2099. Event annotations:
  2100. The resulting Block contains one Event object with the name
  2101. `digital_input_port`. It contains all digitally recorded
  2102. events, with the event code coded in the labels of the
  2103. Event. The Event object contains no further annotation.
  2104. """
  2105. # Make sure that input args are transformed into correct instances
  2106. nsx_to_load = self.__transform_nsx_to_load(nsx_to_load)
  2107. channels = self.__transform_channels(channels, nsx_to_load)
  2108. units = self.__transform_units(units, channels)
  2109. # Create block
  2110. bl = Block(file_origin=self.filename)
  2111. # set user defined annotations if they were provided
  2112. if index is not None:
  2113. bl.index = index
  2114. if name is None:
  2115. bl.name = "Blackrock Data Block"
  2116. else:
  2117. bl.name = name
  2118. if description is None:
  2119. bl.description = "Block of data from Blackrock file set."
  2120. else:
  2121. bl.description = description
  2122. if self._avail_files['nev']:
  2123. bl.rec_datetime = self.__nev_params('rec_datetime')
  2124. bl.annotate(
  2125. avail_file_set=[k for k, v in self._avail_files.items() if v])
  2126. bl.annotate(avail_nsx=self._avail_nsx)
  2127. bl.annotate(avail_nev=self._avail_files['nev'])
  2128. bl.annotate(avail_sif=self._avail_files['sif'])
  2129. bl.annotate(avail_ccf=self._avail_files['ccf'])
  2130. bl.annotate(rec_pauses=False)
  2131. # Test n_starts and n_stops user requirements and combine them if
  2132. # possible with file internal n_starts and n_stops from rec pauses.
  2133. n_starts, n_stops = \
  2134. self.__merge_time_ranges(n_starts, n_stops, nsx_to_load)
  2135. bl.annotate(nb_segments=len(n_starts))
  2136. if not cascade:
  2137. return bl
  2138. # read segment
  2139. for seg_idx, (n_start, n_stop) in enumerate(zip(n_starts, n_stops)):
  2140. seg = self.read_segment(
  2141. n_start=n_start,
  2142. n_stop=n_stop,
  2143. index=seg_idx,
  2144. nsx_to_load=nsx_to_load,
  2145. channels=channels,
  2146. units=units,
  2147. load_waveforms=load_waveforms,
  2148. load_events=load_events,
  2149. scaling=scaling,
  2150. lazy=lazy,
  2151. cascade=cascade)
  2152. bl.segments.append(seg)
  2153. # read channelindexes
  2154. if channels:
  2155. for ch_id in channels:
  2156. if units and ch_id in units.keys():
  2157. ch_units = units[ch_id]
  2158. else:
  2159. ch_units = None
  2160. chidx = self.__read_channelindex(
  2161. channel_id=ch_id,
  2162. index=0,
  2163. channel_units=ch_units,
  2164. cascade=cascade)
  2165. for seg in bl.segments:
  2166. if ch_units:
  2167. for un in chidx.units:
  2168. sts = seg.filter(
  2169. targdict={'name': un.name},
  2170. objects='SpikeTrain')
  2171. for st in sts:
  2172. un.spiketrains.append(st)
  2173. anasigs = seg.filter(
  2174. targdict={'channel_id': ch_id},
  2175. objects='AnalogSignal')
  2176. for anasig in anasigs:
  2177. chidx.analogsignals.append(anasig)
  2178. bl.channel_indexes.append(chidx)
  2179. bl.create_many_to_one_relationship()
  2180. return bl
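# Illustrative usage sketch (the file prefix 'myfile' is an assumption):
# load all channels of the ns2 file and all sorted units into one Block,
# scaled to voltage.
#     from neo.io import BlackrockIO
#     reader = BlackrockIO(filename='myfile')
#     bl = reader.read_block(
#         nsx_to_load=2, channels='all', units='all',
#         load_waveforms=True, load_events=True, scaling='voltage')
#     print(bl.annotations['nb_segments'], len(bl.segments))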
  2181. def __str__(self):
  2182. """
  2183. Prints summary of the Blackrock data file set.
  2184. """
  2185. output = "\nFile Origins for Blackrock File Set\n"\
  2186. "====================================\n"
  2187. for ftype in self._filenames.keys():
  2188. output += ftype + ':' + self._filenames[ftype] + '\n'
  2189. if self._avail_files['nev']:
  2190. output += "\nEvent Parameters (NEV)\n"\
  2191. "====================================\n"\
  2192. "Timestamp resolution (Hz): " +\
  2193. str(self.__nev_basic_header['timestamp_resolution']) +\
  2194. "\nWaveform resolution (Hz): " +\
  2195. str(self.__nev_basic_header['sample_resolution'])
  2196. if b'NEUEVWAV' in self.__nev_ext_header.keys():
  2197. avail_el = \
  2198. self.__nev_ext_header[b'NEUEVWAV']['electrode_id']
  2199. con = \
  2200. self.__nev_ext_header[b'NEUEVWAV']['physical_connector']
  2201. pin = \
  2202. self.__nev_ext_header[b'NEUEVWAV']['connector_pin']
  2203. nb_units = \
  2204. self.__nev_ext_header[b'NEUEVWAV']['nb_sorted_units']
  2205. output += "\n\nAvailable electrode IDs:\n"\
  2206. "====================================\n"
  2207. for i, el in enumerate(avail_el):
  2208. output += "Electrode ID %i: " % el
  2209. channel_labels = self.__nev_params('channel_labels')
  2210. if channel_labels is not None:
  2211. output += "label %s: " % channel_labels[el]
  2212. output += "connector: %i, " % con[i]
  2213. output += "pin: %i, " % pin[i]
  2214. output += 'nb_units: %i\n' % nb_units[i]
  2215. for nsx_nb in self._avail_nsx:
  2216. analog_res = self.__nsx_params[self.__nsx_spec[nsx_nb]](
  2217. 'sampling_rate', nsx_nb)
  2218. avail_el = [
  2219. el for el in self.__nsx_ext_header[nsx_nb]['electrode_id']]
  2220. output += "\nAnalog Parameters (NS" + str(nsx_nb) + ")"\
  2221. "\n===================================="
  2222. output += "\nResolution (Hz): %i" % analog_res
  2223. output += "\nAvailable channel IDs: " +\
  2224. ", " .join(["%i" % a for a in avail_el]) + "\n"
  2225. return output
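# Illustrative sketch: the summary assembled above can be inspected with
#     print(BlackrockIO(filename='myfile'))    # 'myfile' is a placeholder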