  1. """
  2. Class for reading data from Neuralynx files.
  3. This IO supports NCS, NEV and NSE file formats.
  4. This module is an older implementation with old neo.io API.
  5. A new class NeuralynxIO compunded by NeuralynxRawIO and BaseFromIO
  6. superseed this one.
  7. Depends on: numpy
  8. Supported: Read
  9. Author: Julia Sprenger, Carlos Canova
  10. Adapted from the exampleIO of python-neo
  11. """
import sys
import os
import warnings
import codecs
import copy
import re
import datetime
import pkg_resources

import numpy as np
import quantities as pq

from neo.io.baseio import BaseIO
import neo.io.neuralynxio
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, SpikeTrain,
                      Event, Unit)
from os import listdir, sep
from os.path import isfile, getsize

import hashlib
import pickle

if hasattr(pkg_resources, 'pkg_resources'):
    parse_version = pkg_resources.pkg_resources.parse_version
else:
    parse_version = pkg_resources.parse_version
class NeuralynxIO(BaseIO):
    """
    Class for reading Neuralynx files.

    It enables reading:
    - :class:`Block`
    - :class:`Segment`
    - :class:`AnalogSignal`
    - :class:`SpikeTrain`

    Usage:
        from neo import io
        import quantities as pq
        import matplotlib.pyplot as plt

        session_folder = '../Data/2014-07-24_10-31-02'
        NIO = io.NeuralynxIO(session_folder, print_diagnostic=True)
        block = NIO.read_block(t_starts=0.1 * pq.s, t_stops=0.2 * pq.s,
                               events=True)
        seg = block.segments[0]
        analogsignal = seg.analogsignals[0]
        plt.plot(analogsignal.times.rescale(pq.ms), analogsignal.magnitude)
        plt.show()
    """
    is_readable = True   # This class can only read data
    is_writable = False  # write is not supported

    # This class is able to directly or indirectly handle the following
    # objects. Note that this greatly simplifies the full Neo object hierarchy.
    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]

    # This class can return either a Block or a Segment.
    # The first one is the default (self.read).
    # These lists should go from highest object to lowest object because
    # common_io_test assumes it.
    readable_objects = [Segment, AnalogSignal, SpikeTrain]

    # This class is not able to write objects
    writeable_objects = []

    has_header = False
    is_streameable = False

    # This is for GUI stuff: a definition for parameters when reading.
    # This dict should be keyed by object (`Block`). Each entry is a list
    # of tuples. The first entry in each tuple is the parameter name. The
    # second entry is a dict with keys 'value' (for the default value)
    # and 'label' (for a descriptive name).
    # Note that if the highest-level object requires parameters,
    # common_io_test will be skipped.
    read_params = {
        Segment: [('waveforms', {'value': True})],
        Block: [('waveforms', {'value': False})]
    }

    # write is not supported, so no GUI stuff for it
    write_params = None

    name = 'Neuralynx'
    description = 'This IO reads .nse/.ncs/.nev files of the Neuralynx (' \
                  'Cheetah) recording system (tetrodes).'

    extensions = ['nse', 'ncs', 'nev', 'ntt']

    # mode can be 'file', 'dir', 'fake' or 'database'
    # the main case is 'file', but some readers are based on a directory or
    # a database; this info is for GUI stuff as well
    mode = 'dir'

    # hardcoded parameters from the manual, which are not present in
    # Neuralynx data files

    # unit of timestamps in the different file types
    nev_time_unit = pq.microsecond
    ncs_time_unit = pq.microsecond
    nse_time_unit = pq.microsecond
    ntt_time_unit = pq.microsecond
    # unit of sampling rate in the different file types
    ncs_sr_unit = pq.Hz
    nse_sr_unit = pq.Hz
    ntt_sr_unit = pq.Hz
    def __init__(self, sessiondir=None, cachedir=None, use_cache='hash',
                 print_diagnostic=False, filename=None):
        """
        Arguments:
            sessiondir: the directory in which the files of the recording
                        session are collected. Default 'None'.
            print_diagnostic: indicates whether information about the
                        loading of data is printed to the terminal.
                        Default 'False'.
            cachedir: the directory where metadata about the recording
                        session is read from and written to.
            use_cache: method used for cache identification. Possible values:
                        'hash'/'always'/'datesize'/'never'. Default 'hash'
            filename: this argument is handled the same as sessiondir and is
                        only added for external IO interfaces. The value of
                        sessiondir has priority over filename.
        """
        warnings.warn('{} is deprecated and will be removed in neo version 0.10. Use {} instead.'
                      ''.format(self.__class__, neo.io.neuralynxio.NeuralynxIO), FutureWarning)
        BaseIO.__init__(self)

        # possibility to provide filename instead of sessiondir for IO
        # compatibility
        if filename is not None and sessiondir is None:
            sessiondir = filename

        if sessiondir is None:
            raise ValueError('Must provide a directory containing data files '
                             'of one recording session.')

        # remove filename if a specific file was passed
        if any([sessiondir.endswith('.%s' % ext) for ext in self.extensions]):
            sessiondir = sessiondir[:sessiondir.rfind(sep)]

        # remove trailing separator for consistent directory handling
        if sessiondir.endswith(sep):
            sessiondir = sessiondir.rstrip(sep)

        # set general parameters of this IO
        self.sessiondir = sessiondir
        self.filename = sessiondir.split(sep)[-1]
        self._print_diagnostic = print_diagnostic
        self.associated = False
        self._associate(cachedir=cachedir, usecache=use_cache)

        self._diagnostic_print(
            'Initialized IO for session %s' % self.sessiondir)
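
    # Usage sketch (hypothetical paths; assumes a Neuralynx session folder
    # with .ncs/.nse/.nev files and a writable cache directory):
    #
    #     nio = NeuralynxIO(sessiondir='../Data/2014-07-24_10-31-02',
    #                       cachedir='/tmp/nlx_cache',
    #                       use_cache='hash',
    #                       print_diagnostic=True)
    #
    # With use_cache='hash', a second instantiation of the same session
    # reuses the metadata pickled during the first association run instead
    # of rescanning all data files.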
    def read_block(self, lazy=False, cascade=True, t_starts=None,
                   t_stops=None, electrode_list=None, unit_list=None,
                   analogsignals=True, events=False, waveforms=False):
        """
        Reads data in a requested time window and returns a block with as
        many segments as necessary to contain these data.

        Arguments:
            lazy : Postpone actual reading of the data files. Default 'False'.
            cascade : Do not postpone reading subsequent neo types (segments).
                            Default 'True'.
            t_starts : list of quantities or quantity describing the start of
                            the requested time window to load. If None or
                            [None] the complete session is loaded.
                            Default 'None'.
            t_stops : list of quantities or quantity describing the end of the
                            requested time window to load. Has to contain the
                            same number of values as t_starts. If None or
                            [None] the complete session is loaded.
                            Default 'None'.
            electrode_list : list of integers containing the IDs of the
                            channels requested to load. If [] or None all
                            available channels will be loaded.
                            Default: None.
            unit_list : list of integers containing the IDs of the requested
                            units to load. If [] or None all available units
                            will be loaded.
                            Default: None.
            analogsignals : boolean, indicating whether analogsignals should
                            be read. Default: True.
            events : Loading events. If True all available events in the given
                            time window will be read. Default: False.
            waveforms : Load waveforms for spikes in the requested time
                            window. Default: False.

        Returns: Block object containing the requested data in neo structures.

        Usage:
            from neo import io
            import quantities as pq
            import matplotlib.pyplot as plt

            session_folder = '../Data/2014-07-24_10-31-02'
            NIO = io.NeuralynxIO(session_folder, print_diagnostic=True)
            block = NIO.read_block(lazy=False, cascade=True,
                                   t_starts=0.1 * pq.s, t_stops=0.2 * pq.s,
                                   electrode_list=[1, 5, 10],
                                   unit_list=[1, 2, 3],
                                   events=True, waveforms=True)
            plt.plot(block.segments[0].analogsignals[0])
            plt.show()
        """
        # Create block
        bl = Block(file_origin=self.sessiondir)
        bl.name = self.filename
        if not cascade:
            return bl

        # Checking input of t_start and t_stop
        # For lazy users that specify x,x instead of [x],[x] for t_starts,
        # t_stops
        if t_starts is None:
            t_starts = [None]
        elif type(t_starts) == pq.Quantity:
            t_starts = [t_starts]
        elif type(t_starts) != list or any(
                [(type(i) != pq.Quantity and i is not None) for i in t_starts]):
            raise ValueError('Invalid specification of t_starts.')

        if t_stops is None:
            t_stops = [None]
        elif type(t_stops) == pq.Quantity:
            t_stops = [t_stops]
        elif type(t_stops) != list or any(
                [(type(i) != pq.Quantity and i is not None) for i in t_stops]):
            raise ValueError('Invalid specification of t_stops.')

        # adapting t_starts and t_stops to known gap times (extracted in
        # association process / initialization)
        for gap in self.parameters_global['gaps']:
            for e in range(len(t_starts)):
                t1, t2 = t_starts[e], t_stops[e]
                gap_start = gap[1] * self.ncs_time_unit - \
                    self.parameters_global['t_start']
                gap_stop = gap[2] * self.ncs_time_unit - \
                    self.parameters_global['t_start']
                if ((t1 is None and t2 is None)
                        or (t1 is None and t2 is not None and t2.rescale(
                            self.ncs_time_unit) > gap_stop)
                        or (t2 is None and t1 is not None and t1.rescale(
                            self.ncs_time_unit) < gap_stop)
                        or (t1 is not None and t2 is not None and t1.rescale(
                            self.ncs_time_unit) < gap_start
                            and t2.rescale(self.ncs_time_unit) > gap_stop)):
                    # adapting first time segment
                    t_stops[e] = gap_start
                    # inserting second time segment
                    t_starts.insert(e + 1, gap_stop)
                    t_stops.insert(e + 1, t2)
                    warnings.warn(
                        'Substituted t_starts and t_stops in order to skip '
                        'gap in recording session.')
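
        # Illustration (a sketch with made-up times): with a single requested
        # window t_starts=[0 s], t_stops=[10 s] and one recording gap from
        # 4 s to 6 s, the loop above rewrites the lists to
        #     t_starts = [0 s, 6 s]
        #     t_stops  = [4 s, 10 s]
        # so that one Segment is created per gap-free interval below.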
        # loading all channels if electrode_list is empty
        if electrode_list == [] or electrode_list is None:
            electrode_list = self.parameters_ncs.keys()

        # adding a segment for each (t_start, t_stop) pair
        for t_start, t_stop in zip(t_starts, t_stops):
            seg = self.read_segment(lazy=lazy, cascade=cascade,
                                    t_start=t_start, t_stop=t_stop,
                                    electrode_list=electrode_list,
                                    unit_list=unit_list,
                                    analogsignals=analogsignals,
                                    events=events,
                                    waveforms=waveforms)
            bl.segments.append(seg)

        # generate units
        units = []
        channel_unit_collection = {}
        for st in [s for seg in bl.segments for s in seg.spiketrains]:
            # collecting spiketrains of the same channel and unit id to
            # generate a common unit
            chuid = (st.annotations['channel_index'],
                     st.annotations['unit_id'])
            if chuid in channel_unit_collection:
                channel_unit_collection[chuid].append(st)
            else:
                channel_unit_collection[chuid] = [st]
        for chuid in channel_unit_collection:
            sts = channel_unit_collection[chuid]
            unit = Unit(name='Channel %i, Unit %i' % chuid)
            unit.spiketrains.extend(sts)
            units.append(unit)

        # generate one channel index for each analogsignal
        for anasig in [a for seg in bl.segments for a in seg.analogsignals]:
            channelids = anasig.annotations['channel_index']
            channel_names = ['channel %i' % i for i in channelids]
            channelidx = ChannelIndex(index=range(len(channelids)),
                                      channel_names=channel_names,
                                      name='channel ids for all analogsignal '
                                           '"%s"' % anasig.name,
                                      channel_ids=channelids)
            channelidx.analogsignals.append(anasig)
            bl.channel_indexes.append(channelidx)

        # generate channel indexes for units
        channelids = [unit.spiketrains[0].annotations['channel_index']
                      for unit in units]
        channel_names = ['channel %i' % i for i in channelids]
        channelidx = ChannelIndex(index=range(len(channelids)),
                                  channel_names=channel_names,
                                  name='channel ids for all spiketrains',
                                  channel_ids=channelids)
        channelidx.units.extend(units)
        bl.channel_indexes.append(channelidx)
        bl.create_many_to_one_relationship()

        # Adding global parameters to block annotations
        bl.annotations.update(self.parameters_global)

        return bl
    def read_segment(self, lazy=False, cascade=True, t_start=None,
                     t_stop=None, electrode_list=None, unit_list=None,
                     analogsignals=True, events=False, waveforms=False):
        """Reads one Segment.

        The Segment will contain one AnalogSignal for each channel
        and will go from t_start to t_stop.

        Arguments:
            lazy : Postpone actual reading of the data files. Default 'False'.
            cascade : Do not postpone reading subsequent neo types
                            (SpikeTrains, AnalogSignals, Events).
                            Default 'True'.
            t_start : time (quantity) that the Segment begins. Default None.
            t_stop : time (quantity) that the Segment ends. Default None.
            electrode_list : list of integers containing the IDs of the
                            channels requested to load. If [] or None all
                            available channels will be loaded.
                            Default: None.
            unit_list : list of integers containing the IDs of the requested
                            units to load. If [] or None all available units
                            will be loaded. If False, no unit will be loaded.
                            Default: None.
            analogsignals : boolean, indicating whether analogsignals should
                            be read. Default: True.
            events : Loading events. If True all available events in the given
                            time window will be read. Default: False.
            waveforms : Load waveforms for spikes in the requested time
                            window. Default: False.

        Returns:
            Segment object containing neo objects, which contain the data.
        """
        # input check
        # loading all channels if electrode_list is empty
        if electrode_list == [] or electrode_list is None:
            electrode_list = self.parameters_ncs.keys()
        elif [v for v in electrode_list if
              v in self.parameters_ncs.keys()] == []:
            # warn if none of the requested channels are present in this
            # session
            warnings.warn('Requested channels %s are not present in session '
                          '(contains only %s)' % (
                              electrode_list, self.parameters_ncs.keys()))
            electrode_list = []

        seg = Segment(file_origin=self.filename)
        if not cascade:
            return seg

        # generate empty segment for analogsignal collection
        empty_seg = Segment(file_origin=self.filename)

        # Reading NCS Files #
        # selecting ncs files to load based on electrode_list requested
        if analogsignals:
            for chid in electrode_list:
                if chid in self.parameters_ncs:
                    file_ncs = self.parameters_ncs[chid]['filename']
                    self.read_ncs(file_ncs, empty_seg, lazy, cascade,
                                  t_start=t_start, t_stop=t_stop)
                else:
                    self._diagnostic_print('Can not load ncs of channel %i. '
                                           'No corresponding ncs file '
                                           'present.' % (chid))

            # supplementary merge function, should be replaced by a neo
            # utility function
            def merge_analogsignals(anasig_list):
                for aid, anasig in enumerate(anasig_list):
                    anasig.channel_index = None
                    if aid == 0:
                        full_analogsignal = anasig
                    else:
                        full_analogsignal = full_analogsignal.merge(anasig)
                for key in anasig_list[0].annotations.keys():
                    listified_values = [a.annotations[key]
                                        for a in anasig_list]
                    full_analogsignal.annotations[key] = listified_values
                return full_analogsignal

            analogsignal = merge_analogsignals(empty_seg.analogsignals)
            seg.analogsignals.append(analogsignal)
            analogsignal.segment = seg
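
            # Merge behaviour (a sketch): if two channels were loaded, the
            # per-channel AnalogSignals are stacked column-wise into a single
            # multi-channel AnalogSignal, and each annotation key is
            # listified, e.g. channel_index=1 on the first signal and
            # channel_index=2 on the second become channel_index=[1, 2]
            # on the merged signal.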
        # Reading NEV Files (Events) #
        # reading all files available
        if events:
            for filename_nev in self.nev_asso:
                self.read_nev(filename_nev, seg, lazy, cascade,
                              t_start=t_start, t_stop=t_stop)

        # Reading Spike Data only if requested
        if unit_list is not False:
            # Reading NSE Files (Spikes) #
            # selecting nse files to load based on electrode_list requested
            for chid in electrode_list:
                if chid in self.parameters_nse:
                    filename_nse = self.parameters_nse[chid]['filename']
                    self.read_nse(filename_nse, seg, lazy, cascade,
                                  t_start=t_start, t_stop=t_stop,
                                  waveforms=waveforms)
                else:
                    self._diagnostic_print('Can not load nse of channel %i. '
                                           'No corresponding nse file '
                                           'present.' % (chid))

            # Reading NTT Files (Spikes) #
            # selecting ntt files to load based on electrode_list requested
            for chid in electrode_list:
                if chid in self.parameters_ntt:
                    filename_ntt = self.parameters_ntt[chid]['filename']
                    self.read_ntt(filename_ntt, seg, lazy, cascade,
                                  t_start=t_start, t_stop=t_stop,
                                  waveforms=waveforms)
                else:
                    self._diagnostic_print('Can not load ntt of channel %i. '
                                           'No corresponding ntt file '
                                           'present.' % (chid))

        return seg
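
    # Usage sketch (hypothetical session; time window given as quantities):
    #
    #     seg = nio.read_segment(t_start=0.1 * pq.s, t_stop=0.2 * pq.s,
    #                            electrode_list=[1, 2], unit_list=None,
    #                            events=True, waveforms=True)
    #
    # Note that read_segment() does not skip recording gaps; for windows that
    # may span a gap, use read_block(), which splits them into one segment
    # per gap-free interval.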
    def read_ncs(self, filename_ncs, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None):
        '''
        Reads a single .ncs file from the associated Neuralynx recording
        session.
        In case of a recording gap between t_start and t_stop, data are only
        loaded until the gap start.
        For loading data across recording gaps use read_block(...).

        Arguments:
            filename_ncs : Name of the .ncs file to be loaded.
            seg : Neo Segment, to which the AnalogSignal containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy AnalogSignal. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time or sample (quantity or integer) that the
                            AnalogSignal begins.
                            Default None.
            t_stop : time or sample (quantity or integer) that the
                            AnalogSignal ends.
                            Default None.

        Returns:
            None
        '''
        # checking format of filename and correcting if necessary
        if filename_ncs[-4:] != '.ncs':
            filename_ncs = filename_ncs + '.ncs'
        if sep in filename_ncs:
            filename_ncs = filename_ncs.split(sep)[-1]

        # Extracting the channel id from prescan (association) of ncs files
        # with this recording session
        chid = self.get_channel_id_by_file_name(filename_ncs)
        if chid is None:
            raise ValueError('NeuralynxIO is attempting to read a file '
                             'not associated to this session (%s).' % (
                                 filename_ncs))

        if not cascade:
            return

        # read data
        header_time_data = self.__mmap_ncs_packet_timestamps(filename_ncs)
        data = self.__mmap_ncs_data(filename_ncs)

        # ensure meaningful values for requested start and stop times
        # in case time is provided in samples: transform to absolute time
        # units
        if isinstance(t_start, int):
            t_start = t_start / self.parameters_ncs[chid]['sampling_rate']
        if isinstance(t_stop, int):
            t_stop = t_stop / self.parameters_ncs[chid]['sampling_rate']
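
        # Sample-to-time conversion (a sketch with made-up numbers): for a
        # channel sampled at 32 kHz, t_start=32000 (an integer sample index)
        # becomes 32000 / (32000 * pq.Hz) = 1 s.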
        # rescaling to global start time of recording (time of first sample
        # in any file type)
        if t_start is None or t_start < (
                self.parameters_ncs[chid]['t_start']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_ncs[chid]['t_start']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_ncs[chid]['t_stop']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ncs[chid]['t_stop']
                    - self.parameters_global['t_start']),
                   filename_ncs))

        if t_stop is None or t_stop > (
                self.parameters_ncs[chid]['t_stop']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_ncs[chid]['t_stop']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_ncs[chid]['t_start']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ncs[chid]['t_start']
                    - self.parameters_global['t_start']),
                   filename_ncs))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_ncs))
        # Extracting data signal in requested time window
        unit = pq.dimensionless  # default value
        if lazy:
            sig = []
            p_id_start = 0
        else:
            tstamps = header_time_data * self.ncs_time_unit - \
                self.parameters_global['t_start']

            # find data packet to start with signal construction
            starts = np.where(tstamps <= t_start)[0]
            if len(starts) == 0:
                self._diagnostic_print(
                    'Requested AnalogSignal not present in this time '
                    'interval.')
                return
            else:
                # first packet to be included into signal
                p_id_start = starts[-1]

            # find data packet where signal ends (due to gap or t_stop)
            stops = np.where(tstamps >= t_stop)[0]
            if len(stops) != 0:
                first_stop = [stops[0]]
            else:
                first_stop = []

            # last packet to be included in signal
            p_id_stop = min(first_stop + [len(data)])

            # search gaps in recording in time range to load
            gap_packets = [gap_id[0] for gap_id in
                           self.parameters_ncs[chid]['gaps'] if
                           gap_id[0] > p_id_start]
            if len(gap_packets) > 0 and min(gap_packets) < p_id_stop:
                p_id_stop = min(gap_packets)
                warnings.warn(
                    'Analogsignalarray was shortened due to gap in recorded '
                    'data of file %s at packet id %i' % (
                        filename_ncs, min(gap_packets)))

            # search broken packets in time range to load
            broken_packets = []
            if 'broken_packet' in self.parameters_ncs[chid]:
                broken_packets = [packet[0] for packet in
                                  self.parameters_ncs[chid]['broken_packet']
                                  if packet[0] > p_id_start]
            if len(broken_packets) > 0 and min(broken_packets) < p_id_stop:
                p_id_stop = min(broken_packets)
                warnings.warn(
                    'Analogsignalarray was shortened due to broken data '
                    'packet in recorded data of file %s at packet id %i' % (
                        filename_ncs, min(broken_packets)))

            # construct signal in valid packet range
            sig = np.array(data[p_id_start:p_id_stop + 1], dtype=float)
            sig = sig.reshape(len(sig) * len(sig[0]))

            # ADBitVolts is not guaranteed to be present in the header!
            if 'ADBitVolts' in self.parameters_ncs[chid]:
                sig *= self.parameters_ncs[chid]['ADBitVolts']
                unit = pq.V
            else:
                warnings.warn(
                    'Could not transform data from file %s into physical '
                    'signal. Missing "ADBitVolts" value in text header.'
                    % filename_ncs)

        # defining sampling rate for rescaling purposes
        sampling_rate = self.parameters_ncs[chid]['sampling_unit'][0]

        # creating neo AnalogSignal containing data
        anasig = AnalogSignal(signal=pq.Quantity(sig, unit, copy=False),
                              sampling_rate=1 * sampling_rate,
                              # rescaling t_start to sampling time units
                              t_start=(header_time_data[p_id_start]
                                       * self.ncs_time_unit
                                       - self.parameters_global['t_start']
                                       ).rescale(1 / sampling_rate),
                              name='channel_%i' % (chid),
                              channel_index=chid)

        # removing protruding parts of first and last data packet
        if anasig.t_start < t_start.rescale(anasig.t_start.units):
            anasig = anasig.time_slice(t_start.rescale(anasig.t_start.units),
                                       None)
        if anasig.t_stop > t_stop.rescale(anasig.t_start.units):
            anasig = anasig.time_slice(None,
                                       t_stop.rescale(anasig.t_start.units))

        annotations = copy.deepcopy(self.parameters_ncs[chid])
        for pop_key in ['sampling_rate', 't_start']:
            if pop_key in annotations:
                annotations.pop(pop_key)
        anasig.annotations.update(annotations)
        anasig.annotations['electrode_id'] = chid
        # this annotation is necessary for automatic generation of
        # recordingchannels
        anasig.annotations['channel_index'] = chid
        anasig.segment = seg  # needed for merge function of analogsignals
        seg.analogsignals.append(anasig)
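
    # Reading a single channel (a sketch, assuming channel 1 was recorded
    # into 'CSC1.ncs' in the associated session):
    #
    #     seg = Segment(file_origin=nio.filename)
    #     nio.read_ncs('CSC1.ncs', seg, t_start=0.0 * pq.s, t_stop=1.0 * pq.s)
    #     anasig = seg.analogsignals[0]
    #
    # The signal is scaled to volts only if the text header provides an
    # 'ADBitVolts' conversion factor; otherwise it stays dimensionless.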
    def read_nev(self, filename_nev, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None):
        '''
        Reads the associated nev file and attaches its content as an Event to
        the provided neo segment. In contrast to read_ncs, times can not be
        provided as sample numbers, since a nev file has no inherent sampling
        rate.

        Arguments:
            filename_nev : Name of the .nev file to be loaded.
            seg : Neo Segment, to which the Event containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy Event. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time (quantity) that the Events begin.
                            Default None.
            t_stop : time (quantity) that the Events end.
                            Default None.

        Returns:
            None
        '''
        if filename_nev[-4:] != '.nev':
            filename_nev += '.nev'
        if sep in filename_nev:
            filename_nev = filename_nev.split(sep)[-1]

        if filename_nev not in self.nev_asso:
            raise ValueError('NeuralynxIO is attempting to read a file '
                             'not associated to this session (%s).' % (
                                 filename_nev))

        # ensure meaningful values for requested start and stop times
        # providing time in samples for a nev file does not make sense as we
        # don't know the underlying sampling rate
        if isinstance(t_start, int):
            raise ValueError(
                'Requesting event information from nev file in samples '
                'does not make sense. Requested t_start %s' % t_start)
        if isinstance(t_stop, int):
            raise ValueError(
                'Requesting event information from nev file in samples '
                'does not make sense. Requested t_stop %s' % t_stop)

        # ensure meaningful values for requested start and stop times
        if t_start is None or t_start < (
                self.parameters_nev[filename_nev]['t_start']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_nev[filename_nev]['t_start']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_nev[filename_nev]['t_stop']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nev[filename_nev]['t_stop']
                    - self.parameters_global['t_start']),
                   filename_nev))

        if t_stop is None or t_stop > (
                self.parameters_nev[filename_nev]['t_stop']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_nev[filename_nev]['t_stop']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_nev[filename_nev]['t_start']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nev[filename_nev]['t_start']
                    - self.parameters_global['t_start']),
                   filename_nev))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_nev))

        data = self.__mmap_nev_file(filename_nev)

        # Extracting all events for one event type and putting them into an
        # event array
        # TODO: Check if this is the correct way of event creation.
        for event_type in self.parameters_nev[filename_nev]['event_types']:
            # Extract all time stamps of digital markers and rescale time
            type_mask = [i for i in range(len(data)) if
                         (data[i][4] == event_type['event_id']
                          and data[i][5] == event_type['nttl']
                          and data[i][10].decode('latin-1')
                          == event_type['name'])]
            marker_times = [t[3] for t in
                            data[type_mask]] * self.nev_time_unit - \
                self.parameters_global['t_start']

            # only consider Events in the requested time window
            # [t_start, t_stop]
            time_mask = [i for i in range(len(marker_times)) if (
                marker_times[i] >= t_start and marker_times[i] <= t_stop)]
            marker_times = marker_times[time_mask]

            # Do not create an event array if there are no events of this
            # type in the requested time range
            if len(marker_times) == 0:
                continue

            ev = Event(times=pq.Quantity(marker_times,
                                         units=self.nev_time_unit,
                                         dtype="int"),
                       labels=event_type['name'],
                       name="Digital Marker " + str(event_type),
                       file_origin=filename_nev,
                       marker_id=event_type['event_id'],
                       digital_marker=True,
                       analog_marker=False,
                       nttl=event_type['nttl'])
            seg.events.append(ev)
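
    # Event loading sketch (hypothetical file name 'Events.nev'): one neo
    # Event is created per (event_id, nttl, name) event type found in the
    # file, with marker times restricted to [t_start, t_stop]:
    #
    #     nio.read_nev('Events.nev', seg, t_start=0.0 * pq.s,
    #                  t_stop=10.0 * pq.s)
    #     for ev in seg.events:
    #         print(ev.name, ev.times)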
    def read_nse(self, filename_nse, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None, unit_list=None,
                 waveforms=False):
        '''
        Reads an nse file and attaches its content as SpikeTrains to the
        provided neo segment. Times can be provided in samples (integer
        values). If the nse file does not contain a sampling rate value, the
        ncs sampling rate on the same electrode is used.

        Arguments:
            filename_nse : Name of the .nse file to be loaded.
            seg : Neo Segment, to which the SpikeTrain containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy SpikeTrain. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time or sample (quantity or integer) that the
                            SpikeTrain begins.
                            Default None.
            t_stop : time or sample (quantity or integer) that the SpikeTrain
                            ends.
                            Default None.
            unit_list : unit ids to be loaded. If [] or None, all units are
                            loaded. Default None.
            waveforms : Load the waveform (up to 32 data points) for each
                            spike time. Default: False

        Returns:
            None
        '''
        if filename_nse[-4:] != '.nse':
            filename_nse += '.nse'
        if sep in filename_nse:
            filename_nse = filename_nse.split(sep)[-1]

        # extracting channel id of requested file
        channel_id = self.get_channel_id_by_file_name(filename_nse)
        if channel_id is not None:
            chid = channel_id
        else:
            # if the nse file is empty it is not listed in
            # self.parameters_nse, but in self.nse_avail
            if filename_nse in self.nse_avail:
                warnings.warn('NeuralynxIO is attempting to read an empty '
                              '(not associated) nse file (%s). '
                              'Not loading nse file.' % (filename_nse))
                return
            else:
                raise ValueError('NeuralynxIO is attempting to read a file '
                                 'not associated to this session (%s).' % (
                                     filename_nse))

        # ensure meaningful values for requested start and stop times
        # in case time is provided in samples: transform to absolute time
        # units; the ncs sampling rate is the best guess if there is no
        # explicit sampling rate given for the nse values
        if 'sampling_rate' in self.parameters_nse[chid]:
            sr = self.parameters_nse[chid]['sampling_rate']
        elif chid in self.parameters_ncs and 'sampling_rate' in \
                self.parameters_ncs[chid]:
            sr = self.parameters_ncs[chid]['sampling_rate']
        else:
            raise ValueError(
                'No sampling rate present for channel id %i in nse file %s. '
                'Could also not find the sampling rate of the respective '
                'ncs file.' % (chid, filename_nse))

        if isinstance(t_start, int):
            t_start = t_start / sr
        if isinstance(t_stop, int):
            t_stop = t_stop / sr

        # + rescaling to global recording start (first sample in any file
        # type). This is not optimal, as there is no way to know how long
        # the recording lasted after the last spike.
        if t_start is None or t_start < (
                self.parameters_nse[chid]['t_first']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_nse[chid]['t_first']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_nse[chid]['t_last']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nse[chid]['t_last']
                    - self.parameters_global['t_start']),
                   filename_nse))

        if t_stop is None:
            t_stop = (sys.maxsize) * self.nse_time_unit
        if t_stop is None or t_stop > (
                self.parameters_nse[chid]['t_last']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_nse[chid]['t_last']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_nse[chid]['t_first']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_nse[chid]['t_first']
                    - self.parameters_global['t_start']),
                   filename_nse))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_nse))

        # reading data
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = self.__mmap_nse_packets(filename_nse)

        # load all units available if unit_list == [] or None
        if unit_list == [] or unit_list is None:
            unit_list = np.unique(cell_numbers)
        elif not any([u in cell_numbers for u in unit_list]):
            self._diagnostic_print(
                'None of the requested unit ids (%s) present '
                'in nse file %s (contains unit_list %s)' % (
                    unit_list, filename_nse, np.unique(cell_numbers)))

        # extracting spikes unit-wise and generating spiketrains
        for unit_i in unit_list:
            if not lazy:
                # Extract all time stamps of that neuron on that electrode
                unit_mask = np.where(cell_numbers == unit_i)[0]
                spike_times = timestamps[unit_mask] * self.nse_time_unit
                spike_times = spike_times - self.parameters_global['t_start']
                time_mask = np.where(np.logical_and(spike_times >= t_start,
                                                    spike_times < t_stop))
                spike_times = spike_times[time_mask]
            else:
                spike_times = pq.Quantity([], units=self.nse_time_unit)

            # Create SpikeTrain object
            st = SpikeTrain(times=spike_times,
                            t_start=t_start,
                            t_stop=t_stop,
                            sampling_rate=self.parameters_ncs[chid][
                                'sampling_rate'],
                            name="Channel %i, Unit %i" % (chid, unit_i),
                            file_origin=filename_nse,
                            unit_id=unit_i,
                            channel_id=chid)

            if waveforms and not lazy:
                # Collect all waveforms of the specific unit
                # For computational reasons: no units, no time axis
                st.waveforms = data_points[unit_mask][time_mask]
                # TODO: Add units to waveforms (pq.uV?) and add annotation
                # left_sweep = x * pq.ms indicating when threshold crossing
                # occurred in waveform

            st.annotations.update(self.parameters_nse[chid])
            st.annotations['electrode_id'] = chid
            # This annotation is necessary for automatic generation of
            # recordingchannels
            st.annotations['channel_index'] = chid

            seg.spiketrains.append(st)
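
    # Spike loading sketch (hypothetical file name 'SE1.nse'): one SpikeTrain
    # is created per unit id; waveforms are attached as a plain array without
    # units when waveforms=True:
    #
    #     nio.read_nse('SE1.nse', seg, unit_list=[1, 2], waveforms=True)
    #     st = seg.spiketrains[0]
    #     print(st.annotations['unit_id'], st.waveforms.shape)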
    def read_ntt(self, filename_ntt, seg, lazy=False, cascade=True,
                 t_start=None, t_stop=None, unit_list=None,
                 waveforms=False):
        '''
        Reads an ntt file and attaches its content as SpikeTrains to the
        provided neo segment.

        Arguments:
            filename_ntt : Name of the .ntt file to be loaded.
            seg : Neo Segment, to which the SpikeTrain containing the data
                            will be attached.
            lazy : Postpone actual reading of the data. Instead provide a
                            dummy SpikeTrain. Default 'False'.
            cascade : Not used in this context. Default: 'True'.
            t_start : time (quantity) that the SpikeTrain begins.
                            Default None.
            t_stop : time (quantity) that the SpikeTrain ends. Default None.
            unit_list : unit ids to be loaded. If [] or None, all units are
                            loaded. Default None.
            waveforms : Load the waveform (up to 32 data points) for each
                            spike time. Default: False

        Returns:
            None
        '''
        if filename_ntt[-4:] != '.ntt':
            filename_ntt += '.ntt'
        if sep in filename_ntt:
            filename_ntt = filename_ntt.split(sep)[-1]

        # extracting channel id of requested file
        channel_id = self.get_channel_id_by_file_name(filename_ntt)
        if channel_id is not None:
            chid = channel_id
        else:
            # if the ntt file is empty it is not listed in
            # self.parameters_ntt, but in self.ntt_avail
            if filename_ntt in self.ntt_avail:
                warnings.warn('NeuralynxIO is attempting to read an empty '
                              '(not associated) ntt file (%s). '
                              'Not loading ntt file.' % (filename_ntt))
                return
            else:
                raise ValueError('NeuralynxIO is attempting to read a file '
                                 'not associated to this session (%s).' % (
                                     filename_ntt))

        # ensure meaningful values for requested start and stop times
        # in case time is provided in samples: transform to absolute time
        # units; the ncs sampling rate is the best guess if there is no
        # explicit sampling rate given for the ntt values
        if 'sampling_rate' in self.parameters_ntt[chid]:
            sr = self.parameters_ntt[chid]['sampling_rate']
        elif chid in self.parameters_ncs and 'sampling_rate' in \
                self.parameters_ncs[chid]:
            sr = self.parameters_ncs[chid]['sampling_rate']
        else:
            raise ValueError(
                'No sampling rate present for channel id %i in ntt file %s. '
                'Could also not find the sampling rate of the respective '
                'ncs file.' % (chid, filename_ntt))

        if isinstance(t_start, int):
            t_start = t_start / sr
        if isinstance(t_stop, int):
            t_stop = t_stop / sr

        # + rescaling to global recording start (first sample in any
        # recording file)
        if t_start is None or t_start < (
                self.parameters_ntt[chid]['t_first']
                - self.parameters_global['t_start']):
            t_start = (self.parameters_ntt[chid]['t_first']
                       - self.parameters_global['t_start'])

        if t_start > (self.parameters_ntt[chid]['t_last']
                      - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is later than data are '
                'recorded (t_stop = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ntt[chid]['t_last']
                    - self.parameters_global['t_start']),
                   filename_ntt))

        if t_stop is None:
            t_stop = (sys.maxsize) * self.ntt_time_unit
        if t_stop is None or t_stop > (
                self.parameters_ntt[chid]['t_last']
                - self.parameters_global['t_start']):
            t_stop = (self.parameters_ntt[chid]['t_last']
                      - self.parameters_global['t_start'])

        if t_stop < (self.parameters_ntt[chid]['t_first']
                     - self.parameters_global['t_start']):
            raise ValueError(
                'Requested time window (%s to %s) is earlier than data are '
                'recorded (t_start = %s) for file %s.'
                % (t_start, t_stop,
                   (self.parameters_ntt[chid]['t_first']
                    - self.parameters_global['t_start']),
                   filename_ntt))

        if t_start >= t_stop:
            raise ValueError(
                'Requested start time (%s) is later than / equal to stop '
                'time (%s) for file %s.' % (t_start, t_stop, filename_ntt))

        # reading data
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = self.__mmap_ntt_packets(filename_ntt)

        # TODO: When ntt available: Implement 1 RecordingChannelGroup per
        # Tetrode, such that each electrode gets its own recording channel

        # load all units available if unit_list == [] or None
        if unit_list == [] or unit_list is None:
            unit_list = np.unique(cell_numbers)
        elif not any([u in cell_numbers for u in unit_list]):
            self._diagnostic_print(
                'None of the requested unit ids (%s) present '
                'in ntt file %s (contains units %s)' % (
                    unit_list, filename_ntt, np.unique(cell_numbers)))

        # loading data for each unit and generating spiketrains
        for unit_i in unit_list:
            if not lazy:
                # Extract all time stamps of that neuron on that electrode
                mask = np.where(cell_numbers == unit_i)[0]
                spike_times = timestamps[mask] * self.ntt_time_unit
                spike_times = spike_times - self.parameters_global['t_start']
                spike_times = spike_times[np.where(
                    np.logical_and(spike_times >= t_start,
                                   spike_times < t_stop))]
            else:
                spike_times = pq.Quantity([], units=self.ntt_time_unit)

            # Create SpikeTrain object
            st = SpikeTrain(times=spike_times,
                            t_start=t_start,
                            t_stop=t_stop,
                            sampling_rate=self.parameters_ncs[chid][
                                'sampling_rate'],
                            name="Channel %i, Unit %i" % (chid, unit_i),
                            file_origin=filename_ntt,
                            unit_id=unit_i,
                            channel_id=chid)

            # Collect all waveforms of the specific unit
            if waveforms and not lazy:
                # For computational reasons: no units, no time axis.
                # Transposing to adhere to the neo guideline that time should
                # be on the first axis, which is not intuitive here.
                st.waveforms = np.array(
                    [data_points[t, :, :] for t in range(len(timestamps))
                     if cell_numbers[t] == unit_i]).transpose()
                # TODO: Add units to waveforms (pq.uV?) and add annotation
                # left_sweep = x * pq.ms indicating when threshold crossing
                # occurred in waveform

            # updating (not overwriting) annotations to avoid sharing the
            # parameter dictionary between spiketrains
            st.annotations.update(self.parameters_ntt[chid])
            st.annotations['electrode_id'] = chid
            # This annotation is necessary for automatic generation of
            # recordingchannels
            st.annotations['channel_index'] = chid

            seg.spiketrains.append(st)
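
    # Tetrode loading sketch (hypothetical file name 'TT1.ntt'): identical in
    # use to read_nse(), but waveforms carry one dimension per tetrode wire:
    #
    #     nio.read_ntt('TT1.ntt', seg, waveforms=True)
    #     st = seg.spiketrains[0]
    #     print(st.waveforms.shape)  # transposed above, time on first axis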
    # private routines
    # #################################################

    def _associate(self, cachedir=None, usecache='hash'):
        """
        Associates the object with a specified Neuralynx session, i.e., a
        combination of .nse, .nev and .ncs files. The meta data is read into
        the object for future reference.

        Arguments:
            cachedir : Directory for loading and saving hashes of recording
                            sessions and pickled meta information about files
                            extracted during the association process.
            use_cache: method used for cache identification. Possible values:
                            'hash'/'always'/'datesize'/'never'. Default 'hash'
        Returns:
            -
        """
        # If already associated, disassociate first
        if self.associated:
            raise OSError(
                "Trying to associate an already associated NeuralynxIO "
                "object.")

        # Create parameter containers
        # Dictionary that holds different parameters read from the .nev file
        self.parameters_nse = {}
        # List of parameter dictionaries for all potential file types
        self.parameters_ncs = {}
        self.parameters_nev = {}
        self.parameters_ntt = {}
        # combined global parameters
        self.parameters_global = {}

        # Scanning session directory for recorded files
        self.sessionfiles = [f for f in listdir(self.sessiondir) if
                             isfile(os.path.join(self.sessiondir, f))]

        # Listing available files
        self.ncs_avail = []
        self.nse_avail = []
        self.nev_avail = []
        self.ntt_avail = []

        # Listing associated (= non-corrupted, non-empty) files
        self.ncs_asso = []
        self.nse_asso = []
        self.nev_asso = []
        self.ntt_asso = []

        if usecache not in ['hash', 'always', 'datesize', 'never']:
            raise ValueError(
                "Argument value of usecache '%s' is not valid. Accepted "
                "values are 'hash','always','datesize','never'" % usecache)

        if cachedir is None and usecache != 'never':
            raise ValueError('No cache directory provided.')

        # check if there are any changes of the data files -> new data check
        # run; files are never checked if usecache == 'always'
        check_files = usecache != 'always'

        if cachedir is not None and usecache != 'never':
            self._diagnostic_print(
                'Calculating %s of session files to check for cached '
                'parameter files.' % usecache)
            cachefile = cachedir + sep + self.sessiondir.split(sep)[
                -1] + '/hashkeys'
            if not os.path.exists(cachedir + sep
                                  + self.sessiondir.split(sep)[-1]):
                os.makedirs(cachedir + sep + self.sessiondir.split(sep)[-1])

            if usecache == 'hash':
                hashes_calc = {}
                # calculate hashes of all available files
                for f in self.sessionfiles:
                    file_hash = self.hashfile(open(self.sessiondir + sep + f,
                                                   'rb'), hashlib.sha256())
                    hashes_calc[f] = file_hash
            elif usecache == 'datesize':
                hashes_calc = {}
                for f in self.sessionfiles:
                    hashes_calc[f] = self.datesizefile(
                        self.sessiondir + sep + f)

            # load hashes saved for this session in an earlier loading run
            if os.path.exists(cachefile):
                hashes_read = pickle.load(open(cachefile, 'rb'))
            else:
                hashes_read = {}

            # compare hashes to previously saved meta data and load meta data
            # if no changes occurred
            if usecache == 'always' or all([f in hashes_calc
                                            and f in hashes_read
                                            and hashes_calc[f]
                                            == hashes_read[f]
                                            for f in self.sessionfiles]):
                check_files = False
                self._diagnostic_print(
                    'Using cached metadata from earlier analysis run in '
                    'file %s. Skipping file checks.' % cachefile)

                # loading saved parameters
                parameterfile = cachedir + sep + self.sessiondir.split(sep)[
                    -1] + '/parameters.cache'
                if os.path.exists(parameterfile):
                    parameters_read = pickle.load(open(parameterfile, 'rb'))
                else:
                    raise OSError('Inconsistent cache files.')

                for IOdict, dictname in [(self.parameters_global, 'global'),
                                         (self.parameters_ncs, 'ncs'),
                                         (self.parameters_nse, 'nse'),
                                         (self.parameters_nev, 'nev'),
                                         (self.parameters_ntt, 'ntt')]:
                    IOdict.update(parameters_read[dictname])
                self.nev_asso = self.parameters_nev.keys()
                self.ncs_asso = [val['filename'] for val in
                                 self.parameters_ncs.values()]
                self.nse_asso = [val['filename'] for val in
                                 self.parameters_nse.values()]
                self.ntt_asso = [val['filename'] for val in
                                 self.parameters_ntt.values()]
        for filename in self.sessionfiles:
            # Sorting files by extension (.ncs = continuous signals)
            if filename[-4:] == '.ncs':
                self.ncs_avail.append(filename)
            elif filename[-4:] == '.nse':
                self.nse_avail.append(filename)
            elif filename[-4:] == '.nev':
                self.nev_avail.append(filename)
            elif filename[-4:] == '.ntt':
                self.ntt_avail.append(filename)
            else:
                self._diagnostic_print(
                    'Ignoring file of unknown data type %s' % filename)

        if check_files:
            self._diagnostic_print('Starting individual file checks.')
            # =================================================================
            # Scan NCS files
            # =================================================================
            self._diagnostic_print(
                '\nDetected %i .ncs file(s).' % len(self.ncs_avail))
            for ncs_file in self.ncs_avail:
                # Loading individual NCS file and extracting parameters
                self._diagnostic_print("Scanning " + ncs_file + ".")

                # Reading file packet headers
                filehandle = self.__mmap_ncs_packet_headers(ncs_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of ncs file
                    self.__ncs_packet_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % ncs_file)
                    continue

                # Reading data packet header information and storing it in
                # parameters_ncs
                self.__read_ncs_data_headers(filehandle, ncs_file)

                # Reading txt file header
                channel_id = self.get_channel_id_by_file_name(ncs_file)
                self.__read_text_header(ncs_file,
                                        self.parameters_ncs[channel_id])

                # Check for invalid starting times of data packets in ncs file
                self.__ncs_invalid_first_sample_check(filehandle)

                # Check ncs file for gaps
                self.__ncs_gap_check(filehandle)

                self.ncs_asso.append(ncs_file)
            # =================================================================
            # Scan NSE files
            # =================================================================
            self._diagnostic_print(
                '\nDetected %i .nse file(s).' % len(self.nse_avail))

            for nse_file in self.nse_avail:
                # Loading individual NSE file and extracting parameters
                self._diagnostic_print('Scanning ' + nse_file + '.')

                # Reading file
                filehandle = self.__mmap_nse_packets(nse_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of nse file
                    self.__nse_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % nse_file)
                    continue

                # Reading header information and storing it in parameters_nse
                self.__read_nse_data_header(filehandle, nse_file)

                # Reading txt file header
                channel_id = self.get_channel_id_by_file_name(nse_file)
                self.__read_text_header(nse_file,
                                        self.parameters_nse[channel_id])

                # using sampling rate from txt header, as this is not saved
                # in data packets
                if 'SamplingFrequency' in self.parameters_nse[channel_id]:
                    self.parameters_nse[channel_id]['sampling_rate'] = \
                        (self.parameters_nse[channel_id][
                            'SamplingFrequency'] * self.nse_sr_unit)

                self.nse_asso.append(nse_file)
            # =================================================================
            # Scan NEV files
            # =================================================================
            self._diagnostic_print(
                '\nDetected %i .nev file(s).' % len(self.nev_avail))

            for nev_file in self.nev_avail:
                # Loading individual NEV file and extracting parameters
                self._diagnostic_print('Scanning ' + nev_file + '.')

                # Reading file
                filehandle = self.__mmap_nev_file(nev_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of nev file
                    self.__nev_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % nev_file)
                    continue

                # Reading header information and storing it in parameters_nev
                self.__read_nev_data_header(filehandle, nev_file)

                # Reading txt file header
                self.__read_text_header(nev_file,
                                        self.parameters_nev[nev_file])

                self.nev_asso.append(nev_file)
            # =================================================================
            # Scan NTT files
            # =================================================================
            self._diagnostic_print(
                '\nDetected %i .ntt file(s).' % len(self.ntt_avail))

            for ntt_file in self.ntt_avail:
                # Loading individual NTT file and extracting parameters
                self._diagnostic_print('Scanning ' + ntt_file + '.')

                # Reading file
                filehandle = self.__mmap_ntt_file(ntt_file)
                if filehandle is None:
                    continue

                try:
                    # Checking consistency of ntt file
                    self.__ntt_check(filehandle)
                except AssertionError:
                    warnings.warn(
                        'Session file %s did not pass data packet check. '
                        'This file cannot be loaded.' % ntt_file)
                    continue

                # Reading header information and storing it in parameters_ntt
                self.__read_ntt_data_header(filehandle, ntt_file)

                # Reading txt file header
                self.__read_ntt_text_header(ntt_file)

                # using sampling rate from txt header, as this is not saved
                # in data packets
                channel_id = self.get_channel_id_by_file_name(ntt_file)
                if 'SamplingFrequency' in self.parameters_ntt[channel_id]:
                    self.parameters_ntt[channel_id]['sampling_rate'] = \
                        (self.parameters_ntt[channel_id][
                            'SamplingFrequency'] * self.ntt_sr_unit)

                self.ntt_asso.append(ntt_file)
            # =================================================================
            # Check consistency across files
            # =================================================================

            # check RECORDING_OPENED / CLOSED times (from txt header) for
            # different files
            for parameter_collection in [self.parameters_ncs,
                                         self.parameters_nse,
                                         self.parameters_nev,
                                         self.parameters_ntt]:
                # check recording_opened times for all files of this type
                if any(np.abs(np.diff([i['recording_opened'] for i in
                                       parameter_collection.values()]))
                       > datetime.timedelta(seconds=1)):
                    raise ValueError(
                        'Session files were opened for recording with a '
                        'delay greater than 1 second.')

                # check recording_closed times for all files of this type
                if any(np.diff([i['recording_closed'] for i in
                                parameter_collection.values()
                                if i['recording_closed'] is not None])
                       > datetime.timedelta(seconds=0.1)):
                    raise ValueError(
                        'Session files were closed after recording with a '
                        'delay greater than 0.1 second.')

            # get the overall recording period covered by any file of the
            # session
            parameter_collection = list(self.parameters_ncs.values()) + \
                list(self.parameters_nse.values()) + \
                list(self.parameters_ntt.values()) + \
                list(self.parameters_nev.values())
            self.parameters_global['recording_opened'] = min(
                [i['recording_opened'] for i in parameter_collection])
            self.parameters_global['recording_closed'] = max(
                [i['recording_closed'] for i in parameter_collection
                 if i['recording_closed'] is not None])
            # Set up GLOBAL TIMING SCHEME
            # #############################
            for file_type, parameter_collection in [
                    ('ncs', self.parameters_ncs), ('nse', self.parameters_nse),
                    ('nev', self.parameters_nev),
                    ('ntt', self.parameters_ntt)]:
                # check starting times
                name_t1, name_t2 = ['t_start', 't_stop'] if (
                    file_type != 'nse' and file_type != 'ntt') \
                    else ['t_first', 't_last']

                # checking if files of the same type start at the same time
                # point
                if file_type != 'nse' and file_type != 'ntt' \
                        and len(np.unique(np.array(
                            [i[name_t1].magnitude for i in
                             parameter_collection.values()]))) > 1:
                    raise ValueError(
                        '%s files do not start at same time point.' %
                        file_type)

                # saving t_start and t_stop for each file type available
                if len([i[name_t1] for i in parameter_collection.values()]):
                    self.parameters_global['%s_t_start' % file_type] = min(
                        [i[name_t1]
                         for i in parameter_collection.values()])
                    self.parameters_global['%s_t_stop' % file_type] = max(
                        [i[name_t2]
                         for i in parameter_collection.values()])

            # extracting minimal t_start and maximal t_stop value for this
            # recording session
            self.parameters_global['t_start'] = min(
                [self.parameters_global['%s_t_start' % t]
                 for t in ['ncs', 'nev', 'nse', 'ntt']
                 if '%s_t_start' % t in self.parameters_global])
            self.parameters_global['t_stop'] = max(
                [self.parameters_global['%s_t_stop' % t]
                 for t in ['ncs', 'nev', 'nse', 'ntt']
                 if '%s_t_stop' % t in self.parameters_global])
            # checking gap consistency across ncs files
            # check number of gaps detected
            if len(np.unique([len(i['gaps']) for i in
                              self.parameters_ncs.values()])) != 1:
                raise ValueError('NCS files contain different numbers of '
                                 'gaps!')

            # check consistency of gaps across files and create global gap
            # collection
            self.parameters_global['gaps'] = []
            first_ncs_gaps = list(self.parameters_ncs.values())[0]['gaps']
            for g in range(len(first_ncs_gaps)):
                integrated = False
                gap_stats = np.unique(
                    [i['gaps'][g] for i in self.parameters_ncs.values()],
                    return_counts=True)
                if len(gap_stats[0]) != 3 or len(np.unique(gap_stats[1])) != 1:
                    raise ValueError(
                        'Gap number %i is not consistent across NCS '
                        'files.' % g)
                else:
                    # check if this is the second part of an already existing
                    # gap
                    for gg in range(len(self.parameters_global['gaps'])):
                        globalgap = self.parameters_global['gaps'][gg]
                        # check if stop time of the first gap is the start
                        # time of the second -> continuous gap
                        if globalgap[2] == first_ncs_gaps[g][1]:
                            self.parameters_global['gaps'][gg] = \
                                globalgap[:2] + (first_ncs_gaps[g][2],)
                            integrated = True
                            break

                    if not integrated:
                        # add as new gap if this is not a continuation of an
                        # existing global gap
                        self.parameters_global['gaps'].append(
                            first_ncs_gaps[g])
        # save results of association for future analysis together with hash
        # values for change tracking
        if cachedir is not None and usecache != 'never':
            cachesubdir = cachedir + sep + self.sessiondir.split(sep)[-1]
            pickle.dump({'global': self.parameters_global,
                         'ncs': self.parameters_ncs,
                         'nev': self.parameters_nev,
                         'nse': self.parameters_nse,
                         'ntt': self.parameters_ntt},
                        open(cachesubdir + sep + 'parameters.cache', 'wb'))
            if usecache != 'always':
                pickle.dump(hashes_calc, open(
                    cachesubdir + sep + 'hashkeys', 'wb'))

        self.associated = True
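
    # Cache layout sketch: for a session directory <session>, the cache
    # lives under <cachedir>/<session>/ and consists of two pickle files --
    # 'parameters.cache' holding the five parameter dictionaries dumped
    # above, and 'hashkeys' holding the per-file hash or date/size
    # fingerprints that decide on the next run whether the cached metadata
    # is still valid.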

    # private routines
    # #########################################################

    # Memory Mapping Methods
    def __mmap_nse_packets(self, filename):
        """
        Memory map of the Neuralynx .nse file optimized for extraction of
        data packet headers. Reading a uniform dtype improves speed, but
        timestamps need to be reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u2',
                             shape=((filesize - 16384) // 2 // 56, 56),
                             mode='r', offset=16384)

            # reconstructing original data
            # first 4 uint16 words -> timestamp in microseconds
            timestamps = data[:, 0] \
                + data[:, 1] * 2 ** 16 \
                + data[:, 2] * 2 ** 32 \
                + data[:, 3] * 2 ** 48
            channel_id = data[:, 4] + data[:, 5] * 2 ** 16
            cell_number = data[:, 6] + data[:, 7] * 2 ** 16
            features = [data[:, p] + data[:, p + 1] * 2 ** 16
                        for p in range(8, 23, 2)]
            features = np.array(features, dtype='i4')

            data_points = data[:, 24:56].astype('i2')
            del data
            return timestamps, channel_id, cell_number, features, data_points
        else:
            return None
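
    # Sketch of the timestamp reconstruction used above: in a little-endian
    # uint16 view, the four words of a 64-bit timestamp carry its 2**0,
    # 2**16, 2**32 and 2**48 digits, e.g. (illustrative values only)
    #
    #     words = np.array([0x5678, 0x1234, 0x0000, 0x0000], dtype='<u2')
    #     ts = int(words[0]) + int(words[1]) * 2 ** 16  # == 0x12345678
    #
    # so the weighted sum of the words recovers the original counter.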

    def __mmap_ncs_data(self, filename):
        """ Memory map of the Neuralynx .ncs file optimized for data
        extraction """
        if getsize(self.sessiondir + sep + filename) > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype=np.dtype(('i2', (522,))), mode='r',
                             offset=16384)
            # removing data packet headers and keeping only the samples
            return data[:, 10:]
        else:
            return None
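
    # Record layout of an .ncs packet, for reference: 1044 bytes in total,
    # i.e. a uint64 timestamp, three uint32 header fields (channel id,
    # sampling rate, number of valid samples) and 512 int16 samples. As
    # int16 words that is 522 columns per record, the first 10 of which are
    # header -- hence the data[:, 10:] slice above.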

    def __mmap_ncs_packet_headers(self, filename):
        """
        Memory map of the Neuralynx .ncs file optimized for extraction of
        data packet headers. Reading standard dtype improves speed, but
        timestamps need to be reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u4',
                             shape=((filesize - 16384) // 4 // 261, 261),
                             mode='r', offset=16384)

            ts = data[:, 0:2]
            multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
                              axis=0)
            timestamps = np.sum(ts * multi, axis=1)
            # timestamps = data[:, 0] + (data[:, 1] * 2 ** 32)
            header_u4 = data[:, 2:5]
            return timestamps, header_u4
        else:
            return None
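
    # The same 1044-byte record viewed as uint32 gives the 261 columns used
    # above: words 0-1 are the low and high halves of the timestamp (hence
    # the [1, 2 ** 32] weighting), and words 2-4 are channel id, sampling
    # rate and valid sample count (returned as header_u4).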

    def __mmap_ncs_packet_timestamps(self, filename):
        """
        Memory map of the Neuralynx .ncs file optimized for extraction of
        data packet timestamps. Reading standard dtype improves speed, but
        timestamps need to be reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u4',
                             shape=((filesize - 16384) // 4 // 261, 261),
                             mode='r', offset=16384)

            ts = data[:, 0:2]
            multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
                              axis=0)
            timestamps = np.sum(ts * multi, axis=1)
            # timestamps = data[:, 0] + data[:, 1] * 2 ** 32
            return timestamps
        else:
            return None

    def __mmap_nev_file(self, filename):
        """ Memory map the Neuralynx .nev file """
        nev_dtype = np.dtype([
            ('reserved', '<i2'),
            ('system_id', '<i2'),
            ('data_size', '<i2'),
            ('timestamp', '<u8'),
            ('event_id', '<i2'),
            ('ttl_input', '<i2'),
            ('crc_check', '<i2'),
            ('dummy1', '<i2'),
            ('dummy2', '<i2'),
            ('extra', '<i4', (8,)),
            ('event_string', 'a128'),
        ])
        if getsize(self.sessiondir + sep + filename) > 16384:
            return np.memmap(self.sessiondir + sep + filename,
                             dtype=nev_dtype, mode='r', offset=16384)
        else:
            return None

    def __mmap_ntt_file(self, filename):
        """ Memory map the Neuralynx .ntt file """
        ntt_dtype = np.dtype([
            ('timestamp', '<u8'),
            ('sc_number', '<u4'),
            ('cell_number', '<u4'),
            ('params', '<u4', (8,)),
            ('data', '<i2', (32, 4)),
        ])
        if getsize(self.sessiondir + sep + filename) > 16384:
            return np.memmap(self.sessiondir + sep + filename,
                             dtype=ntt_dtype, mode='r', offset=16384)
        else:
            return None
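
    # Size check for the dtype above: 8 (timestamp) + 4 (sc_number) +
    # 4 (cell_number) + 8 * 4 (params) + 32 * 4 * 2 (32 samples on 4
    # tetrode channels as int16) = 304 bytes per record, matching the
    # 152 uint16 words per record assumed in __mmap_ntt_packets below.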

    def __mmap_ntt_packets(self, filename):
        """
        Memory map of the Neuralynx .ntt file optimized for extraction of
        data packet headers. Reading standard dtype improves speed, but
        timestamps need to be reconstructed.
        """
        filesize = getsize(self.sessiondir + sep + filename)  # in bytes
        if filesize > 16384:
            data = np.memmap(self.sessiondir + sep + filename,
                             dtype='<u2',
                             shape=((filesize - 16384) // 2 // 152, 152),
                             mode='r', offset=16384)

            # reconstructing original data
            # first 4 uint16 words -> timestamp in microseconds
            timestamps = data[:, 0] + data[:, 1] * 2 ** 16 + \
                data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48
            channel_id = data[:, 4] + data[:, 5] * 2 ** 16
            cell_number = data[:, 6] + data[:, 7] * 2 ** 16
            features = [data[:, p] + data[:, p + 1] * 2 ** 16
                        for p in range(8, 23, 2)]
            features = np.array(features, dtype='i4')

            # each record holds 32 samples on 4 channels; reshaping per
            # record is assumed to be the intent here (a flat reshape to
            # (4, 32) would only work for a single-record file)
            data_points = data[:, 24:152].astype('i2').reshape((-1, 32, 4))
            del data
            return timestamps, channel_id, cell_number, features, data_points
        else:
            return None

    # ___________________________ header extraction __________________________
    def __read_text_header(self, filename, parameter_dict):
        # Reading main file header (plain text, 16kB)
        text_header = codecs.open(self.sessiondir + sep + filename, 'r',
                                  'latin-1').read(16384)
        parameter_dict['cheetah_version'] = \
            self.__get_cheetah_version_from_txt_header(text_header, filename)
        parameter_dict.update(self.__get_filename_and_times_from_txt_header(
            text_header, parameter_dict['cheetah_version']))

        # separating lines of header and ignoring last line (fill); check if
        # Linux or Windows OS
        if sep == '/':
            text_header = text_header.split('\r\n')[:-1]
        if sep == '\\':
            text_header = text_header.split('\n')[:-1]
        # minor parameters possibly saved in header (for any file type)
        minor_keys = ['AcqEntName',
                      'FileType',
                      'FileVersion',
                      'RecordSize',
                      'HardwareSubSystemName',
                      'HardwareSubSystemType',
                      'SamplingFrequency',
                      'ADMaxValue',
                      'ADBitVolts',
                      'NumADChannels',
                      'ADChannel',
                      'InputRange',
                      'InputInverted',
                      'DSPLowCutFilterEnabled',
                      'DspLowCutFrequency',
                      'DspLowCutNumTaps',
                      'DspLowCutFilterType',
                      'DSPHighCutFilterEnabled',
                      'DspHighCutFrequency',
                      'DspHighCutNumTaps',
                      'DspHighCutFilterType',
                      'DspDelayCompensation',
                      'DspFilterDelay_\xb5s',
                      'DisabledSubChannels',
                      'WaveformLength',
                      'AlignmentPt',
                      'ThreshVal',
                      'MinRetriggerSamples',
                      'SpikeRetriggerTime',
                      'DualThresholding',
                      'Feature Peak 0',
                      'Feature Valley 1',
                      'Feature Energy 2',
                      'Feature Height 3',
                      'Feature NthSample 4',
                      'Feature NthSample 5',
                      'Feature NthSample 6',
                      'Feature NthSample 7',
                      'SessionUUID',
                      'FileUUID',
                      'CheetahRev',
                      'ProbeName',
                      'OriginalFileName',
                      'TimeCreated',
                      'TimeClosed',
                      'ApplicationName',
                      'AcquisitionSystem',
                      'ReferenceChannel']
        # extracting minor key values of header (only taking into account
        # non-empty lines)
        for i, minor_entry in enumerate(text_header):
            if minor_entry == '' or minor_entry[0] == '#':
                continue
            matching_key = [key for key in minor_keys if
                            minor_entry.strip('-').startswith(key)]
            if len(matching_key) == 1:
                matching_key = matching_key[0]
                minor_value = minor_entry.split(matching_key)[1].strip(' ')

                # determine data type of entry
                if minor_value.isdigit():
                    # converting to int if possible
                    minor_value = int(minor_value)
                else:
                    # converting to float if possible
                    try:
                        minor_value = float(minor_value)
                    except ValueError:
                        pass

                if matching_key in parameter_dict:
                    warnings.warn(
                        'Multiple entries for {} in text header of {}'.format(
                            matching_key, filename))
                else:
                    parameter_dict[matching_key] = minor_value
            elif len(matching_key) > 1:
                raise ValueError(
                    'Inconsistent minor key list for text header '
                    'interpretation.')
            else:
                warnings.warn(
                    'Skipping text header entry %s, because it is not in '
                    'the minor key list' % minor_entry)

        self._diagnostic_print(
            'Successfully decoded text header of file (%s).' % filename)

    def __get_cheetah_version_from_txt_header(self, text_header, filename):
        version_regex = re.compile(r'((-CheetahRev )|'
                                   r'(ApplicationName Cheetah "))'
                                   r'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
        match = version_regex.search(text_header)
        if match:
            return match.groupdict()['version']
        else:
            raise ValueError('Cannot extract Cheetah version from file '
                             'header of file %s' % filename)

    def __get_filename_and_times_from_txt_header(self, text_header, version):
        if parse_version(version) <= parse_version('5.6.4'):
            datetime1_regex = re.compile(r'## Time Opened \(m/d/y\): '
                                         r'(?P<date>\S+)'
                                         r' \(h:m:s\.ms\) '
                                         r'(?P<time>\S+)')
            datetime2_regex = re.compile(r'## Time Closed \(m/d/y\): '
                                         r'(?P<date>\S+)'
                                         r' \(h:m:s\.ms\) '
                                         r'(?P<time>\S+)')
            filename_regex = re.compile(r'## File Name (?P<filename>\S+)')
            datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
        else:
            datetime1_regex = re.compile(r'-TimeCreated '
                                         r'(?P<date>\S+) '
                                         r'(?P<time>\S+)')
            datetime2_regex = re.compile(r'-TimeClosed '
                                         r'(?P<date>\S+) '
                                         r'(?P<time>\S+)')
            filename_regex = re.compile(r'-OriginalFileName '
                                        r'"?(?P<filename>\S+)"?')
            datetimeformat = '%Y/%m/%d %H:%M:%S'

        matchtime1 = datetime1_regex.search(text_header).groupdict()
        matchtime2 = datetime2_regex.search(text_header).groupdict()
        matchfilename = filename_regex.search(text_header)

        filename = matchfilename.groupdict()['filename']

        if '## Time Closed File was not closed properly' in text_header:
            warnings.warn('Text header of file %s does not contain recording '
                          'closed time. File was not closed properly.'
                          '' % filename)

        datetime1 = datetime.datetime.strptime(matchtime1['date'] + ' '
                                               + matchtime1['time'],
                                               datetimeformat)
        datetime2 = datetime.datetime.strptime(matchtime2['date'] + ' '
                                               + matchtime2['time'],
                                               datetimeformat)

        output = {'recording_opened': datetime1,
                  'recording_closed': datetime2,
                  'file_created': datetime1,
                  'file_closed': datetime2,
                  'recording_file_name': filename}

        return output
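
    # Illustrative header lines that the regexes above are built to match
    # (reconstructed from the patterns themselves, not copied from a real
    # file):
    #   Cheetah <= 5.6.4:
    #     ## Time Opened (m/d/y): 9/23/2014 (h:m:s.ms) 16:5:6.217
    #     ## File Name C:\CheetahData\CSC1.ncs
    #   newer versions:
    #     -TimeCreated 2016/11/09 16:05:06
    #     -OriginalFileName "C:\CheetahData\CSC1.ncs"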

    def __read_ncs_data_headers(self, filehandle, filename):
        '''
        Reads the .ncs data block headers and stores the information in the
        object's parameters_ncs dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .ncs file.
            filename (string):
                Name of the ncs file.

        Returns:
            dict of extracted data
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]

        channel_id = header_u4[0][0]
        sr = header_u4[0][1]  # in Hz

        t_start = timestamps[0]  # in microseconds
        # calculating the time stamp of the first sample that was no longer
        # recorded:
        # t_stop = time of first sample in last packet + (# valid samples in
        # last packet * conversion factor / sampling rate)
        # the conversion factor is needed as times are stored in microseconds
        t_stop = timestamps[-1] + (
            (header_u4[-1][2]) * (
                1 / self.ncs_time_unit.rescale(pq.s)).magnitude
            / header_u4[-1][1])

        if channel_id in self.parameters_ncs:
            raise ValueError(
                'Detected multiple ncs files for channel_id %i.'
                % channel_id)
        else:
            sampling_unit = [pq.CompoundUnit(
                '%f*%s' % (sr, self.ncs_sr_unit.symbol))]
            sampling_rate = sr * self.ncs_sr_unit
            self.parameters_ncs[channel_id] = {'filename': filename,
                                               't_start': t_start
                                               * self.ncs_time_unit,
                                               't_stop': t_stop
                                               * self.ncs_time_unit,
                                               'sampling_rate': sampling_rate,
                                               'sampling_unit': sampling_unit,
                                               'gaps': []}
            return {channel_id: self.parameters_ncs[channel_id]}
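
    # Worked example for the t_stop formula above, assuming the usual
    # 1 microsecond time unit: a last packet starting at t = 1000000 us
    # with 512 valid samples at sr = 32000 Hz yields
    #     t_stop = 1000000 + 512 * 1e6 / 32000 = 1016000 us,
    # i.e. the time of the first sample that is no longer contained in
    # the file.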

    def __read_nse_data_header(self, filehandle, filename):
        '''
        Reads the .nse data block headers and stores the information in the
        object's parameters_nse dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .nse file.
            filename (string):
                Name of the nse file.

        Returns:
            -
        '''
        if filehandle is not None:
            [timestamps, channel_ids, cell_numbers, features,
             data_points] = filehandle

            t_first = timestamps[0]  # in microseconds
            t_last = timestamps[-1]  # in microseconds
            channel_id = channel_ids[0]
            cell_count = cell_numbers[0]  # number of cells identified

            self.parameters_nse[channel_id] = {'filename': filename,
                                               't_first': t_first
                                               * self.nse_time_unit,
                                               't_last': t_last
                                               * self.nse_time_unit,
                                               'cell_count': cell_count}

    def __read_ntt_data_header(self, filehandle, filename):
        '''
        Reads the .ntt data block headers and stores the information in the
        object's parameters_ntt dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
            filename (string):
                Name of the ntt file.

        Returns:
            -
        '''
        if filehandle is not None:
            [timestamps, channel_ids, cell_numbers, features,
             data_points] = filehandle

            t_first = timestamps[0]  # in microseconds
            t_last = timestamps[-1]  # in microseconds
            channel_id = channel_ids[0]
            cell_count = cell_numbers[0]  # number of cells identified
            # spike_parameters = filehandle[0][3]
            # else:
            #     t_first = None
            #     channel_id = None
            #     cell_count = 0
            #     # spike_parameters = None
            #     self._diagnostic_print('Empty file: No information '
            #                            'contained in %s' % filename)

            self.parameters_ntt[channel_id] = {'filename': filename,
                                               't_first': t_first
                                               * self.ntt_time_unit,
                                               't_last': t_last
                                               * self.ntt_time_unit,
                                               'cell_count': cell_count}

    def __read_nev_data_header(self, filehandle, filename):
        '''
        Reads the .nev data block headers and stores the relevant information
        in the object's parameters_nev dictionary.

        Args:
            filehandle (file object):
                Handle to the already opened .nev file.
            filename (string):
                Name of the nev file.

        Returns:
            -
        '''
        # Extracting basic recording events to be able to check recording
        # consistency
        if filename in self.parameters_nev:
            raise ValueError(
                'Detected multiple nev files of name %s.' % filename)
        else:
            self.parameters_nev[filename] = {}
            if 'Starting_Recording' in self.parameters_nev[filename]:
                raise ValueError('Trying to read second nev file of name %s. '
                                 'Only one can be handled.' % filename)
            self.parameters_nev[filename]['Starting_Recording'] = []
            self.parameters_nev[filename]['events'] = []
            for event in filehandle:
                # separately extracting 'Starting Recording'
                if ((event[4] in [11, 19])
                        and (event[10].decode('latin-1')
                             == 'Starting Recording')):
                    self.parameters_nev[filename][
                        'Starting_Recording'].append(
                        event[3] * self.nev_time_unit)

                # adding all events to parameter collection
                self.parameters_nev[filename]['events'].append(
                    {'timestamp': event[3] * self.nev_time_unit,
                     'event_id': event[4],
                     'nttl': event[5],
                     'name': event[10].decode('latin-1')})

            if len(self.parameters_nev[filename]['Starting_Recording']) < 1:
                raise ValueError(
                    'No event "Starting_Recording" detected in %s' % filename)

            self.parameters_nev[filename]['t_start'] = min(
                self.parameters_nev[filename]['Starting_Recording'])
            # t_stop = time stamp of last event in file
            self.parameters_nev[filename]['t_stop'] = max(
                [e['timestamp'] for e in
                 self.parameters_nev[filename]['events']])

            # extract all occurring event types (= combination of nttl,
            # event_id and name/string)
            event_types = copy.deepcopy(
                self.parameters_nev[filename]['events'])
            for d in event_types:
                d.pop('timestamp')
            self.parameters_nev[filename]['event_types'] = [
                dict(y) for y in {tuple(x.items()) for x in event_types}]
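
    # Note on the event_types construction above: each event dict (minus
    # its timestamp) is frozen into a tuple of items so that it becomes
    # hashable, the set comprehension drops duplicates, and dict() turns
    # the survivors back into dicts -- a compact way to deduplicate a list
    # of small dictionaries.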

    # ________________ File Checks __________________________________
    def __ncs_packet_check(self, filehandle):
        '''
        Checks consistency of data in ncs file and raises assertion error if a
        check fails. Detected recording gaps are added to parameter_ncs

        Args:
            filehandle (file object):
                Handle to the already opened .ncs file.
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]

        # checking sampling rate of data packets
        sr0 = header_u4[0, 1]
        assert all(header_u4[:, 1] == sr0)

        # checking channel id of data packets
        channel_id = header_u4[0, 0]
        assert all(header_u4[:, 0] == channel_id)

        # time offset of data packets
        # TODO: Check if there is a safer way to do the delta_t check for ncs
        # data packets; it is not a safe assumption that the first two data
        # packets have correct time stamps
        delta_t = timestamps[1] - timestamps[0]

        # valid samples of first data packet
        temp_valid_samples = header_u4[0, 2]

        # consistency check: time difference between packets corresponds to
        # the number of recorded samples
        assert delta_t == (
            temp_valid_samples / (
                self.ncs_time_unit.rescale(pq.s).magnitude * sr0))

        self._diagnostic_print('NCS packet check successful.')
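
    # The identity asserted above, spelled out: assuming the native
    # microsecond time base (self.ncs_time_unit.rescale(pq.s).magnitude
    # == 1e-6), consecutive packets should be spaced by
    #     delta_t [us] = n_samples * 1e6 / sr
    # e.g. 512 samples at 32 kHz -> 16000 us between packet timestamps.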

    def __nse_check(self, filehandle):
        '''
        Checks consistency of data in nse file and raises assertion error if a
        check fails.

        Args:
            filehandle (file object):
                Handle to the already opened .nse file.
        '''
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = filehandle
        assert all(channel_ids == channel_ids[0])
        assert all([len(dp) == len(data_points[0]) for dp in data_points])
        self._diagnostic_print('NSE file check successful.')

    def __nev_check(self, filehandle):
        '''
        Checks consistency of data in nev file and raises assertion error if a
        check fails.

        Args:
            filehandle (file object):
                Handle to the already opened .nev file.
        '''
        # the data_size entry (index 2) should always equal 2 (see Neuralynx
        # File Description), but 0 has also been observed in practice
        # TODO: check with more nev files, if index 0,1,2,6,7,8 and 9 can be
        # non-zero. Interpretation? Include in event extraction.
        # only observed 0 for index 0,1,2,6,7,8,9 in nev files.
        # If they are non-zero, this needs to be included in event extraction
        assert all([f[0] == 0 for f in filehandle])
        assert all([f[1] == 0 for f in filehandle])
        assert all([f[2] in [0, 2] for f in filehandle])
        assert all([f[6] == 0 for f in filehandle])
        assert all([f[7] == 0 for f in filehandle])
        assert all([f[8] == 0 for f in filehandle])
        assert all([all(f[9] == 0) for f in filehandle])
        self._diagnostic_print('NEV file check successful.')

    def __ntt_check(self, filehandle):
        '''
        Checks consistency of data in ntt file and raises assertion error if a
        check fails.

        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
        '''
        # TODO: check this when first .ntt files are available
        [timestamps, channel_ids, cell_numbers, features,
         data_points] = filehandle
        assert all(channel_ids == channel_ids[0])
        assert all([len(dp) == len(data_points[0]) for dp in data_points])
        self._diagnostic_print('NTT file check successful.')

    def __ncs_gap_check(self, filehandle):
        '''
        Checks individual data blocks of ncs files for consistent starting
        times with respect to sample count.
        This covers intended recording gaps as well as shortened data packets,
        which are incomplete.
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]
        channel_id = header_u4[0, 0]
        if channel_id not in self.parameters_ncs:
            self.parameters_ncs[channel_id] = {}

        # time stamps of data packets
        delta_t = timestamps[1] - timestamps[0]  # in microsec
        data_packet_offsets = np.diff(timestamps)  # in microsec

        # check if delta_t corresponds to the number of valid samples present
        # in the data packets
        # NOTE: This also detects recording gaps!
        valid_samples = header_u4[:-1, 2]
        sampling_rate = header_u4[0, 1]
        packet_checks = (valid_samples / (self.ncs_time_unit.rescale(
            pq.s).magnitude * sampling_rate)) == data_packet_offsets
        if not all(packet_checks):
            if 'broken_packets' not in self.parameters_ncs[channel_id]:
                self.parameters_ncs[channel_id]['broken_packets'] = []
            broken_packets = np.where(~np.asarray(packet_checks))[0]
            for broken_packet in broken_packets:
                self.parameters_ncs[channel_id]['broken_packets'].append(
                    (broken_packet,
                     valid_samples[broken_packet],
                     data_packet_offsets[broken_packet]))
                self._diagnostic_print('Detected broken packet in NCS file '
                                       'at packet id %i (sample number %i, '
                                       'time offset %i microsec)'
                                       '' % (broken_packet,
                                             valid_samples[broken_packet],
                                             data_packet_offsets[
                                                 broken_packet]))

        # checking for irregular data packet durations -> gaps / shortened
        # data packets
        if not all(data_packet_offsets == delta_t):
            if 'gaps' not in self.parameters_ncs[channel_id]:
                self.parameters_ncs[channel_id]['gaps'] = []
            # gap identification by (packet id, gap start, gap stop)
            gap_packet_ids = np.where(data_packet_offsets != delta_t)[0]
            for gap_packet_id in gap_packet_ids:
                # skip if this packet starting time is known to be corrupted,
                # hoping that no corruption and gap occur simultaneously;
                # a corrupted time stamp affects two delta_t comparisons:
                if gap_packet_id in self.parameters_ncs[channel_id][
                        'invalid_first_samples'] \
                        or gap_packet_id + 1 in self.parameters_ncs[
                            channel_id]['invalid_first_samples']:
                    continue

                # t_start of the last packet before the gap [microsec]
                gap_start = timestamps[gap_packet_id]
                # t_start of the first packet after the gap [microsec]
                gap_stop = timestamps[gap_packet_id + 1]

                self.parameters_ncs[channel_id]['gaps'].append(
                    (gap_packet_id, gap_start, gap_stop))
                self._diagnostic_print('Detected gap in NCS file between '
                                       'sample time %i and %i (last correct '
                                       'packet id %i)' % (gap_start, gap_stop,
                                                          gap_packet_id))
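
    # Gap entries appended above are tuples of the form
    #     (id of the packet preceding the gap,
    #      timestamp of that packet [us],
    #      timestamp of the first packet after the gap [us]);
    # the cross-file gap merging in _associate() relies on exactly this
    # format when it joins adjacent gaps into one global gap list.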

    def __ncs_invalid_first_sample_check(self, filehandle):
        '''
        Checks data blocks of ncs files for corrupted starting times
        indicating a missing first sample in the data packet. These are then
        excluded from the gap check, but ignored for further analysis.
        '''
        timestamps = filehandle[0]
        header_u4 = filehandle[1]
        channel_id = header_u4[0, 0]
        self.parameters_ncs[channel_id]['invalid_first_samples'] = []

        # checking for implausibly large timestamps (>= 2 ** 55), which
        # indicate an error
        invalid_packet_ids = np.where(timestamps >= 2 ** 55)[0]
        if len(invalid_packet_ids) > 0:
            warnings.warn('Invalid first sample(s) detected in ncs file '
                          '(packet id(s) %s)! This error is ignored in '
                          'subsequent routines.' % invalid_packet_ids)
            self.parameters_ncs[channel_id][
                'invalid_first_samples'] = invalid_packet_ids

            # checking consistency of data around corrupted packet time
            for invalid_packet_id in invalid_packet_ids:
                if invalid_packet_id < 2 or invalid_packet_id > len(
                        timestamps) - 2:
                    raise ValueError(
                        'Corrupted ncs data packet at the beginning '
                        'or end of file.')
                elif (timestamps[invalid_packet_id + 1]
                      - timestamps[invalid_packet_id - 1]
                      != 2 * (timestamps[invalid_packet_id - 1]
                              - timestamps[invalid_packet_id - 2])):
                    raise ValueError('Starting times of ncs data packets '
                                     'around the corrupted data packet are '
                                     'not consistent!')

    # Supplementary Functions
    def get_channel_id_by_file_name(self, filename):
        """
        Checking parameters of NCS, NSE and NTT files for a given filename
        and returning the channel_id if the result is consistent.

        :param filename:
        :return:
        """
        channel_ids = []
        channel_ids += [k for k in self.parameters_ncs if
                        self.parameters_ncs[k]['filename'] == filename]
        channel_ids += [k for k in self.parameters_nse if
                        self.parameters_nse[k]['filename'] == filename]
        channel_ids += [k for k in self.parameters_ntt if
                        self.parameters_ntt[k]['filename'] == filename]
        if len(np.unique(np.asarray(channel_ids))) == 1:
            return channel_ids[0]
        elif len(channel_ids) > 1:
            raise ValueError(
                'Ambiguous channel ids detected. Filename %s is associated '
                'with different channels of NCS, NSE and NTT: %s'
                '' % (filename, channel_ids))
        else:  # if filename was not detected
            return None

    def hashfile(self, afile, hasher, blocksize=65536):
        buf = afile.read(blocksize)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(blocksize)
        return hasher.digest()
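
    # Example use, mirroring the call in _associate():
    #     digest = self.hashfile(open(path, 'rb'), hashlib.sha256())
    # The file is read in 64 KiB chunks, so arbitrarily large session files
    # can be fingerprinted without loading them into memory at once.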

    def datesizefile(self, filename):
        return str(os.path.getmtime(filename)) + '_' + str(
            os.path.getsize(filename))

    def _diagnostic_print(self, text):
        '''
        Print a diagnostic message.

        Args:
            text (string):
                Diagnostic text to print.

        Returns:
            -
        '''
        if self._print_diagnostic:
            print('NeuralynxIO: ' + text)