# -*- coding: utf-8 -*-
"""
Class for reading from Brainware SRC files

SRC files are binary files for holding spike data. They are broken up into
nested data sequences of different types, with each type of sequence identified
by a unique ID number. This allows new versions of sequences to be included
without breaking backwards compatibility, since new versions can just be given
a new ID number.

The ID numbers and the format of the data they contain were taken from the
Matlab-based reader function supplied with BrainWare. The Python code,
however, was implemented from scratch using Python idioms.

There are some situations where BrainWare data can overflow the SRC file,
resulting in a corrupt file. Neither BrainWare nor the Matlab-based
reader can read such files. This software, however, will try to recover
the data, and in most cases can do so successfully.

Each SRC file can hold the equivalent of multiple Neo Blocks.

Brainware was developed by Dr. Jan Schnupp and is available from
Tucker Davis Technologies, Inc.
http://www.tdt.com/downloads.htm

Neither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in the
development of this code.

The code is implemented with the permission of Dr. Jan Schnupp.

Author: Todd Jennings
"""

# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function

# import needed core python modules
from datetime import datetime, timedelta
from itertools import chain
import logging
import os.path
import sys

# numpy and quantities are already required by neo
import numpy as np
import quantities as pq

# needed core neo modules
from neo.core import (Block, Event,
                      ChannelIndex, Segment, SpikeTrain, Unit)

# need to subclass BaseIO
from neo.io.baseio import BaseIO

LOGHANDLER = logging.StreamHandler()

PY_VER = sys.version_info[0]
class BrainwareSrcIO(BaseIO):
    """
    Class for reading Brainware Spike ReCord files with the extension '.src'

    The read_block method returns the first Block of the file. It will
    automatically close the file after reading.
    The read method is the same as read_block.

    The read_all_blocks method automatically reads all Blocks. It will
    automatically close the file after reading.

    The read_next_block method will return one Block each time it is called.
    It will automatically close the file and reset to the first Block
    after reading the last block.
    Call the close method to close the file and reset this method
    back to the first Block.

    The _isopen property tells whether the file is currently open and
    reading or closed.

    Note 1:
        The first Unit in each ChannelIndex is always
        UnassignedSpikes, which has a SpikeTrain for each Segment containing
        all the spikes not assigned to any Unit in that Segment.

    Note 2:
        The first Segment in each Block is always Comments, which stores all
        comments as an Event object.

    Note 3:
        The parameters from the BrainWare table for each condition are stored
        in the Segment annotations. If there are multiple repetitions of
        a condition, each repetition is stored as a separate Segment.

    Note 4:
        There is always only one ChannelIndex. BrainWare stores the
        equivalent of ChannelIndexes in separate files.

    Usage:
        >>> from neo.io.brainwaresrcio import BrainwareSrcIO
        >>> srcfile = BrainwareSrcIO(filename='multi_500ms_mulitrep_ch1.src')
        >>> blk1 = srcfile.read()
        >>> blk2 = srcfile.read_block()
        >>> blks = srcfile.read_all_blocks()
        >>> print(blk1.segments)
        >>> print(blk1.segments[0].spiketrains)
        >>> print(blk1.units)
        >>> print(blk1.units[0].name)
        >>> print(blk2)
        >>> print(blk2.segments)
        >>> print(blks)
        >>> print(blks[0].segments)
    """

    is_readable = True   # This class can only read data
    is_writable = False  # write is not supported

    # This class is able to directly or indirectly handle the following objects
    supported_objects = [Block, ChannelIndex,
                         Segment, SpikeTrain, Event, Unit]

    readable_objects = [Block]
    writeable_objects = []

    has_header = False
    is_streameable = False

    # This is for GUI stuff: a definition for parameters when reading.
    # This dict should be keyed by object (`Block`). Each entry is a list
    # of tuples. The first entry in each tuple is the parameter name. The
    # second entry is a dict with keys 'value' (for default value),
    # and 'label' (for a descriptive name).
    # Note that if the highest-level object requires parameters,
    # common_io_test will be skipped.
    read_params = {Block: []}

    # does not support write so no GUI stuff
    write_params = None
    name = 'Brainware SRC File'
    extensions = ['src']

    mode = 'file'
    def __init__(self, filename=None):
        """
        Arguments:
            filename: the filename
        """
        BaseIO.__init__(self)

        # log the __init__
        self.logger.info('__init__')

        # this stores the filename of the current object, exactly as it is
        # provided when the instance is initialized.
        self._filename = filename

        # this stores the filename without the path
        self._file_origin = filename

        # This stores the file object for the current file
        self._fsrc = None

        # This stores the current Block
        self._blk = None

        # This stores the current ChannelIndex for easy access
        # It is equivalent to self._blk.channel_indexes[0]
        self._chx = None

        # This stores the current Segment for easy access
        # It is equivalent to self._blk.segments[-1]
        self._seg0 = None

        # this stores a dictionary of the Block's Units by name,
        # making it easier and faster to retrieve Units by name later
        # UnassignedSpikes and Units accessed by index are not stored here
        self._unitdict = {}

        # this stores the current Unit
        self._unit0 = None

        # if the file has a list with negative length, the rest of the file's
        # list lengths are unreliable, so we need to store this value for the
        # whole file
        self._damaged = False

        # this stores whether the current file is lazy loaded
        self._lazy = False

        # this stores whether the current file is cascading
        # this is false by default so if we use read_block on its own it works
        self._cascade = False

        # this stores an empty SpikeTrain which is used in various places.
        self._default_spiketrain = None

    @property
    def _isopen(self):
        """
        This property tells whether the SRC file associated with the IO object
        is open.
        """
        return self._fsrc is not None

    def _opensrc(self):
        """
        Open the file if it isn't already open.
        """
        # if the file isn't already open, open it and clear the Blocks
        if not self._fsrc or self._fsrc.closed:
            self._fsrc = open(self._filename, 'rb')

        # figure out the filename of the current file
        self._file_origin = os.path.basename(self._filename)

    def close(self):
        """
        Close the currently-open file and reset the current reading point.
        """
        self.logger.info('close')
        if self._isopen and not self._fsrc.closed:
            self._fsrc.close()

        # we also need to reset all per-file attributes
        self._damaged = False
        self._fsrc = None
        self._seg0 = None
        self._cascade = False
        self._file_origin = None
        self._lazy = False
        self._default_spiketrain = None
    def read(self, lazy=False, cascade=True, **kargs):
        """
        Reads the first Block from the Spike ReCording file "filename"
        generated with BrainWare.

        If you wish to read more than one Block, please use read_all_blocks.
        """
        return self.read_block(lazy=lazy, cascade=cascade, **kargs)

    def read_block(self, lazy=False, cascade=True, **kargs):
        """
        Reads the first Block from the Spike ReCording file "filename"
        generated with BrainWare.

        If you wish to read more than one Block, please use read_all_blocks.
        """
        # there are no kargs implemented so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        blockobj = self.read_next_block(cascade=cascade, lazy=lazy)
        self.close()
        return blockobj

    def read_next_block(self, cascade=True, lazy=False, **kargs):
        """
        Reads a single Block from the Spike ReCording file "filename"
        generated with BrainWare.

        Each call of read will return the next Block until all Blocks are
        loaded.  After the last Block, the file will be automatically closed
        and the progress reset.  Call the close method manually to reset
        back to the first Block.
        """
        # there are no kargs implemented so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')

        self._lazy = lazy
        self._opensrc()

        # create _default_spiketrain here for performance reasons
        self._default_spiketrain = self._init_default_spiketrain.copy()
        self._default_spiketrain.file_origin = self._file_origin
        if lazy:
            self._default_spiketrain.lazy_shape = (0,)

        # create the Block and the contents that all Blocks from this IO share
        self._blk = Block(file_origin=self._file_origin)
        if not cascade:
            return self._blk

        self._chx = ChannelIndex(file_origin=self._file_origin,
                                 index=np.array([], dtype=np.int))
        self._seg0 = Segment(name='Comments', file_origin=self._file_origin)
        self._unit0 = Unit(name='UnassignedSpikes',
                           file_origin=self._file_origin,
                           elliptic=[], boundaries=[],
                           timestamp=[], max_valid=[])

        self._blk.channel_indexes.append(self._chx)
        self._chx.units.append(self._unit0)
        self._blk.segments.append(self._seg0)

        # this actually reads the contents of the Block
        result = []
        while hasattr(result, '__iter__'):
            try:
                result = self._read_by_id()
            except:
                self.close()
                raise

        # since we read at a Block level we always do this
        self._blk.create_many_to_one_relationship()

        # put the Block in a local object so it can be garbage collected
        blockobj = self._blk

        # reset the per-Block attributes
        self._blk = None
        self._chx = None
        self._unitdict = {}

        # combine the comments into one big event
        self._combine_segment_events(self._seg0)

        # result is None iff the end of the file is reached, so we can
        # close the file
        # this notification is not helpful if using the read method with
        # cascade==True, since the user will know it is done when the method
        # returns a value
        if result is None:
            self.logger.info('Last Block read.  Closing file.')
            self.close()

        return blockobj

    def read_all_blocks(self, cascade=True, lazy=False, **kargs):
        """
        Reads all Blocks from the Spike ReCording file "filename"
        generated with BrainWare.

        The progress in the file is reset and the file closed then opened again
        prior to reading.

        The file is automatically closed after reading completes.
        """
        # there are no kargs implemented so far.  If someone tries to pass
        # them they are expecting them to do something or making a mistake,
        # neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')

        self._lazy = lazy
        self._cascade = True

        self.close()
        self._opensrc()

        # Read each Block.
        # After the last Block, self._isopen is set to False, so this makes a
        # good way to determine when to stop
        blocks = []
        while self._isopen:
            try:
                blocks.append(self.read_next_block(cascade=cascade,
                                                   lazy=lazy))
            except:
                self.close()
                raise

        return blocks

    def _convert_timestamp(self, timestamp, start_date=datetime(1899, 12, 30)):
        """
        _convert_timestamp(timestamp, start_date) - convert a timestamp in
        brainware src file units to a python datetime object.

        start_date defaults to 1899.12.30 (ISO format), which is the start date
        used by all BrainWare SRC data Blocks so far.  If manually specified
        it should be a datetime object or any other object that can be added
        to a timedelta object.
        """
        # datetime + timedelta = datetime again.
        try:
            timestamp = convert_brainwaresrc_timestamp(timestamp, start_date)
        except OverflowError as err:
            timestamp = start_date
            self.logger.exception('_convert_timestamp overflow')
        return timestamp
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    #   All methods from here on are private.  They are not intended to be used
    #   on their own, although methods that could theoretically be called on
    #   their own are marked as such.  All private methods could be renamed,
    #   combined, or split at any time.  All private methods prefixed by
    #   "__read" or "__skip" will alter the current place in the file.
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------

    def _read_by_id(self):
        """
        Reader for generic data

        BrainWare SRC files are broken up into data sequences that are
        identified by an ID code.  This method determines the ID code and calls
        the method to read the data sequence with that ID code.  See the
        _ID_DICT attribute for a dictionary of code/method pairs.

        IMPORTANT!!!
        This is the only private method that can be called directly.
        The rest of the private methods can only safely be called by this
        method or by other private methods, since they depend on the
        current position in the file.
        """
        try:
            # uint16 -- the ID code of the next sequence
            seqid = np.asscalar(np.fromfile(self._fsrc,
                                            dtype=np.uint16, count=1))
        except ValueError:
            # return a None if at EOF.  Other methods use None to recognize
            # an EOF
            return None

        # using the seqid, get the reader function from the reader dict
        readfunc = self._ID_DICT.get(seqid)
        if readfunc is None:
            if seqid <= 0:
                # return if end-of-sequence ID code.  This has to be 0.
                # just calling "return" would return a None, which is used as
                # an EOF indicator
                return 0
            else:
                # return a warning if the key is invalid
                # (this is consistent with the official behavior; even the
                # official reference files have invalid keys when read with
                # the official reference Matlab scripts)
                self.logger.warning('unknown ID: %s', seqid)
                return []

        try:
            # run the function to get the data
            return readfunc(self)
        except (EOFError, UnicodeDecodeError) as err:
            # return a warning if the EOF is reached in the middle of a method
            self.logger.exception('Premature end of file')
            return None
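
    # Illustrative note (not part of the original source): dispatch here is a
    # plain dictionary lookup on the class-level _ID_DICT defined at the bottom
    # of this class, e.g. _ID_DICT[29079] maps to __read_spike_fixed.  Adding
    # support for a new sequence type therefore only requires a new reader
    # method plus a new dictionary entry.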
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    #   These are helper methods.  They don't read from the file, so it
    #   won't harm the reading process to call them, but they are only relevant
    #   when used in other private methods.
    #
    #   These are tuned to the particular needs of this IO class, they are
    #   unlikely to work properly if used with another file format.
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------

    def _assign_sequence(self, data_obj):
        """
        _assign_sequence(data_obj) - Try to guess where an unknown sequence
        should go based on its class.  Warnings are issued if this method is
        used since manual reorganization may be needed.
        """
        if isinstance(data_obj, Unit):
            self.logger.warning('Unknown Unit found, adding to Units list')
            self._chx.units.append(data_obj)
            if data_obj.name:
                self._unitdict[data_obj.name] = data_obj
        elif isinstance(data_obj, Segment):
            self.logger.warning('Unknown Segment found, '
                                'adding to Segments list')
            self._blk.segments.append(data_obj)
        elif isinstance(data_obj, Event):
            self.logger.warning('Unknown Event found, '
                                'adding to comment Events list')
            self._seg0.events.append(data_obj)
        elif isinstance(data_obj, SpikeTrain):
            self.logger.warning('Unknown SpikeTrain found, '
                                'adding to the UnassignedSpikes Unit')
            self._unit0.spiketrains.append(data_obj)
        elif hasattr(data_obj, '__iter__') and not isinstance(data_obj, str):
            for sub_obj in data_obj:
                self._assign_sequence(sub_obj)
        else:
            if self.logger.isEnabledFor(logging.WARNING):
                self.logger.warning('Unrecognized sequence of type %s found, '
                                    'skipping', type(data_obj))

    _default_datetime = datetime(1, 1, 1)
    _default_t_start = pq.Quantity(0., units=pq.ms, dtype=np.float32)
    _init_default_spiketrain = SpikeTrain(times=pq.Quantity([], units=pq.ms,
                                                            dtype=np.float32),
                                          t_start=pq.Quantity(0, units=pq.ms,
                                                              dtype=np.float32),
                                          t_stop=pq.Quantity(1, units=pq.ms,
                                                             dtype=np.float32),
                                          waveforms=pq.Quantity([[[]]],
                                                                dtype=np.int8,
                                                                units=pq.mV),
                                          dtype=np.float32, copy=False,
                                          timestamp=_default_datetime,
                                          respwin=np.array([], dtype=np.int32),
                                          dama_index=-1,
                                          trig2=pq.Quantity([], units=pq.ms,
                                                            dtype=np.uint8),
                                          side='')
    def _combine_events(self, events):
        """
        _combine_events(events) - combine a list of Events
        with single events into one long Event
        """
        if not events or self._lazy:
            event = Event(times=pq.Quantity([], units=pq.s),
                          labels=np.array([], dtype='S'),
                          senders=np.array([], dtype='S'),
                          t_start=0)
            if self._lazy:
                event.lazy_shape = len(events)
            return event

        times = []
        labels = []
        senders = []
        for event in events:
            times.append(event.times.magnitude)
            labels.append(event.labels)
            senders.append(event.annotations['sender'])

        times = np.array(times, dtype=np.float32)
        t_start = times.min()
        times = pq.Quantity(times - t_start, units=pq.d).rescale(pq.s)

        labels = np.array(labels)
        senders = np.array(senders)

        event = Event(times=times, labels=labels,
                      t_start=t_start.tolist(), senders=senders)

        return event
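
    # Illustrative note (not part of the original source): the comment times
    # above are fractional days since 1899-12-30, so the quantities rescale
    # turns day offsets into seconds relative to t_start; for example,
    # pq.Quantity(0.25, units=pq.d).rescale(pq.s) yields 21600.0 s.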
    def _combine_segment_events(self, segment):
        """
        _combine_segment_events(segment)
        Combine all Events in a segment.
        """
        event = self._combine_events(segment.events)
        event_t_start = event.annotations.pop('t_start')
        segment.rec_datetime = self._convert_timestamp(event_t_start)
        segment.events = [event]
        event.segment = segment

    def _combine_spiketrains(self, spiketrains):
        """
        _combine_spiketrains(spiketrains) - combine a list of SpikeTrains
        with single spikes into one long SpikeTrain
        """
        if not spiketrains:
            return self._default_spiketrain.copy()

        if hasattr(spiketrains[0], 'waveforms') and len(spiketrains) == 1:
            train = spiketrains[0]
            if self._lazy and not hasattr(train, 'lazy_shape'):
                train.lazy_shape = train.shape
                train = train[:0]
            return train

        if hasattr(spiketrains[0], 't_stop'):
            # workaround for bug in some broken files
            istrain = [hasattr(utrain, 'waveforms') for utrain in spiketrains]
            if not all(istrain):
                goodtrains = [itrain for i, itrain in enumerate(spiketrains)
                              if istrain[i]]
                badtrains = [itrain for i, itrain in enumerate(spiketrains)
                             if not istrain[i]]
                spiketrains = (goodtrains +
                               [self._combine_spiketrains(badtrains)])

            spiketrains = [itrain for itrain in spiketrains if itrain.size > 0]
            if not spiketrains:
                return self._default_spiketrain.copy()

            # get the times of the spiketrains and combine them
            waveforms = [itrain.waveforms for itrain in spiketrains]
            rawtrains = np.array(np.concatenate(spiketrains, axis=1))
            times = pq.Quantity(rawtrains, units=pq.ms, copy=False)

            lens1 = np.array([wave.shape[1] for wave in waveforms])
            lens2 = np.array([wave.shape[2] for wave in waveforms])
            if lens1.max() != lens1.min() or lens2.max() != lens2.min():
                lens1 = lens1.max() - lens1
                lens2 = lens2.max() - lens2
                waveforms = [np.pad(waveform,
                                    ((0, 0), (0, len1), (0, len2)),
                                    'constant')
                             for waveform, len1, len2 in zip(waveforms,
                                                             lens1,
                                                             lens2)]
            waveforms = np.concatenate(waveforms, axis=0)

            # extract the trig2 annotation
            trig2 = np.array(np.concatenate([itrain.annotations['trig2'] for
                                             itrain in spiketrains], axis=1))
            trig2 = pq.Quantity(trig2, units=pq.ms)
        elif hasattr(spiketrains[0], 'units'):
            return self._combine_spiketrains([spiketrains])
        else:
            times, waveforms, trig2 = zip(*spiketrains)

            times = np.concatenate(times, axis=0)
            # get the times of the SpikeTrains and combine them
            times = pq.Quantity(times, units=pq.ms, copy=False)

            # get the waveforms of the SpikeTrains and combine them
            # these should be a 3D array with the first axis being the spike,
            # the second axis being the recording channel (there is only one),
            # and the third axis being the actual waveform
            waveforms = np.concatenate(waveforms, axis=0)

            # extract the trig2 annotation
            trig2 = pq.Quantity(np.hstack(trig2),
                                units=pq.ms, copy=False)

        if not times.size:
            return self._default_spiketrain.copy()

        # get the maximum time
        t_stop = times[-1] * 2.

        if self._lazy:
            timesshape = times.shape
            times = pq.Quantity([], units=pq.ms, copy=False)
            waveforms = pq.Quantity([[[]]], units=pq.mV)
        else:
            waveforms = pq.Quantity(waveforms, units=pq.mV, copy=False)

        train = SpikeTrain(times=times, copy=False,
                           t_start=self._default_t_start.copy(), t_stop=t_stop,
                           file_origin=self._file_origin,
                           waveforms=waveforms,
                           timestamp=self._default_datetime,
                           respwin=np.array([], dtype=np.int32),
                           dama_index=-1, trig2=trig2, side='')
        if self._lazy:
            train.lazy_shape = timesshape

        return train
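
    # Illustrative note (not part of the original source): the padding step in
    # _combine_spiketrains equalizes waveform shapes before concatenation, e.g.
    # a (1, 1, 30) spike shape is zero-padded to (1, 1, 40):
    #     np.pad(np.ones((1, 1, 30)), ((0, 0), (0, 0), (0, 10)),
    #            'constant').shape  ->  (1, 1, 40)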
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    #   IMPORTANT!!!
    #   These are private methods implementing the internal reading mechanism.
    #   Due to the way BrainWare SRC files are structured, they CANNOT be used
    #   on their own.  Calling these manually will almost certainly alter your
    #   position in the file in an unrecoverable manner, whether they throw
    #   an exception or not.
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------

    def __read_str(self, numchars=1, utf=None):
        """
        Read a string of a specific length.

        This is compatible with python 2 and python 3.
        """
        rawstr = np.asscalar(np.fromfile(self._fsrc,
                                         dtype='S%s' % numchars, count=1))
        if utf or (utf is None and PY_VER == 3):
            return rawstr.decode('utf-8')
        return rawstr

    def __read_annotations(self):
        """
        Read the stimulus grid properties.

        -------------------------------------------------------------------
        Returns a dictionary containing the parameter names as keys and the
        parameter values as values.

        The returned object must be added to the Block.

        ID: 29109
        """
        # int16 -- number of stimulus parameters
        numelements = np.fromfile(self._fsrc, dtype=np.int16, count=1)[0]
        if not numelements:
            return {}

        # [data sequence] * numelements -- parameter names
        names = []
        for i in range(numelements):
            # {skip} = byte (char) -- skip one byte
            self._fsrc.seek(1, 1)

            # uint8 -- length of next string
            numchars = np.asscalar(np.fromfile(self._fsrc,
                                               dtype=np.uint8, count=1))

            # if there is no name, make one up
            if not numchars:
                name = 'param%s' % i
            else:
                # char * numchars -- parameter name string
                name = self.__read_str(numchars)

            # if the name is already in there, add a unique number to it
            # so it isn't overwritten
            if name in names:
                name = name + str(i)

            names.append(name)

        # float32 * numelements -- an array of parameter values
        values = np.fromfile(self._fsrc, dtype=np.float32,
                             count=numelements)

        # combine the names and values into a dict
        # the dict will be added to the annotations
        annotations = dict(zip(names, values))

        return annotations
    def __read_annotations_old(self):
        """
        Read the stimulus grid properties.

        Returns a dictionary containing the parameter names as keys and the
        parameter values as values.

        ------------------------------------------------
        The returned objects must be added to the Block.

        This reads an old version of the format that does not store parameter
        names, so placeholder names are created instead.

        ID: 29099
        """
        # int16 * 14 -- an array of parameter values
        values = np.fromfile(self._fsrc, dtype=np.int16, count=14)

        # create dummy names and combine them with the values in a dict
        # the dict will be added to the annotations
        params = ['param%s' % i for i in range(len(values))]
        annotations = dict(zip(params, values))

        return annotations

    def __read_comment(self):
        """
        Read a single comment.

        The comment is stored as an Event in Segment 0, which is
        specifically for comments.

        ----------------------
        Returns an empty list.

        The returned object is already added to the Block.

        No ID number: always called from another method
        """
        # float64 -- timestamp (number of days since dec 30th 1899)
        time = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # int16 -- length of next string
        numchars1 = np.asscalar(np.fromfile(self._fsrc,
                                            dtype=np.int16, count=1))

        # char * numchars -- the one who sent the comment
        sender = self.__read_str(numchars1)

        # int16 -- length of next string
        numchars2 = np.asscalar(np.fromfile(self._fsrc,
                                            dtype=np.int16, count=1))

        # char * numchars -- comment text
        text = self.__read_str(numchars2, utf=False)

        comment = Event(times=pq.Quantity(time, units=pq.d), labels=text,
                        sender=sender, file_origin=self._file_origin)

        self._seg0.events.append(comment)

        return []
    def __read_list(self):
        """
        Read a list of arbitrary data sequences

        It only says how many data sequences should be read.  These sequences
        are then read by their ID number.

        Note that lists can be nested.

        If there are too many sequences (for instance if there are a large
        number of spikes in a Segment) then a negative number will be returned
        for the number of data sequences to read.  In this case the method
        tries to guess.  This also means that all future list data sequences
        have unreliable lengths as well.

        -------------------------------------------
        Returns a list of objects.

        Whether these objects need to be added to the Block depends on the
        object in question.

        There are several data sequences that have identical formats but are
        used in different situations.  That means this data sequence has
        multiple ID numbers.

        ID: 29082
        ID: 29083
        ID: 29091
        ID: 29093
        """
        # int16 -- number of sequences to read
        numelements = np.fromfile(self._fsrc, dtype=np.int16, count=1)[0]

        # {skip} = bytes * 4 (int16 * 2) -- skip four bytes
        self._fsrc.seek(4, 1)

        if numelements == 0:
            return []

        if not self._damaged and numelements < 0:
            self._damaged = True
            self.logger.error('Negative sequence count %s, file damaged',
                              numelements)

        if not self._damaged:
            # read the sequences into a list
            seq_list = [self._read_by_id() for _ in range(numelements)]
        else:
            # read until we get some indication we should stop
            seq_list = []

            # uint16 -- the ID of the next sequence
            seqidinit = np.fromfile(self._fsrc, dtype=np.uint16, count=1)[0]

            # {rewind} = byte * 2 (int16) -- move back 2 bytes, i.e. go back to
            # before the beginning of the seqid
            self._fsrc.seek(-2, 1)

            while 1:
                # uint16 -- the ID of the next sequence
                seqid = np.fromfile(self._fsrc, dtype=np.uint16, count=1)[0]

                # {rewind} = byte * 2 (int16) -- move back 2 bytes, i.e. go
                # back to before the beginning of the seqid
                self._fsrc.seek(-2, 1)

                # if we come across a new sequence, we are at the end of the
                # list so we should stop
                if seqidinit != seqid:
                    break

                # otherwise read the next sequence
                seq_list.append(self._read_by_id())

        return seq_list
    def __read_segment(self):
        """
        Read an individual Segment.

        A Segment contains a dictionary of parameters, the length of the
        recording, a list of Units with their Spikes, and a list of Spikes
        not assigned to any Unit.  The unassigned spikes are always stored in
        Unit 0, which is exclusively for storing these spikes.

        -------------------------------------------------
        Returns the Segment object created by the method.

        The returned object is already added to the Block.

        ID: 29106
        """
        # (data_obj) -- the stimulus parameters for this segment
        annotations = self._read_by_id()
        annotations['feature_type'] = -1
        annotations['go_by_closest_unit_center'] = False
        annotations['include_unit_bounds'] = False

        # (data_obj) -- SpikeTrain list of unassigned spikes
        # these go in the first Unit since it is for unassigned spikes
        unassigned_spikes = self._read_by_id()
        self._unit0.spiketrains.extend(unassigned_spikes)

        # read a list of units and grab the second return value, which is the
        # SpikeTrains from this Segment (if we use the Unit we will get all the
        # SpikeTrains from that Unit, resulting in duplicates if we are past
        # the first Segment)
        trains = self._read_by_id()
        if not trains:
            if unassigned_spikes:
                # if there are no assigned spikes,
                # just use the unassigned spikes
                trains = zip(unassigned_spikes)
            else:
                # if there are no spiketrains at all,
                # create an empty spike train
                trains = [[self._default_spiketrain.copy()]]
        elif hasattr(trains[0], 'dtype'):
            # workaround for some broken files
            trains = [unassigned_spikes +
                      [self._combine_spiketrains([trains])]]
        else:
            # get the second element from each returned value,
            # which is the actual SpikeTrains
            trains = [unassigned_spikes] + [train[1] for train in trains]

            # re-organize by sweeps
            trains = zip(*trains)

        # int32 -- SpikeTrain length in ms
        spiketrainlen = pq.Quantity(np.fromfile(self._fsrc, dtype=np.int32,
                                                count=1)[0], units=pq.ms,
                                    copy=False)

        segments = []
        for train in trains:
            # create the Segment and add everything to it
            segment = Segment(file_origin=self._file_origin,
                              **annotations)
            segment.spiketrains = train
            self._blk.segments.append(segment)
            segments.append(segment)

            for itrain in train:
                # use the SpikeTrain length to figure out the stop time
                # t_start is always 0 so we can ignore it
                itrain.t_stop = spiketrainlen

        return segments
    def __read_segment_list(self):
        """
        Read a list of Segments with comments.

        Since comments can occur at any point, whether a recording is happening
        or not, it is impossible to reliably assign them to a specific Segment.
        For this reason they are always assigned to Segment 0, which is
        exclusively used to store comments.

        --------------------------------------------------------
        Returns a list of the Segments created with this method.

        The returned objects are already added to the Block.

        ID: 29112
        """
        # uint8 -- number of electrode channels in the Segment
        numchannels = np.fromfile(self._fsrc, dtype=np.uint8, count=1)[0]

        # [list of sequences] -- individual Segments
        segments = self.__read_list()
        while not hasattr(segments[0], 'spiketrains'):
            segments = list(chain(*segments))

        # char -- "side of brain" info
        side = self.__read_str(1)

        # int16 -- number of comments
        numelements = np.fromfile(self._fsrc, dtype=np.int16, count=1)[0]

        # comment_obj * numelements -- comments about the Segments
        # we don't know which Segment specifically, though
        for _ in range(numelements):
            self.__read_comment()

        # create a channel_index for the numchannels
        self._chx.index = np.arange(numchannels)
        self._chx.channel_names = np.array(['Chan{}'.format(i)
                                            for i in range(numchannels)],
                                           dtype='S')

        # store what side of the head we are dealing with
        for segment in segments:
            for spiketrain in segment.spiketrains:
                spiketrain.annotations['side'] = side

        return segments

    def __read_segment_list_v8(self):
        """
        Read a list of Segments with comments.

        This is version 8 of the data sequence.

        This is the same as __read_segment_list_var, but can also contain
        one or more arbitrary sequences.  The class makes an attempt to assign
        the sequences when possible, and warns the user when this happens
        (see the _assign_sequence method).

        --------------------------------------------------------
        Returns a list of the Segments created with this method.

        The returned objects are already added to the Block.

        ID: 29117
        """
        # segment_collection_var -- this is based off a segment_collection_var
        segments = self.__read_segment_list_var()

        # uint16 -- the ID of the next sequence
        seqid = np.fromfile(self._fsrc, dtype=np.uint16, count=1)[0]

        # {rewind} = byte * 2 (int16) -- move back 2 bytes, i.e. go back to
        # before the beginning of the seqid
        self._fsrc.seek(-2, 1)

        if seqid in self._ID_DICT:
            # if it is a valid seqid, read it and try to figure out where
            # to put it
            self._assign_sequence(self._read_by_id())
        else:
            # otherwise it is a Unit list
            self.__read_unit_list()

            # {skip} = byte * 2 (int16) -- skip 2 bytes
            self._fsrc.seek(2, 1)

        return segments

    def __read_segment_list_v9(self):
        """
        Read a list of Segments with comments.

        This is version 9 of the data sequence.

        This is the same as __read_segment_list_v8, but contains some
        additional annotations.  These annotations are added to the Segment.

        --------------------------------------------------------
        Returns a list of the Segments created with this method.

        The returned objects are already added to the Block.

        ID: 29120
        """
        # segment_collection_v8 -- this is based off a segment_collection_v8
        segments = self.__read_segment_list_v8()

        # uint8
        feature_type = np.fromfile(self._fsrc, dtype=np.uint8,
                                   count=1)[0]

        # uint8
        go_by_closest_unit_center = np.fromfile(self._fsrc, dtype=np.bool8,
                                                count=1)[0]

        # uint8
        include_unit_bounds = np.fromfile(self._fsrc, dtype=np.bool8,
                                          count=1)[0]

        # create a dictionary of the annotations
        annotations = {'feature_type': feature_type,
                       'go_by_closest_unit_center': go_by_closest_unit_center,
                       'include_unit_bounds': include_unit_bounds}

        # add the annotations to each Segment
        for segment in segments:
            segment.annotations.update(annotations)

        return segments

    def __read_segment_list_var(self):
        """
        Read a list of Segments with comments.

        This is the same as __read_segment_list, but contains information
        regarding the sampling period.  This information is added to the
        SpikeTrains in the Segments.

        --------------------------------------------------------
        Returns a list of the Segments created with this method.

        The returned objects are already added to the Block.

        ID: 29114
        """
        # float32 -- DA conversion clock period in microsec
        sampling_period = pq.Quantity(np.fromfile(self._fsrc,
                                                  dtype=np.float32, count=1),
                                      units=pq.us, copy=False)[0]

        # segment_collection -- this is based off a segment_collection
        segments = self.__read_segment_list()

        # add the sampling period to each SpikeTrain
        for segment in segments:
            for spiketrain in segment.spiketrains:
                spiketrain.sampling_period = sampling_period

        return segments
    def __read_spike_fixed(self, numpts=40):
        """
        Read a spike with a fixed waveform length (40 time bins)

        -------------------------------------------
        Returns the time, waveform and trig2 value.

        The returned objects must be converted to a SpikeTrain then
        added to the Block.

        ID: 29079
        """
        # float32 -- spike time stamp in ms since start of SpikeTrain
        time = np.fromfile(self._fsrc, dtype=np.float32, count=1)

        # int8 * 40 -- spike shape -- use numpts for spike_var
        waveform = np.fromfile(self._fsrc, dtype=np.int8,
                               count=numpts).reshape(1, 1, numpts)

        # uint8 -- point of return to noise
        trig2 = np.fromfile(self._fsrc, dtype=np.uint8, count=1)

        return time, waveform, trig2

    def __read_spike_fixed_old(self):
        """
        Read a spike with a fixed waveform length (40 time bins)

        This is an old version of the format.  The time is stored as ints
        representing 1/25 ms time steps.  It has no trigger information.

        -------------------------------------------
        Returns the time, waveform and trig2 value.

        The returned objects must be converted to a SpikeTrain then
        added to the Block.

        ID: 29081
        """
        # int32 -- spike time stamp in 1/25 ms steps since start of SpikeTrain
        time = np.fromfile(self._fsrc, dtype=np.int32, count=1) / 25.
        time = time.astype(np.float32)

        # int8 * 40 -- spike shape
        # This needs to be a 3D array, one for each channel.  BrainWare
        # only ever has a single channel per file.
        waveform = np.fromfile(self._fsrc, dtype=np.int8,
                               count=40).reshape(1, 1, 40)

        # create a dummy trig2 value
        trig2 = np.array([-1], dtype=np.uint8)

        return time, waveform, trig2

    def __read_spike_var(self):
        """
        Read a spike with a variable waveform length

        -------------------------------------------
        Returns the time, waveform and trig2 value.

        The returned objects must be converted to a SpikeTrain then
        added to the Block.

        ID: 29115
        """
        # uint8 -- number of points in spike shape
        numpts = np.fromfile(self._fsrc, dtype=np.uint8, count=1)[0]

        # spike_fixed is the same as spike_var if you don't read the numpts
        # byte and set numpts = 40
        return self.__read_spike_fixed(numpts)
    def __read_spiketrain_indexed(self):
        """
        Read a SpikeTrain

        This is the same as __read_spiketrain_timestamped except it also
        contains the index of the Segment in the dam file.

        The index is stored as an annotation in the SpikeTrain.

        -------------------------------------------------
        Returns a SpikeTrain object with multiple spikes.

        The returned object must be added to the Block.

        ID: 29121
        """
        # int32 -- index of the analogsignalarray in corresponding .dam file
        dama_index = np.fromfile(self._fsrc, dtype=np.int32,
                                 count=1)[0]

        # spiketrain_timestamped -- this is based off a spiketrain_timestamped
        spiketrain = self.__read_spiketrain_timestamped()

        # add the property to the dict
        spiketrain.annotations['dama_index'] = dama_index

        return spiketrain

    def __read_spiketrain_timestamped(self):
        """
        Read a SpikeTrain

        This SpikeTrain contains a time stamp for when it was recorded

        The timestamp is stored as an annotation in the SpikeTrain.

        -------------------------------------------------
        Returns a SpikeTrain object with multiple spikes.

        The returned object must be added to the Block.

        ID: 29110
        """
        # float64 -- timestamp (number of days since dec 30th 1899)
        timestamp = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # convert to datetime object
        timestamp = self._convert_timestamp(timestamp)

        # seq_list -- spike list
        # combine the spikes into a single SpikeTrain
        spiketrain = self._combine_spiketrains(self.__read_list())

        # add the timestamp
        spiketrain.annotations['timestamp'] = timestamp

        return spiketrain

    def __read_unit(self):
        """
        Read all SpikeTrains from a single Segment and Unit

        This is the same as __read_unit_unsorted except it also contains
        information on the spike sorting boundaries.

        ------------------------------------------------------------------
        Returns a single Unit and a list of SpikeTrains from that Unit and
        current Segment, in that order.  The SpikeTrains must be returned since
        it is not possible to determine from the Unit which SpikeTrains are
        from the current Segment.

        The returned objects are already added to the Block.  The SpikeTrains
        must be added to the current Segment.

        ID: 29116
        """
        # same as unsorted Unit
        unit, trains = self.__read_unit_unsorted()

        # float32 * 18 -- Unit boundaries (IEEE 32-bit floats)
        unit.annotations['boundaries'] = [np.fromfile(self._fsrc,
                                                      dtype=np.float32,
                                                      count=18)]

        # uint8 * 9 -- boolean values indicating elliptic feature boundary
        # dimensions
        unit.annotations['elliptic'] = [np.fromfile(self._fsrc,
                                                    dtype=np.uint8,
                                                    count=9)]

        return unit, trains
    def __read_unit_list(self):
        """
        A list of a list of Units

        -----------------------------------------------
        Returns a list of Units modified in the method.

        The returned objects are already added to the Block.

        No ID number: only called by other methods
        """
        # this is used to figure out which Units to return
        maxunit = 1

        # int16 -- number of time slices
        numelements = np.fromfile(self._fsrc, dtype=np.int16, count=1)[0]

        # {sequence} * numelements1 -- the number of lists of Units to read
        self._chx.annotations['max_valid'] = []
        for i in range(numelements):
            # {skip} = byte * 2 (int16) -- skip 2 bytes
            self._fsrc.seek(2, 1)

            # double
            max_valid = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

            # int16 -- the number of Units to read
            numunits = np.fromfile(self._fsrc, dtype=np.int16, count=1)[0]

            # update the maximum Unit so far
            maxunit = max(maxunit, numunits + 1)

            # if there aren't enough Units, create them
            # remember we need to skip the UnassignedSpikes Unit
            if numunits > len(self._chx.units) + 1:
                for ind1 in range(len(self._chx.units), numunits + 1):
                    unit = Unit(name='unit%s' % ind1,
                                file_origin=self._file_origin,
                                elliptic=[], boundaries=[],
                                timestamp=[], max_valid=[])
                    self._chx.units.append(unit)

            # {Block} * numelements -- Units
            for ind1 in range(numunits):
                # get the Unit with the given index
                # remember we need to skip the UnassignedSpikes Unit
                unit = self._chx.units[ind1 + 1]

                # {skip} = byte * 2 (int16) -- skip 2 bytes
                self._fsrc.seek(2, 1)

                # int16 -- a multiplier for the elliptic and boundaries
                # properties
                numelements3 = np.fromfile(self._fsrc, dtype=np.int16,
                                           count=1)[0]

                # uint8 * 10 * numelements3 -- boolean values indicating
                # elliptic feature boundary dimensions
                elliptic = np.fromfile(self._fsrc, dtype=np.uint8,
                                       count=10 * numelements3)

                # float32 * 20 * numelements3 -- feature boundaries
                boundaries = np.fromfile(self._fsrc, dtype=np.float32,
                                         count=20 * numelements3)

                unit.annotations['elliptic'].append(elliptic)
                unit.annotations['boundaries'].append(boundaries)
                unit.annotations['max_valid'].append(max_valid)

        return self._chx.units[1:maxunit]

    def __read_unit_list_timestamped(self):
        """
        A list of a list of Units.

        This is the same as __read_unit_list, except that it also has a
        timestamp.  This is added as an annotation to all Units.

        -----------------------------------------------
        Returns a list of Units modified in the method.

        The returned objects are already added to the Block.

        ID: 29119
        """
        # double -- time zero (number of days since dec 30th 1899)
        timestamp = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]

        # convert to a datetime object
        timestamp = self._convert_timestamp(timestamp)

        # sorter -- this is based off a sorter
        units = self.__read_unit_list()

        for unit in units:
            unit.annotations['timestamp'].append(timestamp)

        return units
    def __read_unit_old(self):
        """
        Read all SpikeTrains from a single Segment and Unit

        This is the same as __read_unit_unsorted except it also contains
        information on the spike sorting boundaries.

        This is an old version of the format that used 48-bit floating-point
        numbers for the boundaries.  These cannot easily be read and so are
        skipped.

        ------------------------------------------------------------------
        Returns a single Unit and a list of SpikeTrains from that Unit and
        current Segment, in that order.  The SpikeTrains must be returned since
        it is not possible to determine from the Unit which SpikeTrains are
        from the current Segment.

        The returned objects are already added to the Block.  The SpikeTrains
        must be added to the current Segment.

        ID: 29107
        """
        # same as Unit
        unit, trains = self.__read_unit_unsorted()

        # bytes * 108 (float48 * 18) -- Unit boundaries (48-bit floating
        # point numbers are not supported so we skip them)
        self._fsrc.seek(108, 1)

        # uint8 * 9 -- boolean values indicating elliptic feature boundary
        # dimensions
        unit.annotations['elliptic'] = np.fromfile(self._fsrc, dtype=np.uint8,
                                                   count=9).tolist()

        return unit, trains

    def __read_unit_unsorted(self):
        """
        Read all SpikeTrains from a single Segment and Unit

        This does not contain Unit boundaries.

        ------------------------------------------------------------------
        Returns a single Unit and a list of SpikeTrains from that Unit and
        current Segment, in that order.  The SpikeTrains must be returned since
        it is not possible to determine from the Unit which SpikeTrains are
        from the current Segment.

        The returned objects are already added to the Block.  The SpikeTrains
        must be added to the current Segment.

        ID: 29084
        """
        # {skip} = bytes * 2 (uint16) -- skip two bytes
        self._fsrc.seek(2, 1)

        # uint16 -- number of characters in next string
        numchars = np.asscalar(np.fromfile(self._fsrc,
                                           dtype=np.uint16, count=1))

        # char * numchars -- ID string of Unit
        name = self.__read_str(numchars)

        # int32 -- SpikeTrain length in ms
        # int32 * 4 -- response and spon period boundaries
        parts = np.fromfile(self._fsrc, dtype=np.int32, count=5)
        t_stop = pq.Quantity(parts[0].astype('float32'),
                             units=pq.ms, copy=False)
        respwin = parts[1:]

        # (data_obj) -- list of SpikeTrains
        spikeslists = self._read_by_id()

        # use the Unit if it already exists, otherwise create it
        if name in self._unitdict:
            unit = self._unitdict[name]
        else:
            unit = Unit(name=name, file_origin=self._file_origin,
                        elliptic=[], boundaries=[], timestamp=[],
                        max_valid=[])
            self._chx.units.append(unit)
            self._unitdict[name] = unit

        # convert the individual spikes to SpikeTrains and add them to the Unit
        trains = [self._combine_spiketrains(spikes) for spikes in spikeslists]
        unit.spiketrains.extend(trains)

        for train in trains:
            train.t_stop = t_stop.copy()
            train.annotations['respwin'] = respwin.copy()

        return unit, trains
    def __skip_information(self):
        """
        Read an information sequence.

        This data sequence is skipped both here and in the Matlab reference
        implementation.

        ----------------------
        Returns an empty list.

        Nothing is created so nothing is added to the Block.

        ID: 29113
        """
        # {skip} char * 34 -- display information
        self._fsrc.seek(34, 1)

        return []

    def __skip_information_old(self):
        """
        Read an information sequence.

        This data sequence is skipped both here and in the Matlab reference
        implementation.

        This is an old version of the format.

        ----------------------
        Returns an empty list.

        Nothing is created so nothing is added to the Block.

        ID: 29100
        """
        # {skip} char * 4 -- display information
        self._fsrc.seek(4, 1)

        return []

    # This dictionary maps the numeric data sequence ID codes to the data
    # sequence reading functions.
    #
    # Since functions are first-class objects in Python, the functions returned
    # from this dictionary are directly callable.
    #
    # If new data sequence ID codes are added in the future please add the code
    # here in numeric order and the method above in alphabetical order
    #
    # The naming of any private method may change at any time
    _ID_DICT = {29079: __read_spike_fixed,
                29081: __read_spike_fixed_old,
                29082: __read_list,
                29083: __read_list,
                29084: __read_unit_unsorted,
                29091: __read_list,
                29093: __read_list,
                29099: __read_annotations_old,
                29100: __skip_information_old,
                29106: __read_segment,
                29107: __read_unit_old,
                29109: __read_annotations,
                29110: __read_spiketrain_timestamped,
                29112: __read_segment_list,
                29113: __skip_information,
                29114: __read_segment_list_var,
                29115: __read_spike_var,
                29116: __read_unit,
                29117: __read_segment_list_v8,
                29119: __read_unit_list_timestamped,
                29120: __read_segment_list_v9,
                29121: __read_spiketrain_indexed
                }
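
# A minimal usage sketch (illustrative, not part of the original module; the
# filename below is hypothetical):
#
#     io = BrainwareSrcIO(filename='recording_ch1.src')
#     blocks = io.read_all_blocks()          # reads every Block, then closes
#     data_segment = blocks[0].segments[1]   # segments[0] only holds comments
#     spikes = data_segment.spiketrains[0]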
def convert_brainwaresrc_timestamp(timestamp,
                                   start_date=datetime(1899, 12, 30)):
    """
    convert_brainwaresrc_timestamp(timestamp, start_date) - convert a timestamp
    in brainware src file units to a python datetime object.

    start_date defaults to 1899.12.30 (ISO format), which is the start date
    used by all BrainWare SRC data Blocks so far.  If manually specified
    it should be a datetime object or any other object that can be added
    to a timedelta object.
    """
    # datetime + timedelta = datetime again.
    return start_date + timedelta(days=timestamp)
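
# Illustrative example (not part of the original module): SRC timestamps count
# fractional days from 1899-12-30, so a value of 36526.0 corresponds to
# 2000-01-01:
#
#     >>> convert_brainwaresrc_timestamp(36526.0)
#     datetime.datetime(2000, 1, 1, 0, 0)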
if __name__ == '__main__':
    # run this when calling the file directly as a benchmark
    from neo.test.iotest.test_brainwaresrcio import FILES_TO_TEST
    from neo.test.iotest.common_io_test import url_for_tests
    from neo.test.iotest.tools import (create_local_temp_dir,
                                       download_test_file,
                                       get_test_file_full_path,
                                       make_all_directories)
    shortname = BrainwareSrcIO.__name__.lower().strip('io')
    local_test_dir = create_local_temp_dir(shortname)
    url = url_for_tests + shortname
    FILES_TO_TEST.remove('long_170s_1rep_1clust_ch2.src')
    make_all_directories(FILES_TO_TEST, local_test_dir)
    download_test_file(FILES_TO_TEST, local_test_dir, url)
    for path in get_test_file_full_path(ioclass=BrainwareSrcIO,
                                        filename=FILES_TO_TEST,
                                        directory=local_test_dir):
        ioobj = BrainwareSrcIO(path)
        ioobj.read_all_blocks(lazy=False)
        ioobj.read_all_blocks(lazy=True)