brainwaref32io.py 9.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280
  1. '''
  2. Class for reading from Brainware F32 files
  3. F32 files are simplified binary files for holding spike data. Unlike SRC
  4. files, F32 files carry little metadata. This also means, however, that the
  5. file format does not change, unlike SRC files whose format changes periodically
  6. (although ideally SRC files are backwards-compatible).
  7. Each F32 file only holds a single Block.
  8. The only metadata stored in the file is the length of a single repetition
  9. of the stimulus and the values of the stimulus parameters (but not the names
  10. of the parameters).
  11. Brainware was developed by Dr. Jan Schnupp and is available from
  12. Tucker Davis Technologies, Inc.
  13. http://www.tdt.com/downloads.htm
  14. Neither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in the
  15. development of this code
  16. The code is implemented with the permission of Dr. Jan Schnupp
  17. Author: Todd Jennings
  18. '''
  19. # import needed core python modules
  20. from os import path
  21. # numpy and quantities are already required by neo
  22. import numpy as np
  23. import quantities as pq
  24. # needed core neo modules
  25. from neo.core import Block, Group, Segment, SpikeTrain, Unit
  26. # need to subclass BaseIO
  27. from neo.io.baseio import BaseIO
  28. class BrainwareF32IO(BaseIO):
  29. '''
  30. Class for reading Brainware Spike ReCord files with the extension '.f32'
  31. The read_block method returns the first Block of the file. It will
  32. automatically close the file after reading.
  33. The read method is the same as read_block.
  34. The read_all_blocks method automatically reads all Blocks. It will
  35. automatically close the file after reading.
  36. The read_next_block method will return one Block each time it is called.
  37. It will automatically close the file and reset to the first Block
  38. after reading the last block.
  39. Call the close method to close the file and reset this method
  40. back to the first Block.
  41. The isopen property tells whether the file is currently open and
  42. reading or closed.
  43. Note 1:
  44. There is always only one Group.
  45. Usage:
  46. >>> from neo.io.brainwaref32io import BrainwareF32IO
  47. >>> f32file = BrainwareF32IO(filename='multi_500ms_mulitrep_ch1.f32')
  48. >>> blk1 = f32file.read()
  49. >>> blk2 = f32file.read_block()
  50. >>> print blk1.segments
  51. >>> print blk1.segments[0].spiketrains
  52. >>> print blk1.units
  53. >>> print blk1.units[0].name
  54. >>> print blk2
  55. >>> print blk2[0].segments
  56. '''
  57. is_readable = True # This class can only read data
  58. is_writable = False # write is not supported
  59. # This class is able to directly or indirectly handle the following objects
  60. # You can notice that this greatly simplifies the full Neo object hierarchy
  61. supported_objects = [Block, Group,
  62. Segment, SpikeTrain, Unit]
  63. readable_objects = [Block]
  64. writeable_objects = []
  65. has_header = False
  66. is_streameable = False
  67. # This is for GUI stuff: a definition for parameters when reading.
  68. # This dict should be keyed by object (`Block`). Each entry is a list
  69. # of tuple. The first entry in each tuple is the parameter name. The
  70. # second entry is a dict with keys 'value' (for default value),
  71. # and 'label' (for a descriptive name).
  72. # Note that if the highest-level object requires parameters,
  73. # common_io_test will be skipped.
  74. read_params = {Block: []}
  75. # does not support write so no GUI stuff
  76. write_params = None
  77. name = 'Brainware F32 File'
  78. extensions = ['f32']
  79. mode = 'file'
  80. def __init__(self, filename=None):
  81. '''
  82. Arguments:
  83. filename: the filename
  84. '''
  85. BaseIO.__init__(self)
  86. self._path = filename
  87. self._filename = path.basename(filename)
  88. self._fsrc = None
  89. self._blk = None
  90. self.__unit_group = None
  91. self.__t_stop = None
  92. self.__params = None
  93. self.__seg = None
  94. self.__spiketimes = None
  95. def read_block(self, lazy=False, **kargs):
  96. '''
  97. Reads a block from the simple spike data file "fname" generated
  98. with BrainWare
  99. '''
  100. assert not lazy, 'Do not support lazy'
  101. # there are no keyargs implemented to so far. If someone tries to pass
  102. # them they are expecting them to do something or making a mistake,
  103. # neither of which should pass silently
  104. if kargs:
  105. raise NotImplementedError('This method does not have any '
  106. 'argument implemented yet')
  107. self._fsrc = None
  108. self._blk = Block(file_origin=self._filename)
  109. block = self._blk
  110. # create the objects to store other objects
  111. self.__unit_group = Group(file_origin=self._filename)
  112. block.groups.append(self.__unit_group)
  113. # initialize values
  114. self.__t_stop = None
  115. self.__params = None
  116. self.__seg = None
  117. self.__spiketimes = None
  118. # open the file
  119. with open(self._path, 'rb') as self._fsrc:
  120. res = True
  121. # while the file is not done keep reading segments
  122. while res:
  123. res = self.__read_id()
  124. block.create_many_to_one_relationship()
  125. # cleanup attributes
  126. self._fsrc = None
  127. self._blk = None
  128. self.__t_stop = None
  129. self.__params = None
  130. self.__seg = None
  131. self.__spiketimes = None
  132. return block
  133. # -------------------------------------------------------------------------
  134. # -------------------------------------------------------------------------
  135. # IMPORTANT!!!
  136. # These are private methods implementing the internal reading mechanism.
  137. # Due to the way BrainWare DAM files are structured, they CANNOT be used
  138. # on their own. Calling these manually will almost certainly alter your
  139. # position in the file in an unrecoverable manner, whether they throw
  140. # an exception or not.
  141. # -------------------------------------------------------------------------
  142. # -------------------------------------------------------------------------
  143. def __read_id(self):
  144. '''
  145. Read the next ID number and do the appropriate task with it.
  146. Returns nothing.
  147. '''
  148. try:
  149. # float32 -- ID of the first data sequence
  150. objid = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]
  151. except IndexError:
  152. # if we have a previous segment, save it
  153. self.__save_segment()
  154. # if there are no more Segments, return
  155. return False
  156. if objid == -2:
  157. self.__read_condition()
  158. elif objid == -1:
  159. self.__read_segment()
  160. else:
  161. self.__spiketimes.append(objid)
  162. return True
  163. def __read_condition(self):
  164. '''
  165. Read the parameter values for a single stimulus condition.
  166. Returns nothing.
  167. '''
  168. # float32 -- SpikeTrain length in ms
  169. self.__t_stop = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]
  170. # float32 -- number of stimulus parameters
  171. numelements = int(np.fromfile(self._fsrc, dtype=np.float32,
  172. count=1)[0])
  173. # [float32] * numelements -- stimulus parameter values
  174. paramvals = np.fromfile(self._fsrc, dtype=np.float32,
  175. count=numelements).tolist()
  176. # organize the parameers into a dictionary with arbitrary names
  177. paramnames = ['Param%s' % i for i in range(len(paramvals))]
  178. self.__params = dict(zip(paramnames, paramvals))
  179. def __read_segment(self):
  180. '''
  181. Setup the next Segment.
  182. Returns nothing.
  183. '''
  184. # if we have a previous segment, save it
  185. self.__save_segment()
  186. # create the segment
  187. self.__seg = Segment(file_origin=self._filename,
  188. **self.__params)
  189. # create an empy array to save the spike times
  190. # this needs to be converted to a SpikeTrain before it can be used
  191. self.__spiketimes = []
  192. def __save_segment(self):
  193. '''
  194. Write the segment to the Block if it exists
  195. '''
  196. # if this is the beginning of the first condition, then we don't want
  197. # to save, so exit
  198. # but set __seg from None to False so we know next time to create a
  199. # segment even if there are no spike in the condition
  200. if self.__seg is None:
  201. self.__seg = False
  202. return
  203. if not self.__seg:
  204. # create dummy values if there are no SpikeTrains in this condition
  205. self.__seg = Segment(file_origin=self._filename,
  206. **self.__params)
  207. self.__spiketimes = []
  208. times = pq.Quantity(self.__spiketimes, dtype=np.float32,
  209. units=pq.ms)
  210. train = SpikeTrain(times,
  211. t_start=0 * pq.ms, t_stop=self.__t_stop * pq.ms,
  212. file_origin=self._filename)
  213. self.__seg.spiketrains = [train]
  214. self.__unit_group.spiketrains.append(train)
  215. self._blk.segments.append(self.__seg)
  216. # set an empty segment
  217. # from now on, we need to set __seg to False rather than None so
  218. # that if there is a condition with no SpikeTrains we know
  219. # to create an empty Segment
  220. self.__seg = False