brainwaref32io.py
# -*- coding: utf-8 -*-
'''
Class for reading from Brainware F32 files

F32 files are simplified binary files for holding spike data. Unlike SRC
files, F32 files carry little metadata. This also means, however, that the
file format does not change, unlike SRC files whose format changes
periodically (although ideally SRC files are backwards-compatible).

Each F32 file only holds a single Block.

The only metadata stored in the file is the length of a single repetition
of the stimulus and the values of the stimulus parameters (but not the names
of the parameters).

Brainware was developed by Dr. Jan Schnupp and is available from
Tucker Davis Technologies, Inc.
http://www.tdt.com/downloads.htm

Neither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in
the development of this code.

The code is implemented with the permission of Dr. Jan Schnupp.

Author: Todd Jennings
'''
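
# A note on the on-disk layout, inferred from the reader below rather than
# from any official specification: an F32 file is a flat stream of float32
# values. A value of -2 marks the start of a stimulus condition and is
# followed by the length of one repetition in ms, the number of stimulus
# parameters, and the parameter values themselves. A value of -1 marks the
# start of a repetition (one Segment), and any other value is a spike time
# in ms.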

# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function

# import needed core python modules
from os import path

# numpy and quantities are already required by neo
import numpy as np
import quantities as pq

# needed core neo modules
from neo.core import Block, ChannelIndex, Segment, SpikeTrain, Unit

# need to subclass BaseIO
from neo.io.baseio import BaseIO


class BrainwareF32IO(BaseIO):
    '''
    Class for reading Brainware Spike ReCord files with the extension '.f32'

    The read_block method returns the first Block of the file. It will
    automatically close the file after reading.
    The read method is the same as read_block.

    The read_all_blocks method automatically reads all Blocks. It will
    automatically close the file after reading.

    The read_next_block method will return one Block each time it is called.
    It will automatically close the file and reset to the first Block
    after reading the last block.
    Call the close method to close the file and reset this method
    back to the first Block.

    The isopen property tells whether the file is currently open and
    reading or closed.

    Note 1:
        There is always only one ChannelIndex. BrainWare stores the
        equivalent of ChannelIndexes in separate files.

    Usage:
        >>> from neo.io.brainwaref32io import BrainwareF32IO
        >>> f32file = BrainwareF32IO(filename='multi_500ms_mulitrep_ch1.f32')
        >>> blk1 = f32file.read()
        >>> blk2 = f32file.read_block()
        >>> print(blk1.segments)
        >>> print(blk1.segments[0].spiketrains)
        >>> print(blk1.units)
        >>> print(blk1.units[0].name)
        >>> print(blk2)
        >>> print(blk2.segments)
    '''

    is_readable = True   # This class can only read data
    is_writable = False  # write is not supported

    # This class is able to directly or indirectly handle the following
    # objects.  Note that this greatly simplifies the full Neo object
    # hierarchy
    supported_objects = [Block, ChannelIndex, Segment, SpikeTrain, Unit]

    readable_objects = [Block]
    writeable_objects = []

    has_header = False
    is_streameable = False

    # This is for GUI stuff: a definition for parameters when reading.
    # This dict should be keyed by object (`Block`). Each entry is a list
    # of tuples. The first entry in each tuple is the parameter name. The
    # second entry is a dict with keys 'value' (for the default value)
    # and 'label' (for a descriptive name).
    # Note that if the highest-level object requires parameters,
    # common_io_test will be skipped.
    read_params = {Block: []}
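    # For illustration only (this dict is hypothetical, not part of this IO):
    # an IO exposing a single GUI-settable parameter might define
    # read_params = {Block: [('sort_by', {'value': 'time',
    #                                     'label': 'sort spikes by'})]}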

    # does not support write so no GUI stuff
    write_params = None

    name = 'Brainware F32 File'
    extensions = ['f32']

    mode = 'file'

    def __init__(self, filename=None):
        '''
        Arguments:
            filename: the filename
        '''
        BaseIO.__init__(self)
        self._path = filename
        self._filename = path.basename(filename)

        self._fsrc = None
        self._blk = None

        self.__unit = None
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

    def read(self, lazy=False, **kargs):
        '''
        Reads simple spike data file "fname" generated with BrainWare
        '''
        return self.read_block(lazy=lazy)

    def read_block(self, lazy=False, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare
        '''
        assert not lazy, 'This IO does not support lazy reading'

        # there are no keyword arguments implemented so far.  If someone
        # tries to pass them, they are either expecting them to do something
        # or making a mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        self._fsrc = None

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # create the objects to store other objects
        # (np.int was removed from numpy; the builtin int is equivalent here)
        chx = ChannelIndex(file_origin=self._filename,
                           index=np.array([], dtype=int))
        self.__unit = Unit(file_origin=self._filename)

        # load objects into their containers
        block.channel_indexes.append(chx)
        chx.units.append(self.__unit)

        # initialize values
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done keep reading segments
            while res:
                res = self.__read_id()

        block.create_many_to_one_relationship()

        # cleanup attributes
        self._fsrc = None
        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block

    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    #   IMPORTANT!!!
    #   These are private methods implementing the internal reading mechanism.
    #   Due to the way BrainWare F32 files are structured, they CANNOT be used
    #   on their own.  Calling these manually will almost certainly alter your
    #   position in the file in an unrecoverable manner, whether they throw
    #   an exception or not.
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------

    def __read_id(self):
        '''
        Read the next ID number and do the appropriate task with it.

        Returns True if a value was read, False once the end of the file
        is reached.
        '''
        try:
            # float32 -- ID of the first data sequence
            objid = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]
        except IndexError:
            # if we have a previous segment, save it
            self.__save_segment()

            # if there are no more Segments, return
            return False

        if objid == -2:
            self.__read_condition()
        elif objid == -1:
            self.__read_segment()
        else:
            self.__spiketimes.append(objid)

        return True

    def __read_condition(self):
        '''
        Read the parameter values for a single stimulus condition.

        Returns nothing.
        '''
        # float32 -- SpikeTrain length in ms
        self.__t_stop = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]

        # float32 -- number of stimulus parameters
        numelements = int(np.fromfile(self._fsrc, dtype=np.float32,
                                      count=1)[0])

        # [float32] * numelements -- stimulus parameter values
        paramvals = np.fromfile(self._fsrc, dtype=np.float32,
                                count=numelements).tolist()

        # organize the parameters into a dictionary with arbitrary names
        paramnames = ['Param%s' % i for i in range(len(paramvals))]
        self.__params = dict(zip(paramnames, paramvals))
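        # For illustration: a condition record holding the values
        # [10.0, 75.0] yields {'Param0': 10.0, 'Param1': 75.0}.  The file
        # does not store parameter names, so these generic names are all
        # that can be recovered.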

    def __read_segment(self):
        '''
        Set up the next Segment.

        Returns nothing.
        '''
        # if we have a previous segment, save it
        self.__save_segment()

        # create the segment
        self.__seg = Segment(file_origin=self._filename,
                             **self.__params)

        # create an empty list to save the spike times
        # this needs to be converted to a SpikeTrain before it can be used
        self.__spiketimes = []

    def __save_segment(self):
        '''
        Write the segment to the Block if it exists
        '''
        # if this is the beginning of the first condition, then we don't want
        # to save, so exit
        # but set __seg from None to False so we know next time to create a
        # segment even if there are no spikes in the condition
        if self.__seg is None:
            self.__seg = False
            return

        if not self.__seg:
            # create dummy values if there are no SpikeTrains in this condition
            self.__seg = Segment(file_origin=self._filename,
                                 **self.__params)
            self.__spiketimes = []

        times = pq.Quantity(self.__spiketimes, dtype=np.float32,
                            units=pq.ms)
        train = SpikeTrain(times,
                           t_start=0 * pq.ms, t_stop=self.__t_stop * pq.ms,
                           file_origin=self._filename)

        self.__seg.spiketrains = [train]
        self.__unit.spiketrains.append(train)
        self._blk.segments.append(self.__seg)

        # set an empty segment
        # from now on, we need to set __seg to False rather than None so
        # that if there is a condition with no SpikeTrains we know
        # to create an empty Segment
        self.__seg = False
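

# A minimal usage sketch (not part of the original module): the filename
# below is hypothetical, and neo must be installed so the imports at the
# top of this file resolve.
if __name__ == '__main__':
    io = BrainwareF32IO(filename='example_recording.f32')
    blk = io.read_block()
    for seg in blk.segments:
        # the stimulus parameters read by __read_condition end up as
        # annotations on each Segment
        print(seg.annotations, seg.spiketrains[0])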