
brainwaref32io.py 10 KB

# -*- coding: utf-8 -*-
'''
Class for reading from Brainware F32 files

F32 files are simplified binary files for holding spike data. Unlike SRC
files, F32 files carry little metadata. This also means, however, that the
file format does not change, unlike SRC files whose format changes
periodically (although ideally SRC files are backwards-compatible).

Each F32 file only holds a single Block.

The only metadata stored in the file is the length of a single repetition
of the stimulus and the values of the stimulus parameters (but not the
names of the parameters).

Brainware was developed by Dr. Jan Schnupp and is available from
Tucker Davis Technologies, Inc.
http://www.tdt.com/downloads.htm

Neither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in
the development of this code.

The code is implemented with the permission of Dr. Jan Schnupp.

Author: Todd Jennings
'''
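
# File layout, as inferred from the reader below: an F32 file is a flat
# stream of float32 values (read with numpy's native byte order). A value
# of -2 introduces a stimulus condition record (the repetition length in
# ms, the number of parameter values, then the values themselves), a value
# of -1 starts a new repetition (Segment), and any other value is a spike
# time in ms within the current repetition.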

# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function

# import needed core python modules
from os import path

# numpy and quantities are already required by neo
import numpy as np
import quantities as pq

# needed core neo modules
from neo.core import Block, ChannelIndex, Segment, SpikeTrain, Unit

# need to subclass BaseIO
from neo.io.baseio import BaseIO


class BrainwareF32IO(BaseIO):
    '''
    Class for reading Brainware Spike ReCord files with the extension '.f32'

    The read_block method returns the first Block of the file. It will
    automatically close the file after reading.
    The read method is the same as read_block.

    The read_all_blocks method automatically reads all Blocks. It will
    automatically close the file after reading.

    The read_next_block method will return one Block each time it is called.
    It will automatically close the file and reset to the first Block
    after reading the last block.
    Call the close method to close the file and reset this method
    back to the first Block.

    The isopen property tells whether the file is currently open and
    reading or closed.

    Note 1:
        There is always only one ChannelIndex. BrainWare stores the
        equivalent of ChannelIndexes in separate files.

    Usage:
        >>> from neo.io.brainwaref32io import BrainwareF32IO
        >>> f32file = BrainwareF32IO(filename='multi_500ms_mulitrep_ch1.f32')
        >>> blk1 = f32file.read()
        >>> blk2 = f32file.read_block()
        >>> print(blk1.segments)
        >>> print(blk1.segments[0].spiketrains)
        >>> print(blk1.units)
        >>> print(blk1.units[0].name)
        >>> print(blk2)
        >>> print(blk2.segments)
    '''

    is_readable = True   # This class can only read data
    is_writable = False  # write is not supported

    # This class is able to directly or indirectly handle the following
    # objects. You can notice that this greatly simplifies the full Neo
    # object hierarchy.
    supported_objects = [Block, ChannelIndex,
                         Segment, SpikeTrain, Unit]

    readable_objects = [Block]
    writeable_objects = []

    has_header = False
    is_streameable = False

    # This is for GUI stuff: a definition for parameters when reading.
    # This dict should be keyed by object (`Block`). Each entry is a list
    # of tuples. The first entry in each tuple is the parameter name. The
    # second entry is a dict with keys 'value' (for the default value)
    # and 'label' (for a descriptive name).
    # Note that if the highest-level object requires parameters,
    # common_io_test will be skipped.
    read_params = {Block: []}

    # does not support write so no GUI stuff
    write_params = None
    name = 'Brainware F32 File'
    extensions = ['f32']

    mode = 'file'

    def __init__(self, filename=None):
        '''
        Arguments:
            filename: the filename
        '''
        BaseIO.__init__(self)
        self._path = filename
        self._filename = path.basename(filename)
        self._fsrc = None
        self.__lazy = False

        self._blk = None
        self.__unit = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

    def read(self, lazy=False, cascade=True, **kargs):
        '''
        Reads simple spike data file "fname" generated with BrainWare
        '''
        return self.read_block(lazy=lazy, cascade=cascade)

    def read_block(self, lazy=False, cascade=True, **kargs):
        '''
        Reads a block from the simple spike data file "fname" generated
        with BrainWare
        '''

        # no keyword arguments are implemented so far. If someone tries to
        # pass them, they are either expecting them to do something or
        # making a mistake, neither of which should pass silently
        if kargs:
            raise NotImplementedError('This method does not have any '
                                      'arguments implemented yet')
        self._fsrc = None
        self.__lazy = lazy

        self._blk = Block(file_origin=self._filename)
        block = self._blk

        # if we aren't doing cascade, don't load anything
        if not cascade:
            return block

        # create the objects to store other objects
        chx = ChannelIndex(file_origin=self._filename,
                           index=np.array([], dtype=np.int))
        self.__unit = Unit(file_origin=self._filename)

        # load objects into their containers
        block.channel_indexes.append(chx)
        chx.units.append(self.__unit)

        # initialize values
        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        # open the file
        with open(self._path, 'rb') as self._fsrc:
            res = True
            # while the file is not done, keep reading segments
            while res:
                res = self.__read_id()

        block.create_many_to_one_relationship()

        # cleanup attributes
        self._fsrc = None
        self.__lazy = False

        self._blk = None

        self.__t_stop = None
        self.__params = None
        self.__seg = None
        self.__spiketimes = None

        return block

    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------
    #   IMPORTANT!!!
    #   These are private methods implementing the internal reading mechanism.
    #   Due to the way BrainWare F32 files are structured, they CANNOT be used
    #   on their own. Calling these manually will almost certainly alter your
    #   position in the file in an unrecoverable manner, whether they throw
    #   an exception or not.
    # -------------------------------------------------------------------------
    # -------------------------------------------------------------------------

    def __read_id(self):
        '''
        Read the next ID number and do the appropriate task with it.

        Returns False once the end of the file is reached, True otherwise.
        '''
        try:
            # float32 -- ID of the first data sequence
            objid = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]
        except IndexError:
            # if we have a previous segment, save it
            self.__save_segment()

            # if there are no more Segments, return
            return False

        if objid == -2:
            self.__read_condition()
        elif objid == -1:
            self.__read_segment()
        else:
            self.__spiketimes.append(objid)
        return True

    def __read_condition(self):
        '''
        Read the parameter values for a single stimulus condition.

        Returns nothing.
        '''
        # float32 -- SpikeTrain length in ms
        self.__t_stop = np.fromfile(self._fsrc, dtype=np.float32, count=1)[0]

        # float32 -- number of stimulus parameters
        numelements = int(np.fromfile(self._fsrc, dtype=np.float32,
                                      count=1)[0])

        # [float32] * numelements -- stimulus parameter values
        paramvals = np.fromfile(self._fsrc, dtype=np.float32,
                                count=numelements).tolist()

        # organize the parameters into a dictionary with arbitrary names
        paramnames = ['Param%s' % i for i in range(len(paramvals))]
        self.__params = dict(zip(paramnames, paramvals))
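
    # Illustration with hypothetical values (not from a real recording): if
    # __read_id encounters -2.0 and the next float32 values in the stream
    # are [500.0, 2.0, 10.0, 3.0], this method sets __t_stop to 500.0 (ms)
    # and __params to {'Param0': 10.0, 'Param1': 3.0}.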

    def __read_segment(self):
        '''
        Set up the next Segment.

        Returns nothing.
        '''
        # if we have a previous segment, save it
        self.__save_segment()

        # create the segment
        self.__seg = Segment(file_origin=self._filename,
                             **self.__params)

        # create an empty list to hold the spike times
        # this needs to be converted to a SpikeTrain before it can be used
        self.__spiketimes = []

    def __save_segment(self):
        '''
        Write the segment to the Block if it exists
        '''
        # if this is the beginning of the first condition, then we don't want
        # to save, so exit
        # but set __seg from None to False so we know next time to create a
        # segment even if there are no spikes in the condition
        if self.__seg is None:
            self.__seg = False
            return

        if not self.__seg:
            # create dummy values if there are no SpikeTrains in this
            # condition
            self.__seg = Segment(file_origin=self._filename,
                                 **self.__params)
            self.__spiketimes = []

        if self.__lazy:
            train = SpikeTrain(pq.Quantity([], dtype=np.float32,
                                           units=pq.ms),
                               t_start=0*pq.ms, t_stop=self.__t_stop*pq.ms,
                               file_origin=self._filename)
            train.lazy_shape = len(self.__spiketimes)
        else:
            times = pq.Quantity(self.__spiketimes, dtype=np.float32,
                                units=pq.ms)
            train = SpikeTrain(times,
                               t_start=0*pq.ms, t_stop=self.__t_stop*pq.ms,
                               file_origin=self._filename)

        self.__seg.spiketrains = [train]
        self.__unit.spiketrains.append(train)
        self._blk.segments.append(self.__seg)

        # set an empty segment
        # from now on, we need to set __seg to False rather than None so
        # that if there is a condition with no SpikeTrains we know
        # to create an empty Segment
        self.__seg = False
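
# A quick way to eyeball the raw stream when debugging (a sketch, not part
# of the class API; the filename is the one from the usage example above):
#
#     import numpy as np
#     raw = np.fromfile('multi_500ms_mulitrep_ch1.f32', dtype=np.float32)
#     print(raw[:20])  # -2.0 should introduce the first condition record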