neuroexplorerio.py 13 KB

# -*- coding: utf-8 -*-
"""
Class for reading data from NeuroExplorer (.nex) files.

Developer documentation:
http://www.neuroexplorer.com/downloads/HowToReadAndWriteNexAndNex5FilesInMatlab.zip

Depends on: numpy, quantities

Supported: Read

Authors: sgarcia, luc estebanez, mark hollenbeck
"""

import os
import struct

import numpy as np
import quantities as pq

from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain, Epoch, Event


class NeuroExplorerIO(BaseIO):
    """
    Class for reading .nex files.

    Usage:
        >>> from neo import io
        >>> r = io.NeuroExplorerIO(filename='File_neuroexplorer_1.nex')
        >>> seg = r.read_segment(lazy=False, cascade=True)
        >>> print(seg.analogsignals)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<AnalogSignal(array([ 39.0625 , 0. , 0. , ...,
        >>> print(seg.spiketrains)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<SpikeTrain(array([ 2.29499992e-02, 6.79249987e-02, ...
        >>> print(seg.events)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<Event: @21.1967754364 s, @21.2993755341 s, @21.350725174 s, ...
        >>> print(seg.epochs)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<neo.core.epoch.Epoch object at 0x10561ba90>,
         <neo.core.epoch.Epoch object at 0x10561bad0>]
    """
    is_readable = True
    is_writable = False

    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
    readable_objects = [Segment]
    writeable_objects = []

    has_header = False
    is_streameable = False

    # This is for GUI stuff: a definition for parameters when reading.
    read_params = {Segment: []}
    write_params = None

    name = 'NeuroExplorer'
    extensions = ['nex']
    mode = 'file'

    def __init__(self, filename=None):
        """
        This class reads a .nex file.

        Arguments:
            filename: the file to read
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read_segment(self, lazy=False, cascade=True):
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        seg = Segment()
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(neuroexplorer_version=global_header['version'])
        seg.annotate(comment=global_header['comment'])

        if not cascade:
            return seg

        offset = 544
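        # Each entity header carries a `type` code that selects the layout
        # of its data block (the branches below):
        #   0 = neuron (spike times)      4 = population vector (skipped)
        #   1 = event                     5 = continuous (analog) signal
        #   2 = interval (epoch)          6 = marker
        #   3 = spiketrain with waveforms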
        for i in range(global_header['nvar']):
            entity_header = HeaderReader(fid, EntityHeader).read_f(
                offset=offset + i * 208)
            entity_header['name'] = entity_header['name'].replace('\x00', '')

            if entity_header['type'] == 0:
                # neuron: spike times stored as int32 ticks
                if lazy:
                    spike_times = [] * pq.s
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'),
                                            'r', shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    # ticks -> seconds via the global timestamp frequency
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=global_header['tend'] /
                    global_header['freq'] * pq.s,
                    name=entity_header['name'])
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)
            if entity_header['type'] == 1:
                # event
                if lazy:
                    event_times = [] * pq.s
                else:
                    event_times = np.memmap(self.filename, np.dtype('i4'),
                                            'r', shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    event_times = event_times.astype('f8') / global_header[
                        'freq'] * pq.s
                labels = np.array([''] * event_times.size, dtype='S')
                evar = Event(times=event_times, labels=labels,
                             channel_name=entity_header['name'])
                if lazy:
                    evar.lazy_shape = entity_header['n']
                seg.events.append(evar)
            if entity_header['type'] == 2:
                # interval: start ticks, then stop ticks, back to back
                if lazy:
                    start_times = [] * pq.s
                    stop_times = [] * pq.s
                else:
                    start_times = np.memmap(self.filename, np.dtype('i4'),
                                            'r', shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    start_times = start_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    stop_times = np.memmap(self.filename, np.dtype('i4'),
                                           'r', shape=(entity_header['n']),
                                           offset=entity_header['offset'] +
                                           entity_header['n'] * 4)
                    stop_times = stop_times.astype('f8') / global_header[
                        'freq'] * pq.s
                epar = Epoch(times=start_times,
                             durations=stop_times - start_times,
                             labels=np.array([''] * start_times.size,
                                             dtype='S'),
                             channel_name=entity_header['name'])
                if lazy:
                    epar.lazy_shape = entity_header['n']
                seg.epochs.append(epar)
            if entity_header['type'] == 3:
                # spiketrain with waveforms
                if lazy:
                    spike_times = [] * pq.s
                    waveforms = None
                else:
                    spike_times = np.memmap(self.filename, np.dtype('i4'),
                                            'r', shape=(entity_header['n']),
                                            offset=entity_header['offset'])
                    spike_times = spike_times.astype('f8') / global_header[
                        'freq'] * pq.s
                    # one int16 waveform of NPointsWave samples per spike,
                    # stored right after the timestamp block
                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
                                          shape=(entity_header['n'], 1,
                                                 entity_header['NPointsWave']),
                                          offset=entity_header['offset'] +
                                          entity_header['n'] * 4)
                    waveforms = (waveforms.astype('f') *
                                 entity_header['ADtoMV'] +
                                 entity_header['MVOffset']) * pq.mV
                t_stop = global_header['tend'] / global_header['freq'] * pq.s
                if spike_times.size > 0:
                    t_stop = max(t_stop, max(spike_times))
                sptr = SpikeTrain(
                    times=spike_times,
                    t_start=global_header['tbeg'] /
                    global_header['freq'] * pq.s,
                    t_stop=t_stop,
                    name=entity_header['name'],
                    waveforms=waveforms,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    left_sweep=0 * pq.ms)
                if lazy:
                    sptr.lazy_shape = entity_header['n']
                sptr.annotate(channel_index=entity_header['WireNumber'])
                seg.spiketrains.append(sptr)
            if entity_header['type'] == 4:
                # population vectors: not implemented
                pass

            if entity_header['type'] == 5:
                # continuous (analog) signal
                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
                                       shape=(entity_header['n']),
                                       offset=entity_header['offset'])
                timestamps = timestamps.astype('f8') / global_header['freq']
                fragment_starts_offset = (entity_header['offset'] +
                                          entity_header['n'] * 4)
                fragment_starts = np.memmap(self.filename, np.dtype('i4'),
                                            'r', shape=(entity_header['n']),
                                            offset=fragment_starts_offset)
                fragment_starts = fragment_starts.astype('f8') / \
                    global_header['freq']
                t_start = timestamps[0] - fragment_starts[0] / float(
                    entity_header['WFrequency'])
                del timestamps, fragment_starts

                if lazy:
                    signal = [] * pq.mV
                else:
                    signal_offset = (fragment_starts_offset +
                                     entity_header['n'] * 4)
                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
                                       shape=(entity_header['NPointsWave']),
                                       offset=signal_offset)
                    # scale raw AD counts to millivolts
                    signal = signal.astype('f')
                    signal *= entity_header['ADtoMV']
                    signal += entity_header['MVOffset']
                    signal = signal * pq.mV
                ana_sig = AnalogSignal(
                    signal=signal, t_start=t_start * pq.s,
                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
                    name=entity_header['name'],
                    channel_index=entity_header['WireNumber'])
                if lazy:
                    ana_sig.lazy_shape = entity_header['NPointsWave']
                seg.analogsignals.append(ana_sig)
            if entity_header['type'] == 6:
                # markers: TO TEST
                if lazy:
                    times = [] * pq.s
                    labels = np.array([], dtype='S')
                    markertype = None
                else:
                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
                                      shape=(entity_header['n']),
                                      offset=entity_header['offset'])
                    times = times.astype('f8') / global_header['freq'] * pq.s
                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
                    # the 64-byte marker-type field is NUL-padded bytes
                    markertype = fid.read(64).replace(b'\x00', b'').decode()
                    labels = np.memmap(
                        self.filename,
                        np.dtype('S' + str(entity_header['MarkerLength'])),
                        'r', shape=(entity_header['n']),
                        offset=entity_header['offset'] +
                        entity_header['n'] * 4 + 64)
                ea = Event(times=times,
                           labels=labels.view(np.ndarray),
                           name=entity_header['name'],
                           channel_index=entity_header['WireNumber'],
                           marker_type=markertype)
                if lazy:
                    ea.lazy_shape = entity_header['n']
                seg.events.append(ea)

        fid.close()
        seg.create_many_to_one_relationship()
        return seg


GlobalHeader = [
    ('signature', '4s'),
    ('version', 'i'),
    ('comment', '256s'),
    ('freq', 'd'),
    ('tbeg', 'i'),
    ('tend', 'i'),
    ('nvar', 'i'),
]
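
# Note: the fields above pack to 284 bytes, but the on-disk global header
# is padded to 544 bytes in total, which is why read_segment() starts
# scanning entity headers at offset 544.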

EntityHeader = [
    ('type', 'i'),
    ('varVersion', 'i'),
    ('name', '64s'),
    ('offset', 'i'),
    ('n', 'i'),
    ('WireNumber', 'i'),
    ('UnitNumber', 'i'),
    ('Gain', 'i'),
    ('Filter', 'i'),
    ('XPos', 'd'),
    ('YPos', 'd'),
    ('WFrequency', 'd'),
    ('ADtoMV', 'd'),
    ('NPointsWave', 'i'),
    ('NMarkers', 'i'),
    ('MarkerLength', 'i'),
    ('MVOffset', 'd'),
    ('dummy', '60s'),
]
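
# The entity header fields pack to exactly 208 bytes, matching the
# `offset + i * 208` stride used when iterating over `nvar` entities.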

MarkerHeader = [
    ('type', 'i'),
    ('varVersion', 'i'),
    ('name', '64s'),
    ('offset', 'i'),
    ('n', 'i'),
    ('WireNumber', 'i'),
    ('UnitNumber', 'i'),
    ('Gain', 'i'),
    ('Filter', 'i'),
]
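
# MarkerHeader is kept for reference only: marker blocks (type == 6) are
# parsed inline in read_segment() rather than through this description.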


class HeaderReader:
    def __init__(self, fid, description):
        self.fid = fid
        self.description = description

    def read_f(self, offset=0):
        """Read one header at `offset`, field by field, into a dict."""
        self.fid.seek(offset)
        d = {}
        for key, fmt in self.description:
            val = struct.unpack(fmt, self.fid.read(struct.calcsize(fmt)))
            if len(val) == 1:
                val = val[0]
            else:
                val = list(val)
            if isinstance(val, bytes):
                # String fields ('4s', '64s', '256s', ...) come back as
                # NUL-padded bytes under Python 3; decode (assuming 8-bit
                # text) and strip the padding.
                val = val.decode('latin1').replace('\x00', '')
            d[key] = val
        return d
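

if __name__ == '__main__':
    # Minimal smoke test, a sketch only: the default filename below is
    # illustrative and assumed to exist locally. It parses the 544-byte
    # global header and prints one line per entity header, exercising
    # nothing beyond HeaderReader and the header tables above.
    import sys
    fname = sys.argv[1] if len(sys.argv) > 1 else 'File_neuroexplorer_1.nex'
    with open(fname, 'rb') as fid:
        gh = HeaderReader(fid, GlobalHeader).read_f(offset=0)
        print('version:', gh['version'], 'freq:', gh['freq'],
              'nvar:', gh['nvar'])
        for i in range(gh['nvar']):
            eh = HeaderReader(fid, EntityHeader).read_f(offset=544 + i * 208)
            print(i, 'type:', eh['type'], 'name:', eh['name'], 'n:', eh['n'])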