# -*- coding: utf-8 -*-
"""
Class for reading data from Plexon acquisition systems (.plx)

Compatible with versions 100 to 106. Other versions have not been tested.

This IO was developed thanks to the header file downloadable from:
http://www.plexon.com/downloads.html

Supported: Read

Author: sgarcia
"""
from __future__ import unicode_literals, print_function, division

import datetime
import struct
import os

import numpy as np
import quantities as pq

from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain, Event
from neo.io.tools import iteritems


class PlexonIO(BaseIO):
  21. """
  22. Class for reading data from Plexon acquisition systems (.plx)
  23. Compatible with versions 100 to 106.
  24. Other versions have not been tested.
  25. Usage:
  26. >>> from neo import io
  27. >>> r = io.PlexonIO(filename='File_plexon_1.plx')
  28. >>> seg = r.read_segment(lazy=False, cascade=True)
  29. >>> print seg.analogsignals
  30. []
  31. >>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
  32. [<SpikeTrain(array([ 2.75000000e-02, 5.68250000e-02, ...,
  33. ...
  34. >>> print seg.events
  35. []
  36. """
  37. is_readable = True
  38. is_writable = False
  39. supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]
  40. readable_objects = [Segment]
  41. writeable_objects = []
  42. has_header = False
  43. is_streameable = False
  44. # This is for GUI stuff: a definition for parameters when reading.
  45. read_params = {
  46. Segment: [
  47. ('load_spike_waveform', {'value': False}),
  48. ]
  49. }
  50. write_params = None
  51. name = 'Plexon'
  52. extensions = ['plx']
  53. mode = 'file'

    def __init__(self, filename=None):
        """
        Arguments:
            filename : the filename
        """
        BaseIO.__init__(self, filename)

    def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
        """
        Read in a segment.

        Arguments:
            load_spike_waveform : load (or not) the spike waveforms
                (default True)
        """
        fid = open(self.filename, 'rb')
        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)

        # metadata
        seg = Segment()
        seg.rec_datetime = datetime.datetime(global_header['Year'],
                                             global_header['Month'],
                                             global_header['Day'],
                                             global_header['Hour'],
                                             global_header['Minute'],
                                             global_header['Second'])
        seg.file_origin = os.path.basename(self.filename)
        seg.annotate(plexon_version=global_header['Version'])
        for key, val in global_header.items():
            seg.annotate(**{key: val})

        if not cascade:
            return seg

        ## Step 1 : read headers
        # dsp channels header = spikes and waveforms
        dspChannelHeaders = {}
        maxunit = 0
        maxchan = 0
        for _ in range(global_header['NumDSPChannels']):
            # channel is 1 based
            channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
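            # 'Template' (320 values) and 'Boxes' (40 values) are stored flat
            # in the file; they are reshaped below to (5, 64) and (5, 2, 4).
            # The leading 5 presumably corresponds to the maximum number of
            # sorted units per channel in this header layout.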
            channelHeader['Template'] = np.array(
                channelHeader['Template']).reshape((5, 64))
            channelHeader['Boxes'] = np.array(
                channelHeader['Boxes']).reshape((5, 2, 4))
            dspChannelHeaders[channelHeader['Channel']] = channelHeader
            maxunit = max(channelHeader['NUnits'], maxunit)
            maxchan = max(channelHeader['Channel'], maxchan)

        # event channel header
        eventHeaders = {}
        for _ in range(global_header['NumEventChannels']):
            eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
            eventHeaders[eventHeader['Channel']] = eventHeader

        # slow channel header = signal
        slowChannelHeaders = {}
        for _ in range(global_header['NumSlowChannels']):
            slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(
                offset=None)
            slowChannelHeaders[slowChannelHeader['Channel']] = \
                slowChannelHeader

        ## Step 2 : a first loop to count sizes
        # signal
        nb_samples = np.zeros(len(slowChannelHeaders), dtype='i8')
        sample_positions = np.zeros(len(slowChannelHeaders), dtype='i8')
        t_starts = np.zeros(len(slowChannelHeaders), dtype='f')

        # spike times and waveforms
        nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
        wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')

        # events
        nb_events = {}
        for chan, h in iteritems(eventHeaders):
            nb_events[chan] = 0

        start = fid.tell()
        while fid.tell() != -1:
            # read block header
            dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
                offset=None)
            if dataBlockHeader is None:
                break
            chan = dataBlockHeader['Channel']
            unit = dataBlockHeader['Unit']
            n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader[
                'NumberOfWordsInWaveform']
            time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
                    dataBlockHeader['TimeStamp'])
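            # Timestamps are 40-bit: the upper byte is kept in a separate
            # field, so e.g. upper=1, lower=0 gives 2 ** 32 = 4294967296 ticks
            # (converted to seconds with ADFrequency in the second pass).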
            if dataBlockHeader['Type'] == 1:
                # spike
                nb_spikes[chan, unit] += 1
                wf_sizes[chan, unit, :] = [n1, n2]
                fid.seek(n1 * n2 * 2, 1)
            elif dataBlockHeader['Type'] == 4:
                # event
                nb_events[chan] += 1
            elif dataBlockHeader['Type'] == 5:
                # continuous signal
                fid.seek(n2 * 2, 1)
                if n2 > 0:
                    if nb_samples[chan] == 0:
                        # first chunk on this channel: its timestamp is the
                        # channel's t_start, converted from ticks to seconds
                        t_starts[chan] = time / global_header['ADFrequency']
                    nb_samples[chan] += n2
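
        # The two passes trade a second scan of the file for exact
        # preallocation: the first pass above only counts sizes; the second
        # pass below fills the preallocated arrays in place.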

        ## Step 3 : allocate memory and a second loop to read, if not lazy
        if not lazy:
            # allocate memory for signals
            sigarrays = {}
            for chan, h in iteritems(slowChannelHeaders):
                sigarrays[chan] = np.zeros(nb_samples[chan])

            # allocate memory for spike trains
            stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
            for (chan, unit), _ in np.ndenumerate(nb_spikes):
                stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit],
                                                   dtype='f')
                if load_spike_waveform:
                    n1, n2 = wf_sizes[chan, unit, :]
                    swfarrays[chan, unit] = np.zeros(
                        (nb_spikes[chan, unit], n1, n2), dtype='f4')
            pos_spikes = np.zeros(nb_spikes.shape, dtype='i')

            # allocate memory for events
            eventpositions = {}
            evarrays = {}
            for chan, nb in iteritems(nb_events):
                evarrays[chan] = {
                    'times': np.zeros(nb, dtype='f'),
                    'labels': np.zeros(nb, dtype='S4')
                }
                eventpositions[chan] = 0

            fid.seek(start)
            while fid.tell() != -1:
                dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
                    offset=None)
                if dataBlockHeader is None:
                    break
                chan = dataBlockHeader['Channel']
                n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader[
                    'NumberOfWordsInWaveform']
                time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
                        dataBlockHeader['TimeStamp'])
                time /= global_header['ADFrequency']
                if n2 < 0:
                    break
                if dataBlockHeader['Type'] == 1:
                    # spike
                    unit = dataBlockHeader['Unit']
                    pos = pos_spikes[chan, unit]
                    stimearrays[chan, unit][pos] = time
                    if load_spike_waveform and n1 * n2 != 0:
                        swfarrays[chan, unit][pos, :, :] = np.frombuffer(
                            fid.read(n1 * n2 * 2), dtype='i2'
                        ).reshape(n1, n2).astype('f4')
                    else:
                        fid.seek(n1 * n2 * 2, 1)
                    pos_spikes[chan, unit] += 1
                elif dataBlockHeader['Type'] == 4:
                    # event
                    pos = eventpositions[chan]
                    evarrays[chan]['times'][pos] = time
                    evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
                    eventpositions[chan] += 1
                elif dataBlockHeader['Type'] == 5:
                    # continuous signal
                    data = np.frombuffer(
                        fid.read(n2 * 2), dtype='i2').astype('f4')
                    sigarrays[chan][sample_positions[chan]:
                                    sample_positions[chan] + data.size] = data
                    sample_positions[chan] += data.size
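
        # By now each continuous channel, which Plexon stores as many small
        # chunks, has been reassembled into a single array in sigarrays, with
        # sample_positions tracking the per-channel write offset.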

        ## Step 4 : create neo objects
        for chan, h in iteritems(eventHeaders):
            if lazy:
                times = []
                labels = None
            else:
                times = evarrays[chan]['times']
                labels = evarrays[chan]['labels']
            ea = Event(
                times * pq.s,
                labels=labels,
                channel_name=eventHeaders[chan]['Name'],
                channel_index=chan
            )
            if lazy:
                ea.lazy_shape = nb_events[chan]
            seg.events.append(ea)

        for chan, h in iteritems(slowChannelHeaders):
            if lazy:
                signal = []
            else:
                if global_header['Version'] == 100 or global_header[
                        'Version'] == 101:
                    gain = 5000. / (
                        2048 * slowChannelHeaders[chan]['Gain'] * 1000.)
                elif global_header['Version'] == 102:
                    gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] *
                                    slowChannelHeaders[chan]['PreampGain'])
                elif global_header['Version'] >= 103:
                    gain = global_header['SlowMaxMagnitudeMV'] / (
                        .5 * (2 ** global_header['BitsPerSlowSample']) *
                        slowChannelHeaders[chan]['Gain'] *
                        slowChannelHeaders[chan]['PreampGain'])
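                # Worked example (values made up): for version >= 103 with
                # SlowMaxMagnitudeMV = 5000, 12 bits per slow sample, Gain = 1
                # and PreampGain = 1000, gain = 5000 / (.5 * 4096 * 1 * 1000)
                # ~= 0.0024 per raw count.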
                signal = sigarrays[chan] * gain
            anasig = AnalogSignal(
                signal * pq.V,
                sampling_rate=float(
                    slowChannelHeaders[chan]['ADFreq']) * pq.Hz,
                t_start=t_starts[chan] * pq.s,
                channel_index=slowChannelHeaders[chan]['Channel'],
                channel_name=slowChannelHeaders[chan]['Name'])
            if lazy:
                anasig.lazy_shape = nb_samples[chan]
            seg.analogsignals.append(anasig)

        for (chan, unit), value in np.ndenumerate(nb_spikes):
            if nb_spikes[chan, unit] == 0:
                continue
            if lazy:
                times = []
                waveforms = None
                t_stop = 0
            else:
                times = stimearrays[chan, unit]
                t_stop = times.max()
                if load_spike_waveform:
                    if global_header['Version'] < 103:
                        gain = 3000. / (
                            2048 * dspChannelHeaders[chan]['Gain'] * 1000.)
                    elif global_header['Version'] >= 103 and global_header[
                            'Version'] < 105:
                        gain = global_header['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** global_header['BitsPerSpikeSample'] *
                            1000.)
                    elif global_header['Version'] >= 105:
                        gain = global_header['SpikeMaxMagnitudeMV'] / (
                            .5 * 2. ** global_header['BitsPerSpikeSample'] *
                            global_header['SpikePreAmpGain'])
                    waveforms = swfarrays[chan, unit] * gain * pq.V
                else:
                    waveforms = None
            sptr = SpikeTrain(
                times,
                units='s',
                t_stop=t_stop * pq.s,
                waveforms=waveforms,
                sampling_rate=float(global_header['WaveformFreq']) * pq.Hz,
            )
            sptr.annotate(unit_name=dspChannelHeaders[chan]['Name'])
            sptr.annotate(channel_index=chan)
            for key, val in dspChannelHeaders[chan].items():
                sptr.annotate(**{key: val})
            if lazy:
                sptr.lazy_shape = nb_spikes[chan, unit]
            seg.spiketrains.append(sptr)

        seg.create_many_to_one_relationship()
        return seg


GlobalHeader = [
    ('MagicNumber', 'I'),
    ('Version', 'i'),
    ('Comment', '128s'),
    ('ADFrequency', 'i'),
    ('NumDSPChannels', 'i'),
    ('NumEventChannels', 'i'),
    ('NumSlowChannels', 'i'),
    ('NumPointsWave', 'i'),
    ('NumPointsPreThr', 'i'),
    ('Year', 'i'),
    ('Month', 'i'),
    ('Day', 'i'),
    ('Hour', 'i'),
    ('Minute', 'i'),
    ('Second', 'i'),
    ('FastRead', 'i'),
    ('WaveformFreq', 'i'),
    ('LastTimestamp', 'd'),
    # version >= 103
    ('Trodalness', 'b'),
    ('DataTrodalness', 'b'),
    ('BitsPerSpikeSample', 'b'),
    ('BitsPerSlowSample', 'b'),
    ('SpikeMaxMagnitudeMV', 'H'),
    ('SlowMaxMagnitudeMV', 'H'),
    # version 105
    ('SpikePreAmpGain', 'H'),
    # version 106
    ('AcquiringSoftware', '18s'),
    ('ProcessingSoftware', '18s'),
    ('Padding', '10s'),
    # all versions
    ('TSCounts', '650i'),
    ('WFCounts', '650i'),
    ('EVCounts', '512i'),
]
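
# Per the Plexon header file, TSCounts and WFCounts are per-channel,
# per-unit timestamp and waveform count tables (130 channels x 5 units =
# 650 ints), and EVCounts covers 512 event channels; this reader keeps them
# only as segment annotations.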

ChannelHeader = [
    ('Name', '32s'),
    ('SIGName', '32s'),
    ('Channel', 'i'),
    ('WFRate', 'i'),
    ('SIG', 'i'),
    ('Ref', 'i'),
    ('Gain', 'i'),
    ('Filter', 'i'),
    ('Threshold', 'i'),
    ('Method', 'i'),
    ('NUnits', 'i'),
    ('Template', '320h'),
    ('Fit', '5i'),
    ('SortWidth', 'i'),
    ('Boxes', '40h'),
    ('SortBeg', 'i'),
    # version 105
    ('Comment', '128s'),
    # version 106
    ('SrcId', 'b'),
    ('reserved', 'b'),
    ('ChanId', 'H'),
    ('Padding', '10i'),
]

EventHeader = [
    ('Name', '32s'),
    ('Channel', 'i'),
    # version 105
    ('Comment', '128s'),
    # version 106
    ('SrcId', 'b'),
    ('reserved', 'b'),
    ('ChanId', 'H'),
    ('Padding', '32i'),
]

SlowChannelHeader = [
    ('Name', '32s'),
    ('Channel', 'i'),
    ('ADFreq', 'i'),
    ('Gain', 'i'),
    ('Enabled', 'i'),
    ('PreampGain', 'i'),
    # version 104
    ('SpikeChannel', 'i'),
    # version 105
    ('Comment', '128s'),
    # version 106
    ('SrcId', 'b'),
    ('reserved', 'b'),
    ('ChanId', 'H'),
    ('Padding', '27i'),
]

DataBlockHeader = [
    ('Type', 'h'),
    ('UpperByteOf5ByteTimestamp', 'h'),
    ('TimeStamp', 'i'),
    ('Channel', 'h'),
    ('Unit', 'h'),
    ('NumberOfWaveforms', 'h'),
    ('NumberOfWordsInWaveform', 'h'),
]  # 16 bytes
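
# A small self-check: HeaderReader.read_f below unpacks each field with its
# own struct call, so the size of a block on disk is the sum of the
# per-field sizes, which should match the 16 bytes noted above.
assert sum(struct.calcsize(fmt) for _, fmt in DataBlockHeader) == 16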


class HeaderReader():
    def __init__(self, fid, description):
        self.fid = fid
        self.description = description

    def read_f(self, offset=None):
        if offset is not None:
            self.fid.seek(offset)
        d = {}
        for key, fmt in self.description:
            buf = self.fid.read(struct.calcsize(fmt))
            if len(buf) != struct.calcsize(fmt):
                # truncated read: signal end of file to the caller
                return None
            val = list(struct.unpack(fmt, buf))
            for i, ival in enumerate(val):
                if hasattr(ival, 'replace'):
                    # strip control and NUL padding bytes, then decode strings
                    ival = ival.replace(b'\x03', b'')
                    ival = ival.replace(b'\x00', b'')
                    val[i] = ival.decode('utf-8')
            if len(val) == 1:
                val = val[0]
            d[key] = val
        return d
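

# A minimal usage sketch of HeaderReader on a synthetic data block. The
# field values are made up for illustration, and io.BytesIO stands in for a
# real .plx file handle.
if __name__ == '__main__':
    import io

    fake_block = struct.pack('hhihhhh', 1, 0, 1234, 3, 1, 1, 64)
    header = HeaderReader(io.BytesIO(fake_block), DataBlockHeader).read_f(
        offset=0)
    print(header)  # {'Type': 1, ..., 'TimeStamp': 1234, 'Channel': 3, ...}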