# plexonio.py
  1. # -*- coding: utf-8 -*-
  2. """
  3. Class for reading data from Plexon acquisition system (.plx)
  4. Compatible with versions 100 to 106.
  5. Other versions have not been tested.
  6. This IO is developed thanks to the header file downloadable from:
  7. http://www.plexon.com/downloads.html
  8. Supported: Read
  9. Author: sgarcia
  10. """
  11. from __future__ import unicode_literals, print_function, division
  12. import datetime
  13. import struct
  14. import os
  15. import numpy as np
  16. import quantities as pq
  17. from neo.io.baseio import BaseIO
  18. from neo.core import Segment, AnalogSignal, SpikeTrain, Event
  19. class PlexonIO(BaseIO):
  20. """
  21. Class for reading data from Plexon acquisition systems (.plx)
  22. Compatible with versions 100 to 106.
  23. Other versions have not been tested.
  24. Usage:
  25. >>> from neo import io
  26. >>> r = io.PlexonIO(filename='File_plexon_1.plx')
  27. >>> seg = r.read_segment(lazy=False, cascade=True)
  28. >>> print seg.analogsignals
  29. []
  30. >>> print seg.spiketrains # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
  31. [<SpikeTrain(array([ 2.75000000e-02, 5.68250000e-02, ...,
  32. ...
  33. >>> print seg.events
  34. []
  35. """
  36. is_readable = True
  37. is_writable = False
  38. supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]
  39. readable_objects = [Segment]
  40. writeable_objects = []
  41. has_header = False
  42. is_streameable = False
  43. # This is for GUI stuff: a definition for parameters when reading.
  44. read_params = {
  45. Segment: [
  46. ('load_spike_waveform', {'value': False}),
  47. ]
  48. }
  49. write_params = None
  50. name = 'Plexon'
  51. extensions = ['plx']
  52. mode = 'file'
  53. def __init__(self, filename=None):
  54. """
  55. Arguments:
  56. filename : the filename
  57. """
  58. BaseIO.__init__(self, filename)
  59. def read_segment(self, lazy=False, cascade=True, load_spike_waveform=True):
  60. """
  61. Read in a segment.
  62. Arguments:
  63. load_spike_waveform : load or not waveform of spikes (default True)
  64. """
  65. fid = open(self.filename, 'rb')
  66. global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
  67. # metadatas
  68. seg = Segment()
  69. seg.rec_datetime = datetime.datetime(global_header['Year'],
  70. global_header['Month'],
  71. global_header['Day'],
  72. global_header['Hour'],
  73. global_header['Minute'],
  74. global_header['Second'])
  75. seg.file_origin = os.path.basename(self.filename)
  76. seg.annotate(plexon_version=global_header['Version'])
  77. for key, val in global_header.items():
  78. seg.annotate(**{key: val})
  79. if not cascade:
  80. fid.close()
  81. return seg
  82. ## Step 1 : read headers
  83. # dsp channels header = spikes and waveforms
  84. dspChannelHeaders = {}
  85. maxunit = 0
  86. maxchan = 0
  87. for _ in range(global_header['NumDSPChannels']):
  88. # channel is 1 based
  89. channelHeader = HeaderReader(fid, ChannelHeader).read_f(offset=None)
  90. channelHeader['Template'] = np.array(channelHeader['Template']).reshape((5,64))
  91. channelHeader['Boxes'] = np.array(channelHeader['Boxes']).reshape((5,2,4))
  92. dspChannelHeaders[channelHeader['Channel']] = channelHeader
  93. maxunit = max(channelHeader['NUnits'], maxunit)
  94. maxchan = max(channelHeader['Channel'], maxchan)
  95. # event channel header
  96. eventHeaders = {}
  97. for _ in range(global_header['NumEventChannels']):
  98. eventHeader = HeaderReader(fid, EventHeader).read_f(offset=None)
  99. eventHeaders[eventHeader['Channel']] = eventHeader
  100. # slow channel header = signal
  101. slowChannelHeaders = {}
  102. for _ in range(global_header['NumSlowChannels']):
  103. slowChannelHeader = HeaderReader(fid, SlowChannelHeader).read_f(
  104. offset=None)
  105. slowChannelHeaders[slowChannelHeader['Channel']] = \
  106. slowChannelHeader
  107. ## Step 2 : a first loop for counting size
  108. # signal
  109. nb_samples = np.zeros(len(slowChannelHeaders), dtype='int64')
  110. sample_positions = np.zeros(len(slowChannelHeaders), dtype='int64')
  111. t_starts = np.zeros(len(slowChannelHeaders), dtype='float64')
  112. #spiketimes and waveform
  113. nb_spikes = np.zeros((maxchan + 1, maxunit + 1), dtype='i')
  114. wf_sizes = np.zeros((maxchan + 1, maxunit + 1, 2), dtype='i')
  115. # eventarrays
  116. nb_events = {}
  117. #maxstrsizeperchannel = { }
  118. for chan, h in eventHeaders.items():
  119. nb_events[chan] = 0
  120. #maxstrsizeperchannel[chan] = 0
  121. start = fid.tell()
  122. while fid.tell() != -1:
  123. # read block header
  124. dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
  125. offset=None)
  126. if dataBlockHeader is None:
  127. break
  128. chan = dataBlockHeader['Channel']
  129. unit = dataBlockHeader['Unit']
  130. n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader[
  131. 'NumberOfWordsInWaveform']
  132. time = (dataBlockHeader['UpperByteOf5ByteTimestamp'] * 2. ** 32 +
  133. dataBlockHeader['TimeStamp'])
  134. if dataBlockHeader['Type'] == 1:
  135. nb_spikes[chan, unit] += 1
  136. wf_sizes[chan, unit, :] = [n1, n2]
  137. fid.seek(n1 * n2 * 2, 1)
  138. elif dataBlockHeader['Type'] == 4:
  139. #event
  140. nb_events[chan] += 1
  141. elif dataBlockHeader['Type'] == 5:
  142. #continuous signal
  143. fid.seek(n2 * 2, 1)
  144. if n2 > 0:
  145. nb_samples[chan] += n2
  146. if nb_samples[chan] == 0:
  147. t_starts[chan] = time
  148. ## Step 3: allocating memory and 2 loop for reading if not lazy
  149. if not lazy:
  150. # allocating mem for signal
  151. sigarrays = {}
  152. for chan, h in slowChannelHeaders.items():
  153. sigarrays[chan] = np.zeros(nb_samples[chan])
  154. # allocating mem for SpikeTrain
  155. stimearrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
  156. swfarrays = np.zeros((maxchan + 1, maxunit + 1), dtype=object)
  157. for (chan, unit), _ in np.ndenumerate(nb_spikes):
  158. stimearrays[chan, unit] = np.zeros(nb_spikes[chan, unit],
  159. dtype='f')
  160. if load_spike_waveform:
  161. n1, n2 = wf_sizes[chan, unit, :]
  162. swfarrays[chan, unit] = np.zeros(
  163. (nb_spikes[chan, unit], n1, n2), dtype='f4')
  164. pos_spikes = np.zeros(nb_spikes.shape, dtype='i')
  165. # allocating mem for event
  166. eventpositions = {}
  167. evarrays = {}
  168. for chan, nb in nb_events.items():
  169. evarrays[chan] = {
  170. 'times': np.zeros(nb, dtype='f'),
  171. 'labels': np.zeros(nb, dtype='S4')
  172. }
  173. eventpositions[chan]=0
  174. fid.seek(start)
  175. while fid.tell() != -1:
  176. dataBlockHeader = HeaderReader(fid, DataBlockHeader).read_f(
  177. offset=None)
  178. if dataBlockHeader is None:
  179. break
  180. chan = dataBlockHeader['Channel']
  181. n1, n2 = dataBlockHeader['NumberOfWaveforms'], dataBlockHeader[
  182. 'NumberOfWordsInWaveform']
  183. time = dataBlockHeader['UpperByteOf5ByteTimestamp'] * \
  184. 2. ** 32 + dataBlockHeader['TimeStamp']
  185. time /= global_header['ADFrequency']
  186. if n2 < 0:
  187. break
  188. if dataBlockHeader['Type'] == 1:
  189. #spike
  190. unit = dataBlockHeader['Unit']
  191. pos = pos_spikes[chan, unit]
  192. stimearrays[chan, unit][pos] = time
  193. if load_spike_waveform and n1 * n2 != 0:
  194. swfarrays[chan, unit][pos, :, :] = np.fromstring(
  195. fid.read(n1 * n2 * 2), dtype='i2'
  196. ).reshape(n1, n2).astype('f4')
  197. else:
  198. fid.seek(n1 * n2 * 2, 1)
  199. pos_spikes[chan, unit] += 1
  200. elif dataBlockHeader['Type'] == 4:
  201. # event
  202. pos = eventpositions[chan]
  203. evarrays[chan]['times'][pos] = time
  204. evarrays[chan]['labels'][pos] = dataBlockHeader['Unit']
  205. eventpositions[chan]+= 1
  206. elif dataBlockHeader['Type'] == 5:
  207. #signal
  208. data = np.fromstring(
  209. fid.read(n2 * 2), dtype='i2').astype('f4')
  210. sigarrays[chan][sample_positions[chan]:
  211. sample_positions[chan]+data.size] = data
  212. sample_positions[chan] += data.size
  213. ## Step 4: create neo object
  214. for chan, h in eventHeaders.items():
  215. if lazy:
  216. times = []
  217. labels = None
  218. else:
  219. times = evarrays[chan]['times']
  220. labels = evarrays[chan]['labels']
  221. ea = Event(
  222. times*pq.s,
  223. labels=labels,
  224. channel_name=eventHeaders[chan]['Name'],
  225. channel_index=chan
  226. )
  227. if lazy:
  228. ea.lazy_shape = nb_events[chan]
  229. seg.events.append(ea)
  230. for chan, h in slowChannelHeaders.items():
  231. if lazy:
  232. signal = []
  233. else:
  234. if global_header['Version'] == 100 or global_header[
  235. 'Version'] == 101:
  236. gain = 5000. / (
  237. 2048 * slowChannelHeaders[chan]['Gain'] * 1000.)
  238. elif global_header['Version'] == 102:
  239. gain = 5000. / (2048 * slowChannelHeaders[chan]['Gain'] *
  240. slowChannelHeaders[chan]['PreampGain'])
  241. elif global_header['Version'] >= 103:
  242. gain = global_header['SlowMaxMagnitudeMV'] / (
  243. .5 * (2 ** global_header['BitsPerSpikeSample']) *
  244. slowChannelHeaders[chan]['Gain'] *
  245. slowChannelHeaders[chan]['PreampGain'])
  246. signal = sigarrays[chan] * gain
  247. anasig = AnalogSignal(
  248. signal,
  249. sampling_rate=float(
  250. slowChannelHeaders[chan]['ADFreq']) * pq.Hz,
  251. units='mV',
  252. t_start=t_starts[chan] * pq.s,
  253. channel_index=slowChannelHeaders[chan]['Channel'],
  254. channel_name=slowChannelHeaders[chan]['Name'])
  255. if lazy:
  256. anasig.lazy_shape = nb_samples[chan]
  257. seg.analogsignals.append(anasig)
  258. for (chan, unit), value in np.ndenumerate(nb_spikes):
  259. if nb_spikes[chan, unit] == 0:
  260. continue
  261. if lazy:
  262. times = []
  263. waveforms = None
  264. t_stop = 0
  265. else:
  266. times = stimearrays[chan, unit]
  267. t_stop = times.max()
  268. if load_spike_waveform:
  269. if global_header['Version'] < 103:
  270. gain = 3000. / (
  271. 2048 * dspChannelHeaders[chan]['Gain'] * 1000.)
  272. elif global_header['Version'] >= 103 and global_header[
  273. 'Version'] < 105:
  274. gain = global_header['SpikeMaxMagnitudeMV'] / (
  275. .5 * 2. ** (global_header['BitsPerSpikeSample']) *
  276. dspChannelHeaders[chan]['Gain'] *
  277. 1000.)
  278. elif global_header['Version'] > 105:
  279. gain = global_header['SpikeMaxMagnitudeMV'] / (
  280. .5 * 2. ** (global_header['BitsPerSpikeSample']) *
  281. dspChannelHeaders[chan]['Gain'] *
  282. global_header['SpikePreAmpGain'])
  283. waveforms = swfarrays[chan, unit] * gain * pq.mV
  284. else:
  285. waveforms = None
  286. if global_header['WaveformFreq']>0:
  287. wf_sampling_rate= float(global_header['WaveformFreq']) * pq.Hz
  288. else:
  289. wf_sampling_rate = float(global_header['ADFrequency']) * pq.Hz
  290. sptr = SpikeTrain(
  291. times,
  292. units='s',
  293. t_stop=t_stop*pq.s,
  294. waveforms=waveforms,
  295. sampling_rate=wf_sampling_rate,
  296. )
  297. sptr.annotate(unit_name = dspChannelHeaders[chan]['Name'])
  298. sptr.annotate(channel_index = chan)
  299. for key, val in dspChannelHeaders[chan].items():
  300. sptr.annotate(**{key: val})
  301. if lazy:
  302. sptr.lazy_shape = nb_spikes[chan, unit]
  303. seg.spiketrains.append(sptr)
  304. seg.create_many_to_one_relationship()
  305. fid.close()
  306. return seg
  307. GlobalHeader = [
  308. ('MagicNumber', 'I'),
  309. ('Version', 'i'),
  310. ('Comment', '128s'),
  311. ('ADFrequency', 'i'), #this rate is only for events
  312. ('NumDSPChannels', 'i'),
  313. ('NumEventChannels', 'i'),
  314. ('NumSlowChannels', 'i'),
  315. ('NumPointsWave', 'i'),
  316. ('NumPointsPreThr', 'i'),
  317. ('Year', 'i'),
  318. ('Month', 'i'),
  319. ('Day', 'i'),
  320. ('Hour', 'i'),
  321. ('Minute', 'i'),
  322. ('Second', 'i'),
  323. ('FastRead', 'i'),
  324. ('WaveformFreq', 'i'),
  325. ('LastTimestamp', 'd'),
  326. # version >103
  327. ('Trodalness', 'b'),
  328. ('DataTrodalness', 'b'),
  329. ('BitsPerSpikeSample', 'b'),
  330. ('BitsPerSlowSample', 'b'),
  331. ('SpikeMaxMagnitudeMV', 'H'),
  332. ('SlowMaxMagnitudeMV', 'H'),
  333. #version 105
  334. ('SpikePreAmpGain', 'H'),
  335. #version 106
  336. ('AcquiringSoftware', '18s'),
  337. ('ProcessingSoftware', '18s'),
  338. ('Padding', '10s'),
  339. # all version
  340. ('TSCounts', '650i'),
  341. ('WFCounts', '650i'),
  342. ('EVCounts', '512i'),
  343. ]
  344. ChannelHeader = [
  345. ('Name', '32s'),
  346. ('SIGName', '32s'),
  347. ('Channel', 'i'),
  348. ('WFRate', 'i'), #this is not the waveform sampling rate!!!
  349. ('SIG', 'i'),
  350. ('Ref', 'i'),
  351. ('Gain', 'i'),
  352. ('Filter', 'i'),
  353. ('Threshold', 'i'),
  354. ('Method', 'i'),
  355. ('NUnits', 'i'),
  356. ('Template', '320h'),
  357. ('Fit', '5i'),
  358. ('SortWidth', 'i'),
  359. ('Boxes', '40h'),
  360. ('SortBeg', 'i'),
  361. # version 105
  362. ('Comment', '128s'),
  363. #version 106
  364. ('SrcId', 'b'),
  365. ('reserved', 'b'),
  366. ('ChanId', 'H'),
  367. ('Padding', '10i'),
  368. ]
  369. EventHeader = [
  370. ('Name', '32s'),
  371. ('Channel', 'i'),
  372. # version 105
  373. ('Comment', '128s'),
  374. #version 106
  375. ('SrcId', 'b'),
  376. ('reserved', 'b'),
  377. ('ChanId', 'H'),
  378. ('Padding', '32i'),
  379. ]
  380. SlowChannelHeader = [
  381. ('Name', '32s'),
  382. ('Channel', 'i'),
  383. ('ADFreq', 'i'),
  384. ('Gain', 'i'),
  385. ('Enabled', 'i'),
  386. ('PreampGain', 'i'),
  387. # version 104
  388. ('SpikeChannel', 'i'),
  389. #version 105
  390. ('Comment', '128s'),
  391. #version 106
  392. ('SrcId', 'b'),
  393. ('reserved', 'b'),
  394. ('ChanId', 'H'),
  395. ('Padding', '27i'),
  396. ]
  397. DataBlockHeader = [
  398. ('Type', 'h'),
  399. ('UpperByteOf5ByteTimestamp', 'h'),
  400. ('TimeStamp', 'i'),
  401. ('Channel', 'h'),
  402. ('Unit', 'h'),
  403. ('NumberOfWaveforms', 'h'),
  404. ('NumberOfWordsInWaveform', 'h'),
  405. ] # 16 bytes
  406. class HeaderReader():
  407. def __init__(self, fid, description):
  408. self.fid = fid
  409. self.description = description
  410. def read_f(self, offset=None):
  411. if offset is not None:
  412. self.fid.seek(offset)
  413. d = {}
  414. for key, fmt in self.description:
  415. buf = self.fid.read(struct.calcsize(fmt))
  416. if len(buf) != struct.calcsize(fmt):
  417. return None
  418. val = list(struct.unpack(fmt, buf))
  419. for i, ival in enumerate(val):
  420. if hasattr(ival, 'replace'):
  421. ival = ival.replace(b'\x03', b'')
  422. ival = ival.replace(b'\x00', b'')
  423. val[i] = ival.decode("utf-8")
  424. if len(val) == 1:
  425. val = val[0]
  426. d[key] = val
  427. return d