
spike2io.py

# -*- coding: utf-8 -*-
"""
Class for reading data in CED Spike2 files (.smr).

This code is based on:
 - sonpy, written by Antonio Gonzalez <Antonio.Gonzalez@cantab.net>
   Available here:
   http://www.neuro.ki.se/broberger/

sonpy itself comes from:
 - SON Library 2.0 for MATLAB, written by Malcolm Lidierth at
   King's College London.
   See http://www.kcl.ac.uk/depsta/biomedical/cfnr/lidierth.html

This IO supports both old (<v6) and new (>v7) Spike2 files.

Depends on: numpy, quantities

Supported: Read

Author: sgarcia
"""

import os
import sys

import numpy as np
import quantities as pq

from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, SpikeTrain, Event

PY3K = (sys.version_info[0] == 3)


class Spike2IO(BaseIO):
    """
    Class for reading data from CED Spike2 files.

    Usage:
        >>> from neo import io
        >>> r = io.Spike2IO(filename='File_spike2_1.smr')
        >>> seg = r.read_segment(lazy=False, cascade=True)
        >>> print(seg.analogsignals)
        >>> print(seg.spiketrains)
        >>> print(seg.events)
    """

    is_readable = True
    is_writable = False

    supported_objects = [Segment, AnalogSignal, Event, SpikeTrain]
    readable_objects = [Segment]
    writeable_objects = []

    has_header = False
    is_streameable = False

    read_params = {Segment: [('take_ideal_sampling_rate', {'value': False})]}
    write_params = None

    name = 'Spike 2 CED'
    extensions = ['smr']
    mode = 'file'

    ced_units = False

    def __init__(self, filename=None, ced_units=False):
        """
        This class reads an smr file.

        Arguments:
            filename : the filename
            ced_units: whether a spike train should be added for each unit
                as determined by Spike2's spike sorting (True), or whether a
                spike channel should be treated as a single unit, ignoring
                Spike2's spike sorting (False). Defaults to False.
        """
        BaseIO.__init__(self)
        self.filename = filename
        self.ced_units = ced_units

    def read_segment(self, take_ideal_sampling_rate=False,
                     lazy=False, cascade=True):
        """
        Arguments:
            take_ideal_sampling_rate: if True, use the channel's ideal_rate
                field instead of deriving the rate from the file timebase.
            lazy: if True, return objects carrying only their shape, no data.
            cascade: if False, return an empty Segment.
        """
        header = self.read_header(filename=self.filename)
        #~ print header
        fid = open(self.filename, 'rb')

        seg = Segment(
            file_origin=os.path.basename(self.filename),
            ced_version=str(header.system_id),
        )

        if not cascade:
            return seg

        def addannotations(ob, channelHeader):
            ob.annotate(title=channelHeader.title)
            ob.annotate(physical_channel_index=channelHeader.phy_chan)
            ob.annotate(comment=channelHeader.comment)

        for i in range(header.channels):
            channelHeader = header.channelHeaders[i]

            #~ print 'channel', i, 'kind', channelHeader.kind
            if channelHeader.kind != 0:
                #~ print '####'
                #~ print 'channel', i, 'kind', channelHeader.kind, \
                #~     channelHeader.type, channelHeader.phy_chan
                #~ print channelHeader
                pass

            if channelHeader.kind in [1, 9]:
                #~ print 'analog channel'
                ana_sigs = self.read_one_channel_continuous(
                    fid, i, header, take_ideal_sampling_rate, lazy=lazy)
                #~ print 'nb sigs', len(ana_sigs), 'sizes:',
                for anaSig in ana_sigs:
                    addannotations(anaSig, channelHeader)
                    anaSig.name = str(anaSig.annotations['title'])
                    seg.analogsignals.append(anaSig)
                    #~ print sig.signal.size,
                #~ print ''
            elif channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = self.read_one_channel_event_or_spike(
                    fid, i, header, lazy=lazy)
                if ea is not None:
                    addannotations(ea, channelHeader)
                    seg.events.append(ea)
            elif channelHeader.kind in [6, 7]:
                sptrs = self.read_one_channel_event_or_spike(
                    fid, i, header, lazy=lazy)
                if sptrs is not None:
                    for sptr in sptrs:
                        addannotations(sptr, channelHeader)
                        seg.spiketrains.append(sptr)

        fid.close()
        seg.create_many_to_one_relationship()
        return seg

    def read_header(self, filename=''):
        fid = open(filename, 'rb')
        header = HeaderReader(fid, np.dtype(headerDescription))
        #~ print 'chan_size', header.chan_size

        if header.system_id < 6:
            header.dtime_base = 1e-6
            header.datetime_detail = 0
            header.datetime_year = 0

        channelHeaders = []
        for i in range(header.channels):
            # read global channel header
            fid.seek(512 + 140 * i)  # TODO: check whether this should be i or i - 1
            channelHeader = HeaderReader(fid,
                                         np.dtype(channelHeaderDesciption1))
            if channelHeader.kind in [1, 6]:
                dt = [('scale', 'f4'),
                      ('offset', 'f4'),
                      ('unit', 'S6'), ]
                channelHeader += HeaderReader(fid, np.dtype(dt))
                if header.system_id < 6:
                    channelHeader += HeaderReader(fid,
                                                  np.dtype([('divide', 'i2')]))
                else:
                    channelHeader += HeaderReader(fid,
                                                  np.dtype([('interleave', 'i2')]))
            if channelHeader.kind in [7, 9]:
                dt = [('min', 'f4'),
                      ('max', 'f4'),
                      ('unit', 'S6'), ]
                channelHeader += HeaderReader(fid, np.dtype(dt))
                if header.system_id < 6:
                    channelHeader += HeaderReader(fid,
                                                  np.dtype([('divide', 'i2')]))
                else:
                    channelHeader += HeaderReader(fid,
                                                  np.dtype([('interleave', 'i2')]))
            if channelHeader.kind in [4]:
                dt = [('init_low', 'u1'),
                      ('next_low', 'u1'), ]
                channelHeader += HeaderReader(fid, np.dtype(dt))

            channelHeader.type = dict_kind[channelHeader.kind]
            #~ print i, channelHeader
            channelHeaders.append(channelHeader)

        header.channelHeaders = channelHeaders

        fid.close()
        return header

    def read_one_channel_continuous(self, fid, channel_num, header,
                                    take_ideal_sampling_rate, lazy=True):
        # read an AnalogSignal
        channelHeader = header.channelHeaders[channel_num]

        # data type
        if channelHeader.kind == 1:
            dt = np.dtype('i2')
        elif channelHeader.kind == 9:
            dt = np.dtype('f4')

        # sample rate
        if take_ideal_sampling_rate:
            sampling_rate = channelHeader.ideal_rate * pq.Hz
        else:
            if header.system_id in [1, 2, 3, 4, 5]:  # system version 5 or earlier
                #~ print channel_num, channelHeader.divide, \
                #~     header.us_per_time, header.time_per_adc
                sample_interval = (channelHeader.divide * header.us_per_time *
                                   header.time_per_adc) * 1e-6
            else:
                sample_interval = (channelHeader.l_chan_dvd *
                                   header.us_per_time * header.dtime_base)
            sampling_rate = (1. / sample_interval) * pq.Hz

        # read block headers to preallocate memory, jumping from block to block
        if channelHeader.blocks == 0:
            return []
        fid.seek(channelHeader.firstblock)
        blocksize = [0]
        starttimes = []
        for b in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            if len(blocksize) > len(starttimes):
                starttimes.append(blockHeader.start_time)
            blocksize[-1] += blockHeader.items

            if blockHeader.succ_block > 0:
                # ugly, but CED does not guarantee continuity in AnalogSignal
                fid.seek(blockHeader.succ_block)
                nextBlockHeader = HeaderReader(fid,
                                               np.dtype(blockHeaderDesciption))
                sample_interval = ((blockHeader.end_time -
                                    blockHeader.start_time) /
                                   (blockHeader.items - 1))
                interval_with_next = (nextBlockHeader.start_time -
                                      blockHeader.end_time)
                if interval_with_next > sample_interval:
                    blocksize.append(0)
                fid.seek(blockHeader.succ_block)

        ana_sigs = []
        if channelHeader.unit in unit_convert:
            unit = pq.Quantity(1, unit_convert[channelHeader.unit])
        else:
            #~ print channelHeader.unit
            try:
                unit = pq.Quantity(1, channelHeader.unit)
            except Exception:
                unit = pq.Quantity(1, '')

        for b, bs in enumerate(blocksize):
            if lazy:
                signal = [] * unit
            else:
                signal = pq.Quantity(np.empty(bs, dtype='f4'), units=unit)
            ana_sig = AnalogSignal(
                signal, sampling_rate=sampling_rate,
                t_start=(starttimes[b] * header.us_per_time *
                         header.dtime_base * pq.s),
                channel_index=channel_num)
            ana_sigs.append(ana_sig)

        if lazy:
            for s, ana_sig in enumerate(ana_sigs):
                ana_sig.lazy_shape = blocksize[s]
        else:
            # read data by jumping from block to block
            fid.seek(channelHeader.firstblock)
            pos = 0
            numblock = 0
            for b in range(channelHeader.blocks):
                blockHeader = HeaderReader(
                    fid, np.dtype(blockHeaderDesciption))
                # read data
                sig = np.fromstring(fid.read(blockHeader.items * dt.itemsize),
                                    dtype=dt)
                ana_sigs[numblock][pos:pos + sig.size] = (
                    sig.reshape(-1, 1).astype('f4') * unit)
                pos += sig.size
                if pos >= blocksize[numblock]:
                    numblock += 1
                    pos = 0
                # jump to next block
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

        # convert int16 samples to physical units
        if dt.kind == 'i':
            for ana_sig in ana_sigs:
                ana_sig *= channelHeader.scale / 6553.6
                ana_sig += channelHeader.offset * unit

        return ana_sigs

    def read_one_channel_event_or_spike(self, fid, channel_num, header,
                                        lazy=True):
        # returns an Event or a list of SpikeTrains
        channelHeader = header.channelHeaders[channel_num]
        if channelHeader.firstblock < 0:
            return
        if channelHeader.kind not in [2, 3, 4, 5, 6, 7, 8]:
            return

        ## Step 1 : determine the block record type
        if channelHeader.kind in [2, 3, 4]:
            # Event data
            fmt = [('tick', 'i4')]
        elif channelHeader.kind in [5]:
            # Marker data
            fmt = [('tick', 'i4'), ('marker', 'i4')]
        elif channelHeader.kind in [6]:
            # AdcMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('adc', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [7]:
            # RealMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('real', 'S%d' % channelHeader.n_extra)]
        elif channelHeader.kind in [8]:
            # TextMark data
            fmt = [('tick', 'i4'), ('marker', 'i4'),
                   ('label', 'S%d' % channelHeader.n_extra)]
        dt = np.dtype(fmt)

        ## Step 2 : first pass over the blocks to count items for allocation
        fid.seek(channelHeader.firstblock)
        totalitems = 0
        for _ in range(channelHeader.blocks):
            blockHeader = HeaderReader(fid, np.dtype(blockHeaderDesciption))
            totalitems += blockHeader.items
            if blockHeader.succ_block > 0:
                fid.seek(blockHeader.succ_block)
        #~ print 'totalitems', totalitems

        if lazy:
            if channelHeader.kind in [2, 3, 4, 5, 8]:
                ea = Event()
                ea.annotate(channel_index=channel_num)
                ea.lazy_shape = totalitems
                return ea
            elif channelHeader.kind in [6, 7]:
                # correct value for t_stop to be put in later
                sptr = SpikeTrain([] * pq.s, t_stop=1e99)
                sptr.annotate(channel_index=channel_num, ced_unit=0)
                sptr.lazy_shape = totalitems
                return sptr
        else:
            alltrigs = np.zeros(totalitems, dtype=dt)
            ## Step 3 : read the data
            fid.seek(channelHeader.firstblock)
            pos = 0
            for _ in range(channelHeader.blocks):
                blockHeader = HeaderReader(
                    fid, np.dtype(blockHeaderDesciption))
                # read all events in the block
                trigs = np.fromstring(
                    fid.read(blockHeader.items * dt.itemsize), dtype=dt)
                alltrigs[pos:pos + trigs.size] = trigs
                pos += trigs.size
                if blockHeader.succ_block > 0:
                    fid.seek(blockHeader.succ_block)

            ## Step 4 : convert to neo standard classes: Event or SpikeTrain
            alltimes = (alltrigs['tick'].astype('f') *
                        header.us_per_time * header.dtime_base * pq.s)

            if channelHeader.kind in [2, 3, 4, 5, 8]:
                # events
                ea = Event(alltimes)
                ea.annotate(channel_index=channel_num)
                if channelHeader.kind >= 5:
                    # a Spike2 marker is closest to neo's notion of a label
                    ea.labels = alltrigs['marker'].astype('S32')
                if channelHeader.kind == 8:
                    ea.annotate(extra_labels=alltrigs['label'])
                return ea

            elif channelHeader.kind in [6, 7]:
                # spiketrains
                # waveforms
                if channelHeader.kind == 6:
                    waveforms = np.fromstring(alltrigs['adc'].tostring(),
                                              dtype='i2')
                    waveforms = (waveforms.astype('f4') *
                                 channelHeader.scale / 6553.6 +
                                 channelHeader.offset)
                elif channelHeader.kind == 7:
                    waveforms = np.fromstring(alltrigs['real'].tostring(),
                                              dtype='f4')

                if header.system_id >= 6 and channelHeader.interleave > 1:
                    waveforms = waveforms.reshape(
                        (alltimes.size, -1, channelHeader.interleave))
                    waveforms = waveforms.swapaxes(1, 2)
                else:
                    waveforms = waveforms.reshape((alltimes.size, 1, -1))

                if header.system_id in [1, 2, 3, 4, 5]:
                    sample_interval = (channelHeader.divide *
                                       header.us_per_time *
                                       header.time_per_adc) * 1e-6
                else:
                    sample_interval = (channelHeader.l_chan_dvd *
                                       header.us_per_time *
                                       header.dtime_base)

                if channelHeader.unit in unit_convert:
                    unit = pq.Quantity(1, unit_convert[channelHeader.unit])
                else:
                    #~ print channelHeader.unit
                    try:
                        unit = pq.Quantity(1, channelHeader.unit)
                    except Exception:
                        unit = pq.Quantity(1, '')

                if len(alltimes) > 0:
                    # could a better value be taken from the associated AnalogSignal(s)?
                    t_stop = alltimes.max()
                else:
                    t_stop = 0.0

                if not self.ced_units:
                    sptr = SpikeTrain(alltimes,
                                      waveforms=waveforms * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=0)
                    return [sptr]

                sptrs = []
                for i in set(alltrigs['marker'] & 255):
                    sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
                                      waveforms=waveforms[alltrigs['marker'] == i] * unit,
                                      sampling_rate=(1. / sample_interval) * pq.Hz,
                                      t_stop=t_stop)
                    sptr.annotate(channel_index=channel_num, ced_unit=i)
                    sptrs.append(sptr)
                return sptrs


class HeaderReader(object):
    def __init__(self, fid, dtype):
        if fid is not None:
            array = np.fromstring(fid.read(dtype.itemsize), dtype)[0]
        else:
            array = np.zeros(1, dtype=dtype)[0]
        super(HeaderReader, self).__setattr__('dtype', dtype)
        super(HeaderReader, self).__setattr__('array', array)

    def __setattr__(self, name, val):
        if name in self.dtype.names:
            self.array[name] = val
        else:
            super(HeaderReader, self).__setattr__(name, val)

    def __getattr__(self, name):
        #~ print name
        if name in self.dtype.names:
            if self.dtype[name].kind == 'S':
                # Pascal-style string: the first byte holds the length
                if PY3K:
                    # the field is bytes in Python 3, so indexing gives the
                    # length byte directly
                    l = self.array[name][0]
                else:
                    l = np.fromstring(self.array[name][0], 'u1')[0]
                return self.array[name][1:l + 1]
            else:
                return self.array[name]

    def names(self):
        return self.array.dtype.names

    def __repr__(self):
        s = 'HEADER\n'
        for name in self.dtype.names:
            #~ if self.dtype[name].kind != 'S':
            #~     s += name + self.__getattr__(name)
            s += '{}: {}\n'.format(name, getattr(self, name))
        return s

    def __add__(self, header2):
        #~ print 'add', self.dtype, header2.dtype
        newdtype = []
        for name in self.dtype.names:
            newdtype.append((name, self.dtype[name].str))
        for name in header2.dtype.names:
            newdtype.append((name, header2.dtype[name].str))
        newdtype = np.dtype(newdtype)
        newHeader = HeaderReader(None, newdtype)
        newHeader.array = np.fromstring(
            self.array.tostring() + header2.array.tostring(), newdtype)[0]
        return newHeader


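# Illustrative sketch only (comments, not executed): consecutive HeaderReader
# instances are merged with "+" above, and every field of the merged dtype is
# then readable as an attribute. The file name below is hypothetical.
#     fid = open('some_file.smr', 'rb')
#     h = HeaderReader(fid, np.dtype(headerDescription))
#     h.system_id, h.channels       # field access by name
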
# header structures :
headerDescription = [
    ('system_id', 'i2'),
    ('copyright', 'S10'),
    ('creator', 'S8'),
    ('us_per_time', 'i2'),
    ('time_per_adc', 'i2'),
    ('filestate', 'i2'),
    ('first_data', 'i4'),  # i8
    ('channels', 'i2'),
    ('chan_size', 'i2'),
    ('extra_data', 'i2'),
    ('buffersize', 'i2'),
    ('os_format', 'i2'),
    ('max_ftime', 'i4'),  # i8
    ('dtime_base', 'f8'),
    ('datetime_detail', 'u1'),
    ('datetime_year', 'i2'),
    ('pad', 'S52'),
    ('comment1', 'S80'),
    ('comment2', 'S80'),
    ('comment3', 'S80'),
    ('comment4', 'S80'),
    ('comment5', 'S80'),
]

channelHeaderDesciption1 = [
    ('del_size', 'i2'),
    ('next_del_block', 'i4'),  # i8
    ('firstblock', 'i4'),  # i8
    ('lastblock', 'i4'),  # i8
    ('blocks', 'i2'),
    ('n_extra', 'i2'),
    ('pre_trig', 'i2'),
    ('free0', 'i2'),
    ('py_sz', 'i2'),
    ('max_data', 'i2'),
    ('comment', 'S72'),
    ('max_chan_time', 'i4'),  # i8
    ('l_chan_dvd', 'i4'),  # i8
    ('phy_chan', 'i2'),
    ('title', 'S10'),
    ('ideal_rate', 'f4'),
    ('kind', 'u1'),
    ('unused1', 'i1'),
]

dict_kind = {
    0: 'empty',
    1: 'Adc',
    2: 'EventFall',
    3: 'EventRise',
    4: 'EventBoth',
    5: 'Marker',
    6: 'AdcMark',
    7: 'RealMark',
    8: 'TextMark',
    9: 'RealWave',
}

blockHeaderDesciption = [
    ('pred_block', 'i4'),  # i8
    ('succ_block', 'i4'),  # i8
    ('start_time', 'i4'),  # i8
    ('end_time', 'i4'),  # i8
    ('channel_num', 'i2'),
    ('items', 'i2'),
]

unit_convert = {
    'Volts': 'V',
}
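
# The block below is a minimal usage sketch, not part of the library itself:
# the filename 'my_recording.smr' is a hypothetical placeholder. It shows how
# the annotations added by read_segment() (title, physical_channel_index,
# channel_index, ced_unit) can be inspected; with ced_units=True, one
# SpikeTrain is returned per unit found by Spike2's spike sorting.
if __name__ == '__main__':
    reader = Spike2IO(filename='my_recording.smr', ced_units=True)
    segment = reader.read_segment(take_ideal_sampling_rate=False,
                                  lazy=False, cascade=True)

    for anasig in segment.analogsignals:
        print(anasig.name, anasig.sampling_rate,
              anasig.annotations['physical_channel_index'])

    for st in segment.spiketrains:
        print(st.annotations['channel_index'], st.annotations['ced_unit'],
              st.size)

    for ev in segment.events:
        print(ev.annotations['channel_index'], ev.times)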