# -*- coding: utf-8 -*-
"""
Class for reading data from Neuralynx files.
This IO supports NCS, NEV, NSE and NTT file formats.

NCS contains the sampled signal of one channel.
NEV contains events.
NSE contains spikes and waveforms for mono electrodes.
NTT contains spikes and waveforms for tetrodes.

NCS files can contain gaps, which show up as irregularities in the
timestamps of the data blocks. Each gap leads to a new segment.
Unfortunately, NCS files need to be read entirely to detect those gaps.

Author: Julia Sprenger, Carlos Canova, Samuel Garcia
"""
from __future__ import print_function, division, absolute_import
# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3

from .baserawio import (BaseRawIO, _signal_channel_dtype,
                        _unit_channel_dtype, _event_channel_dtype)

import numpy as np
import os
import re
import distutils.version
import datetime
from collections import OrderedDict

BLOCK_SIZE = 512  # nb of samples per signal block
HEADER_SIZE = 2 ** 14  # every file has a txt header of 16 kB


class NeuralynxRawIO(BaseRawIO):
    """
    Class for reading datasets recorded by Neuralynx.

    Examples:
        >>> reader = NeuralynxRawIO(dirname='Cheetah_v5.5.1/original_data')
        >>> reader.parse_header()

            Inspects all files in the directory.

        >>> print(reader)

            Displays all information about signal channels, units and segment sizes.
    """
    extensions = ['nse', 'ncs', 'nev', 'ntt']
    rawmode = 'one-dir'

    def __init__(self, dirname='', **kargs):
        self.dirname = dirname
        BaseRawIO.__init__(self, **kargs)

    def _source_name(self):
        return self.dirname

    def _parse_header(self):

        sig_channels = []
        unit_channels = []
        event_channels = []

        self.ncs_filenames = OrderedDict()  # chan_id: filename
        self.nse_ntt_filenames = OrderedDict()  # chan_id: filename
        self.nev_filenames = OrderedDict()  # chan_id: filename

        self._nev_memmap = {}
        self._spike_memmap = {}
        self.internal_unit_ids = []  # channel_index > (channel_id, unit_id)
        self.internal_event_ids = []
        self._empty_ncs = []  # this list contains filenames of empty records
        self._empty_nse_ntt = []

        # explore the directory looking for ncs, nev, nse and ntt files
        # and construct the channel headers
        signal_annotations = []
        unit_annotations = []
        event_annotations = []

        for filename in sorted(os.listdir(self.dirname)):
            filename = os.path.join(self.dirname, filename)

            _, ext = os.path.splitext(filename)
            ext = ext[1:]  # remove dot
            if ext not in self.extensions:
                continue

            if (os.path.getsize(filename) <= HEADER_SIZE) and (ext in ['ncs']):
                self._empty_ncs.append(filename)
                continue

            # all files have more or less the same header structure
            info = read_txt_header(filename)
            chan_names = info['channel_names']
            chan_ids = info['channel_ids']

            for idx, chan_id in enumerate(chan_ids):
                chan_name = chan_names[idx]
                if ext == 'ncs':
                    # a signal channel
                    units = 'uV'
                    gain = info['bit_to_microVolt'][idx]
                    if info['input_inverted']:
                        gain *= -1
                    offset = 0.
                    group_id = 0
                    sig_channels.append((chan_name, chan_id, info['sampling_rate'],
                                         'int16', units, gain, offset, group_id))
                    self.ncs_filenames[chan_id] = filename
                    keys = [
                        'DspFilterDelay_µs',
                        'recording_opened',
                        'FileType',
                        'DspDelayCompensation',
                        'recording_closed',
                        'DspLowCutFilterType',
                        'HardwareSubSystemName',
                        'DspLowCutNumTaps',
                        'DSPLowCutFilterEnabled',
                        'HardwareSubSystemType',
                        'DspHighCutNumTaps',
                        'ADMaxValue',
                        'DspLowCutFrequency',
                        'DSPHighCutFilterEnabled',
                        'RecordSize',
                        'InputRange',
                        'DspHighCutFrequency',
                        'input_inverted',
                        'NumADChannels',
                        'DspHighCutFilterType',
                    ]
                    d = {k: info[k] for k in keys if k in info}
                    signal_annotations.append(d)
                elif ext in ('nse', 'ntt'):
                    # nse and ntt are pretty similar except for the waveform shape;
                    # a file can contain several unit_id (so several unit channels)
                    assert chan_id not in self.nse_ntt_filenames, \
                        'Several nse or ntt files have the same unit_id!!!'
                    self.nse_ntt_filenames[chan_id] = filename

                    dtype = get_nse_or_ntt_dtype(info, ext)

                    if os.path.getsize(filename) <= HEADER_SIZE:
                        self._empty_nse_ntt.append(filename)
                        data = np.zeros((0,), dtype=dtype)
                    else:
                        data = np.memmap(filename, dtype=dtype, mode='r', offset=HEADER_SIZE)
                    self._spike_memmap[chan_id] = data

                    unit_ids = np.unique(data['unit_id'])
                    for unit_id in unit_ids:
                        # a spike channel for each (chan_id, unit_id)
                        self.internal_unit_ids.append((chan_id, unit_id))

                        unit_name = "ch{}#{}".format(chan_id, unit_id)
                        unit_id = '{}'.format(unit_id)

                        wf_units = 'uV'
                        wf_gain = info['bit_to_microVolt'][idx]
                        if info['input_inverted']:
                            wf_gain *= -1
                        wf_offset = 0.
                        wf_left_sweep = -1  # NOT KNOWN
                        wf_sampling_rate = info['sampling_rate']
                        unit_channels.append(
                            (unit_name, '{}'.format(unit_id), wf_units, wf_gain,
                             wf_offset, wf_left_sweep, wf_sampling_rate))
                        unit_annotations.append(dict(file_origin=filename))
                elif ext == 'nev':
                    # an event channel;
                    # each ('event_id', 'ttl_input') pair gives a new event channel
                    self.nev_filenames[chan_id] = filename
                    data = np.memmap(
                        filename, dtype=nev_dtype, mode='r', offset=HEADER_SIZE)
                    internal_ids = np.unique(
                        data[['event_id', 'ttl_input']]).tolist()
                    for internal_event_id in internal_ids:
                        if internal_event_id not in self.internal_event_ids:
                            event_id, ttl_input = internal_event_id
                            name = '{} event_id={} ttl={}'.format(
                                chan_name, event_id, ttl_input)
                            event_channels.append((name, chan_id, 'event'))
                            self.internal_event_ids.append(internal_event_id)

                    self._nev_memmap[chan_id] = data

        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
        event_channels = np.array(event_channels, dtype=_event_channel_dtype)

        if sig_channels.size > 0:
            sampling_rate = np.unique(sig_channels['sampling_rate'])
            assert sampling_rate.size == 1
            self._sigs_sampling_rate = sampling_rate[0]

        # read ncs files for gap detection and nb_segment computation
        self.read_ncs_files(self.ncs_filenames)

        # nev and nse files do not expose their timestamp limits in the header,
        # so all spikes and events have to be scanned to find them
        ts0, ts1 = None, None
        for _data_memmap in (self._spike_memmap, self._nev_memmap):
            for chan_id, data in _data_memmap.items():
                ts = data['timestamp']
                if ts.size == 0:
                    continue
                if ts0 is None:
                    ts0 = ts[0]
                    ts1 = ts[-1]
                ts0 = min(ts0, ts[0])
                ts1 = max(ts1, ts[-1])

        if self._timestamp_limits is None:
            # case NO ncs but HAVE nev or nse
            self._timestamp_limits = [(ts0, ts1)]
            self._seg_t_starts = [ts0 / 1e6]
            self._seg_t_stops = [ts1 / 1e6]
            self.global_t_start = ts0 / 1e6
            self.global_t_stop = ts1 / 1e6
        elif ts0 is not None:
            # case HAVE ncs AND HAVE nev or nse
            self.global_t_start = min(ts0 / 1e6, self._sigs_t_start[0])
            self.global_t_stop = max(ts1 / 1e6, self._sigs_t_stop[-1])
            self._seg_t_starts = list(self._sigs_t_start)
            self._seg_t_starts[0] = self.global_t_start
            self._seg_t_stops = list(self._sigs_t_stop)
            self._seg_t_stops[-1] = self.global_t_stop
        else:
            # case HAVE ncs but NO nev or nse
            self._seg_t_starts = self._sigs_t_start
            self._seg_t_stops = self._sigs_t_stop
            self.global_t_start = self._sigs_t_start[0]
            self.global_t_stop = self._sigs_t_stop[-1]

        # fill into the header dict
        self.header = {}
        self.header['nb_block'] = 1
        self.header['nb_segment'] = [self._nb_segment]
        self.header['signal_channels'] = sig_channels
        self.header['unit_channels'] = unit_channels
        self.header['event_channels'] = event_channels

        # Annotations
        self._generate_minimal_annotations()
        bl_annotations = self.raw_annotations['blocks'][0]

        for seg_index in range(self._nb_segment):
            seg_annotations = bl_annotations['segments'][seg_index]

            for c in range(sig_channels.size):
                sig_ann = seg_annotations['signals'][c]
                sig_ann.update(signal_annotations[c])

            for c in range(unit_channels.size):
                unit_ann = seg_annotations['units'][c]
                unit_ann.update(unit_annotations[c])

            for c in range(event_channels.size):
                # annotations for event channels
                event_id, ttl_input = self.internal_event_ids[c]
                chan_id = event_channels[c]['id']

                ev_ann = seg_annotations['events'][c]
                ev_ann['file_origin'] = self.nev_filenames[chan_id]

                # ~ ev_ann['marker_id'] =
                # ~ ev_ann['nttl'] =
                # ~ ev_ann['digital_marker'] =
                # ~ ev_ann['analog_marker'] =

    def _segment_t_start(self, block_index, seg_index):
        return self._seg_t_starts[seg_index] - self.global_t_start

    def _segment_t_stop(self, block_index, seg_index):
        return self._seg_t_stops[seg_index] - self.global_t_start

    def _get_signal_size(self, block_index, seg_index, channel_indexes):
        return self._sigs_length[seg_index]

    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
        return self._sigs_t_start[seg_index] - self.global_t_start

    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
        if i_start is None:
            i_start = 0
        if i_stop is None:
            i_stop = self._sigs_length[seg_index]

        block_start = i_start // BLOCK_SIZE
        block_stop = i_stop // BLOCK_SIZE + 1
        sl0 = i_start % BLOCK_SIZE
        sl1 = sl0 + (i_stop - i_start)
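        # Worked example (illustrative): with BLOCK_SIZE = 512, i_start = 600
        # and i_stop = 900 give block_start = 1, block_stop = 2, sl0 = 88 and
        # sl1 = 388, so the flattened samples of block 1 are sliced to yield
        # samples 600..899 of the segment.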

        if channel_indexes is None:
            channel_indexes = slice(None)
        channel_ids = self.header['signal_channels'][channel_indexes]['id']

        sigs_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype='int16')
        for i, chan_id in enumerate(channel_ids):
            data = self._sigs_memmap[seg_index][chan_id]
            sub = data[block_start:block_stop]
            sigs_chunk[:, i] = sub['samples'].flatten()[sl0:sl1]

        return sigs_chunk

    def _spike_count(self, block_index, seg_index, unit_index):
        chan_id, unit_id = self.internal_unit_ids[unit_index]
        data = self._spike_memmap[chan_id]
        ts = data['timestamp']
        ts0, ts1 = self._timestamp_limits[seg_index]
        keep = (ts >= ts0) & (ts <= ts1) & (unit_id == data['unit_id'])
        nb_spike = int(data[keep].size)
        return nb_spike

    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
        chan_id, unit_id = self.internal_unit_ids[unit_index]
        data = self._spike_memmap[chan_id]
        ts = data['timestamp']

        ts0, ts1 = self._timestamp_limits[seg_index]
        if t_start is not None:
            ts0 = int((t_start + self.global_t_start) * 1e6)
        if t_stop is not None:
            ts1 = int((t_stop + self.global_t_start) * 1e6)

        keep = (ts >= ts0) & (ts <= ts1) & (unit_id == data['unit_id'])
        timestamps = ts[keep]
        return timestamps

    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
        spike_times = spike_timestamps.astype(dtype)
        spike_times /= 1e6
        spike_times -= self.global_t_start
        return spike_times

    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index,
                                 t_start, t_stop):
        chan_id, unit_id = self.internal_unit_ids[unit_index]
        data = self._spike_memmap[chan_id]
        ts = data['timestamp']

        ts0, ts1 = self._timestamp_limits[seg_index]
        if t_start is not None:
            ts0 = int((t_start + self.global_t_start) * 1e6)
        if t_stop is not None:
            ts1 = int((t_stop + self.global_t_start) * 1e6)

        keep = (ts >= ts0) & (ts <= ts1) & (unit_id == data['unit_id'])

        wfs = data[keep]['samples']
        if wfs.ndim == 2:
            # case for nse
            waveforms = wfs[:, None, :]
        else:
            # case for ntt: change (n, 32, 4) to (n, 4, 32)
            waveforms = wfs.swapaxes(1, 2)
        return waveforms

    def _event_count(self, block_index, seg_index, event_channel_index):
        event_id, ttl_input = self.internal_event_ids[event_channel_index]
        chan_id = self.header['event_channels'][event_channel_index]['id']
        data = self._nev_memmap[chan_id]
        ts0, ts1 = self._timestamp_limits[seg_index]
        ts = data['timestamp']
        keep = (ts >= ts0) & (ts <= ts1) & (data['event_id'] == event_id) & \
               (data['ttl_input'] == ttl_input)
        nb_event = int(data[keep].size)
        return nb_event

    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
        event_id, ttl_input = self.internal_event_ids[event_channel_index]
        chan_id = self.header['event_channels'][event_channel_index]['id']
        data = self._nev_memmap[chan_id]

        ts0, ts1 = self._timestamp_limits[seg_index]
        if t_start is not None:
            ts0 = int((t_start + self.global_t_start) * 1e6)
        if t_stop is not None:
            ts1 = int((t_stop + self.global_t_start) * 1e6)

        ts = data['timestamp']
        keep = (ts >= ts0) & (ts <= ts1) & (data['event_id'] == event_id) & \
               (data['ttl_input'] == ttl_input)

        subdata = data[keep]
        timestamps = subdata['timestamp']
        labels = subdata['event_string'].astype('U')
        durations = None
        return timestamps, durations, labels

    def _rescale_event_timestamp(self, event_timestamps, dtype):
        event_times = event_timestamps.astype(dtype)
        event_times /= 1e6
        event_times -= self.global_t_start
        return event_times

    def read_ncs_files(self, ncs_filenames):
        """
        Given the ncs filenames (dict chan_id -> filename), construct:
            * self._sigs_memmap = [ {} for seg_index in range(self._nb_segment) ]
            * self._sigs_t_start = []
            * self._sigs_t_stop = []
            * self._sigs_length = []
            * self._nb_segment
            * self._timestamp_limits

        The first file is read entirely to detect gaps in its timestamps;
        each gap leads to a new segment. The other files are not read
        entirely, but their gaps are checked to be at the same places.

        gap_indexes can be given (when cached) to avoid the full read.
        """
        if len(ncs_filenames) == 0:
            self._nb_segment = 1
            self._timestamp_limits = None
            return

        # expected delta (in us) between the timestamps of two consecutive blocks
        good_delta = int(BLOCK_SIZE * 1e6 / self._sigs_sampling_rate)
        chan_id0 = list(ncs_filenames.keys())[0]
        filename0 = ncs_filenames[chan_id0]

        data0 = np.memmap(filename0, dtype=ncs_dtype, mode='r', offset=HEADER_SIZE)

        gap_indexes = None
        if self.use_cache:
            gap_indexes = self._cache.get('gap_indexes')

        # detect gaps on the first file
        if gap_indexes is None:
            # this can be slow!
            timestamps0 = data0['timestamp']
            deltas0 = np.diff(timestamps0)

            # It should simply be:
            #     gap_indexes, = np.nonzero(deltas0 != good_delta)
            # but in some files many deltas are one tick off (e.g. 15999
            # instead of 16000), presumably a rounding problem, so the same
            # test is done with a tolerance of 1 or 2 ticks.
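            # Illustrative numbers: at 32 kHz, good_delta = 512 * 1e6 / 32000
            # = 16000 us between consecutive block timestamps, so a delta of
            # 15999 or 16001 us is rounding jitter, not a real gap.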
            mask = deltas0 != good_delta
            for tolerance in (1, 2):
                mask &= (deltas0 != good_delta - tolerance)
                mask &= (deltas0 != good_delta + tolerance)
            gap_indexes, = np.nonzero(mask)

            if self.use_cache:
                self.add_in_cache(gap_indexes=gap_indexes)

        gap_bounds = [0] + (gap_indexes + 1).tolist() + [data0.size]
        self._nb_segment = len(gap_bounds) - 1

        self._sigs_memmap = [{} for seg_index in range(self._nb_segment)]
        self._sigs_t_start = []
        self._sigs_t_stop = []
        self._sigs_length = []
        self._timestamp_limits = []

        # create segments with subdata block/t_start/t_stop/length
        for chan_id, ncs_filename in self.ncs_filenames.items():
            data = np.memmap(ncs_filename, dtype=ncs_dtype, mode='r', offset=HEADER_SIZE)
            assert data.size == data0.size, 'ncs files do not have the same data length'

            for seg_index in range(self._nb_segment):
                i0 = gap_bounds[seg_index]
                i1 = gap_bounds[seg_index + 1]

                assert data[i0]['timestamp'] == data0[i0][
                    'timestamp'], 'ncs files do not have the same gaps'
                assert data[i1 - 1]['timestamp'] == data0[i1 - 1][
                    'timestamp'], 'ncs files do not have the same gaps'

                subdata = data[i0:i1]
                self._sigs_memmap[seg_index][chan_id] = subdata

                if chan_id == chan_id0:
                    ts0 = subdata[0]['timestamp']
                    ts1 = subdata[-1]['timestamp'] + \
                        np.uint64(BLOCK_SIZE / self._sigs_sampling_rate * 1e6)
                    self._timestamp_limits.append((ts0, ts1))
                    t_start = ts0 / 1e6
                    self._sigs_t_start.append(t_start)
                    t_stop = ts1 / 1e6
                    self._sigs_t_stop.append(t_stop)
                    length = subdata.size * BLOCK_SIZE
                    self._sigs_length.append(length)


# helper functions

def _to_bool(txt):
    if txt == 'True':
        return True
    elif txt == 'False':
        return False
    else:
        raise Exception('Can not convert %s to bool' % txt)


# keys that may appear in the txt header, as tuples of
# (header name, name in the info dict, conversion function)
txt_header_keys = [
    ('AcqEntName', 'channel_names', None),  # used
    ('FileType', '', None),
    ('FileVersion', '', None),
    ('RecordSize', '', None),
    ('HardwareSubSystemName', '', None),
    ('HardwareSubSystemType', '', None),
    ('SamplingFrequency', 'sampling_rate', float),  # used
    ('ADMaxValue', '', None),
    ('ADBitVolts', 'bit_to_microVolt', None),  # used
    ('NumADChannels', '', None),
    ('ADChannel', 'channel_ids', None),  # used
    ('InputRange', '', None),
    ('InputInverted', 'input_inverted', _to_bool),  # used
    ('DSPLowCutFilterEnabled', '', None),
    ('DspLowCutFrequency', '', None),
    ('DspLowCutNumTaps', '', None),
    ('DspLowCutFilterType', '', None),
    ('DSPHighCutFilterEnabled', '', None),
    ('DspHighCutFrequency', '', None),
    ('DspHighCutNumTaps', '', None),
    ('DspHighCutFilterType', '', None),
    ('DspDelayCompensation', '', None),
    ('DspFilterDelay_µs', '', None),
    ('DisabledSubChannels', '', None),
    ('WaveformLength', '', int),
    ('AlignmentPt', '', None),
    ('ThreshVal', '', None),
    ('MinRetriggerSamples', '', None),
    ('SpikeRetriggerTime', '', None),
    ('DualThresholding', '', None),
    (r'Feature \w+ \d+', '', None),
    ('SessionUUID', '', None),
    ('FileUUID', '', None),
    ('CheetahRev', 'version', None),  # used, possibility 1 for version
    ('ProbeName', '', None),
    ('OriginalFileName', '', None),
    ('TimeCreated', '', None),
    ('TimeClosed', '', None),
    ('ApplicationName Cheetah', 'version', None),  # used, possibility 2 for version
    ('AcquisitionSystem', '', None),
    ('ReferenceChannel', '', None),
]


def read_txt_header(filename):
    """
    All files in a Neuralynx dataset contain a 16 kB text header.
    This function parses it into an info dict, including the
    recording datetimes.
    """
    with open(filename, 'rb') as f:
        txt_header = f.read(HEADER_SIZE)
    txt_header = txt_header.strip(b'\x00').decode('latin-1')

    # find keys
    info = OrderedDict()
    for k1, k2, type_ in txt_header_keys:
        pattern = r'-(?P<name>' + k1 + r') (?P<value>[\S ]*)'
        matches = re.findall(pattern, txt_header)
        for match in matches:
            if k2 == '':
                name = match[0]
            else:
                name = k2
            value = match[1].rstrip(' ')
            if type_ is not None:
                value = type_(value)
            info[name] = value

    # if channel_ids or channel_names are not in the header,
    # the filename is used instead
    name = os.path.splitext(os.path.basename(filename))[0]

    # convert channel ids
    if 'channel_ids' in info:
        chid_entries = re.findall(r'\w+', info['channel_ids'])
        info['channel_ids'] = [int(c) for c in chid_entries]
    else:
        info['channel_ids'] = [name]

    # convert channel names
    if 'channel_names' in info:
        name_entries = re.findall(r'\w+', info['channel_names'])
        if len(name_entries) == 1:
            info['channel_names'] = name_entries * len(info['channel_ids'])
        assert len(info['channel_names']) == len(info['channel_ids']), \
            'Number of channel ids does not match channel names.'
    else:
        info['channel_names'] = [name] * len(info['channel_ids'])

    if 'version' in info:
        version = info['version'].replace('"', '')
        info['version'] = distutils.version.LooseVersion(version)

    # convert bit_to_microVolt
    if 'bit_to_microVolt' in info:
        btm_entries = re.findall(r'\S+', info['bit_to_microVolt'])
        if len(btm_entries) == 1:
            btm_entries = btm_entries * len(info['channel_ids'])
        info['bit_to_microVolt'] = [float(e) * 1e6 for e in btm_entries]
        assert len(info['bit_to_microVolt']) == len(info['channel_ids']), \
            'Number of channel ids does not match bit_to_microVolt conversion factors.'

    if 'InputRange' in info:
        ir_entries = re.findall(r'\w+', info['InputRange'])
        if len(ir_entries) == 1:
            info['InputRange'] = [int(ir_entries[0])] * len(chid_entries)
        else:
            info['InputRange'] = [int(e) for e in ir_entries]
        assert len(info['InputRange']) == len(chid_entries), \
            'Number of channel ids does not match input range values.'

    # filename and datetime
    if info['version'] <= distutils.version.LooseVersion('5.6.4'):
        datetime1_regex = r'## Time Opened \(m/d/y\): (?P<date>\S+) \(h:m:s\.ms\) (?P<time>\S+)'
        datetime2_regex = r'## Time Closed \(m/d/y\): (?P<date>\S+) \(h:m:s\.ms\) (?P<time>\S+)'
        filename_regex = r'## File Name (?P<filename>\S+)'
        datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
    else:
        datetime1_regex = r'-TimeCreated (?P<date>\S+) (?P<time>\S+)'
        datetime2_regex = r'-TimeClosed (?P<date>\S+) (?P<time>\S+)'
        filename_regex = r'-OriginalFileName "?(?P<filename>\S+)"?'
        datetimeformat = '%Y/%m/%d %H:%M:%S'

    original_filename = re.search(filename_regex, txt_header).groupdict()['filename']

    dt1 = re.search(datetime1_regex, txt_header).groupdict()
    dt2 = re.search(datetime2_regex, txt_header).groupdict()

    info['recording_opened'] = datetime.datetime.strptime(
        dt1['date'] + ' ' + dt1['time'], datetimeformat)
    info['recording_closed'] = datetime.datetime.strptime(
        dt2['date'] + ' ' + dt2['time'], datetimeformat)

    return info
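

# An ncs file is a flat sequence of fixed-size records after the header:
# an 8-byte timestamp (in microseconds), three uint32 fields and a block
# of 512 int16 samples, i.e. 1044 bytes per record.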
ncs_dtype = [('timestamp', 'uint64'), ('channel_id', 'uint32'), ('sample_rate', 'uint32'),
             ('nb_valid', 'uint32'), ('samples', 'int16', (BLOCK_SIZE,))]
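
# An nev record is 184 bytes; 'event_string' holds the human-readable event
# message padded to 128 bytes.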
nev_dtype = [
    ('reserved', '<i2'),
    ('system_id', '<i2'),
    ('data_size', '<i2'),
    ('timestamp', '<u8'),
    ('event_id', '<i2'),
    ('ttl_input', '<i2'),
    ('crc_check', '<i2'),
    ('dummy1', '<i2'),
    ('dummy2', '<i2'),
    ('extra', '<i4', (8,)),
    ('event_string', 'S128'),
]


def get_nse_or_ntt_dtype(info, ext):
    """
    For NSE and NTT the dtype depends on the header.
    """
    dtype = [('timestamp', 'uint64'), ('channel_id', 'uint32'), ('unit_id', 'uint32')]

    # count features
    nb_feature = 0
    for k in info.keys():
        if k.startswith('Feature '):
            nb_feature += 1
    dtype += [('features', 'int32', (nb_feature,))]

    # count samples
    if ext == 'nse':
        nb_sample = info['WaveformLength']
        dtype += [('samples', 'int16', (nb_sample,))]
    elif ext == 'ntt':
        nb_sample = info['WaveformLength']
        nb_chan = 4  # check this if not tetrode
        dtype += [('samples', 'int16', (nb_sample, nb_chan))]

    return dtype
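

# Minimal usage sketch (not part of the original module): the directory path
# below is a placeholder, and the two public read methods used are assumed to
# be provided by BaseRawIO.
if __name__ == '__main__':
    reader = NeuralynxRawIO(dirname='path/to/neuralynx_session')
    reader.parse_header()
    print(reader)
    # read the first signal block of segment 0 as raw int16, then rescale to uV
    raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                        i_start=0, i_stop=BLOCK_SIZE,
                                        channel_indexes=None)
    sigs = reader.rescale_signal_raw_to_float(raw, dtype='float64')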