  1. """
  2. Class for "reading" data from Neuroshare compatible files (check neuroshare.org)
  3. It runs through the whole file and searches for: analog signals, spike cutouts,
  4. and trigger events (without duration)
  5. Depends on: Neuroshare API 0.9.1, numpy 1.6.1, quantities 0.10.1
  6. Supported: Read
  7. Author: Andre Maia Chagas
  8. """
# note neo.core needs only numpy and quantities
import numpy as np
import quantities as pq
import os

# check to see if the neuroshare bindings are properly imported
try:
    import neuroshare as ns
except ImportError as err:
    print(err)
    # print('\n neuroshare library not found, loading data will not work!')
    # print('\n be sure to install the library found at:')
    # print('\n www.http://pythonhosted.org/neuroshare/')
else:
    pass
    # print('neuroshare library successfully imported')

# import BaseIO
from neo.io.baseio import BaseIO

# import objects from neo.core
from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch

# create an object based on BaseIO
class NeuroshareapiIO(BaseIO):
    # setting some class parameters
    is_readable = True  # This class can only read data
    is_writable = False  # write is not supported

    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
    has_header = False
    is_streameable = False

    readable_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
    # This class is not able to write objects
    writeable_objects = []

    # This is for GUI stuff: a definition for parameters when reading.
    # This dict should be keyed by object (`Block`). Each entry is a list
    # of tuples. The first entry in each tuple is the parameter name. The
    # second entry is a dict with keys 'value' (for the default value)
    # and 'label' (for a descriptive name).
    # Note that if the highest-level object requires parameters,
    # common_io_test will be skipped.
    read_params = {
        Segment: [
            ("segment_duration", {"value": 0., "label": "Segment size (s.)"}),
            ("t_start", {"value": 0., "label": "start reading (s.)"}),
            # ("num_analogsignal",
            #  {"value": 8, "label": "Number of recording points"}),
            # ("num_spiketrain_by_channel",
            #  {"value": 3, "label": "Num of spiketrains"}),
        ],
    }

    # write is not supported, so there are no GUI parameters for it
    write_params = None

    name = "Neuroshare"
    extensions = []

    # This object operates on neuroshare files
    mode = "file"
    def __init__(self, filename=None, dllpath=None):
        """
        Arguments:
            filename : the filename

        The init function runs automatically when the class is instantiated,
        as in: test = NeuroshareapiIO(filename="filetoberead.mcd"). The first
        operations on the file are therefore set up here, so that the user does
        not have to remember to call any method other than the ones defined in
        the NEO library.
        """
        BaseIO.__init__(self)
        self.filename = filename
        # set the flags for each event type
        eventID = 1
        analogID = 2
        epochID = 3
        # if a filename was given, create a dictionary with information that will
        # be needed later on
        if self.filename is not None:
            if dllpath is not None:
                name = os.path.splitext(os.path.basename(dllpath))[0]
                library = ns.Library(name, dllpath)
            else:
                library = None
            self.fd = ns.File(self.filename, library=library)
            # get all the metadata from file
            self.metadata = self.fd.metadata_raw
            # get sampling rate
            self.metadata["sampRate"] = 1. / self.metadata["TimeStampResolution"]  # Hz
            # create lists to hold the electrode, spike-cutout, trigger, and
            # digital-epoch channels
            self.metadata["elecChannels"] = list()
            self.metadata["elecChanId"] = list()
            self.metadata["num_analogs"] = 0
            self.metadata["spkChannels"] = list()
            self.metadata["spkChanId"] = list()
            self.metadata["num_spkChans"] = 0
            self.metadata["triggers"] = list()
            self.metadata["triggersId"] = list()
            self.metadata["num_trigs"] = 0
            self.metadata["digital epochs"] = list()
            self.metadata["digiEpochId"] = list()
            self.metadata["num_digiEpochs"] = 0
            # loop through all entities in the file to get the indexes for each
            # entity type, so that one can run through the indexes later, upon
            # reading the segment
            for entity in self.fd.entities:
                # if entity is analog and not the digital line recording
                # (stored as analog in neuroshare files)
                if entity.entity_type == analogID and entity.label[0:4] != "digi":
                    # get the electrode number
                    self.metadata["elecChannels"].append(entity.label[-4:])
                    # get the electrode index
                    self.metadata["elecChanId"].append(entity.id)
                    # increase the number of electrodes found
                    self.metadata["num_analogs"] += 1
                # if the entity is an event entity and a trigger
                if entity.entity_type == eventID and entity.label[0:4] == "trig":
                    # get the digital bit/trigger number
                    self.metadata["triggers"].append(entity.label[0:4] + entity.label[-4:])
                    # get the digital bit index
                    self.metadata["triggersId"].append(entity.id)
                    # increase the number of triggers found
                    self.metadata["num_trigs"] += 1
                # if the entity is a non-triggered digital value with duration
                if entity.entity_type == eventID and entity.label[0:4] == "digi":
                    # get the digital bit number
                    self.metadata["digital epochs"].append(entity.label[-5:])
                    # get the digital bit index
                    self.metadata["digiEpochId"].append(entity.id)
                    # increase the number of digital epochs found
                    self.metadata["num_digiEpochs"] += 1
                # if the entity is spike cutouts
                if entity.entity_type == epochID and entity.label[0:4] == "spks":
                    self.metadata["spkChannels"].append(entity.label[-4:])
                    self.metadata["spkChanId"].append(entity.id)
                    self.metadata["num_spkChans"] += 1
    # function to create a block and read in a segment
    # def create_block(self,
    #                  ):
    #
    #     blk = Block(name=self.fileName + "_segment:",
    #                 file_datetime=str(self.metadata_raw["Time_Day"]) + "/"
    #                               + str(self.metadata_raw["Time_Month"]) + "/"
    #                               + str(self.metadata_raw["Time_Year"]) + "_"
    #                               + str(self.metadata_raw["Time_Hour"]) + ":"
    #                               + str(self.metadata_raw["Time_Min"]))
    #
    #     blk.rec_datetime = blk.file_datetime
    #     return blk
    # create function to read segment
    def read_segment(self,
                     lazy=False,
                     # all following arguments are decided by this IO and are free
                     t_start=0.,
                     segment_duration=0.,
                     ):
        """
        Return a Segment containing all analog and spike channels, as well as
        all trigger events.

        Parameters:
            t_start : time, in seconds, at which to start reading
            segment_duration : size of the segment, in seconds
        """
        assert not lazy, 'Do not support lazy'
        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than the file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])
        # if the sum of the start point and the segment duration is bigger than
        # the file time span, cap it at the end
        if segment_duration + t_start > float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"]) - t_start

        # create an empty segment
        seg = Segment(name="segment from the NeuroshareapiIO")

        # read nested analogsignals
        if self.metadata["num_analogs"] == 0:
            print("no analog signals in this file!")
        else:
            # run through the number of analog channels found in the __init__ function
            for i in range(self.metadata["num_analogs"]):
                # create an analog signal object for each channel found
                ana = self.read_analogsignal(channel_index=self.metadata["elecChanId"][i],
                                             segment_duration=segment_duration,
                                             t_start=t_start)
                # add the analog signal to the segment object
                seg.analogsignals += [ana]

        # read triggers (in this case without any duration)
        for i in range(self.metadata["num_trigs"]):
            # create an event object for each trigger/bit found
            eva = self.read_event(channel_index=self.metadata["triggersId"][i],
                                  segment_duration=segment_duration,
                                  t_start=t_start)
            # add the event object to the segment
            seg.events += [eva]

        # read epochs (digital events with duration)
        for i in range(self.metadata["num_digiEpochs"]):
            # create an epoch object for each digital bit found
            epa = self.read_epoch(channel_index=self.metadata["digiEpochId"][i],
                                  segment_duration=segment_duration,
                                  t_start=t_start)
            # add the epoch object to the segment
            seg.epochs += [epa]

        # read nested spiketrains:
        # run through all spike channels found
        for i in range(self.metadata["num_spkChans"]):
            # create a spike train object
            sptr = self.read_spiketrain(channel_index=self.metadata["spkChanId"][i],
                                        segment_duration=segment_duration,
                                        t_start=t_start)
            # add the spike train to the segment
            seg.spiketrains += [sptr]

        seg.create_many_to_one_relationship()
        return seg
  218. """
  219. With this IO AnalogSignal can be accessed directly with its channel number
  220. """
    def read_analogsignal(self,
                          lazy=False,
                          # channel index as given by the neuroshare API
                          channel_index=0,
                          # time in seconds to be read
                          segment_duration=0.,
                          # time in seconds to start reading from
                          t_start=0.,
                          ):
        assert not lazy, 'Do not support lazy'
        # some controls:
        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than the file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])

        # get the analog object
        sig = self.fd.get_entity(channel_index)
        # get the units (V, mV etc)
        sigUnits = sig.units
        # get the electrode number
        chanName = sig.label[-4:]

        # transform t_start into an index (reading will start from this index)
        startat = int(t_start * self.metadata["sampRate"])
        # get the number of bins to read in
        bins = int(segment_duration * self.metadata["sampRate"])
        # if the number of bins to read is bigger than
        # the total number of bins, read only till the end of the analog object
        if startat + bins > sig.item_count:
            bins = sig.item_count - startat
        # read the data from the sig object
        sig, _, _ = sig.get_data(index=startat, count=bins)
        # store it in an 'AnalogSignal' object
        anasig = AnalogSignal(sig, units=sigUnits,
                              sampling_rate=self.metadata["sampRate"] * pq.Hz,
                              t_start=t_start * pq.s,
                              t_stop=(t_start + segment_duration) * pq.s,
                              channel_index=channel_index)
        # annotate from which electrode the signal comes
        anasig.annotate(info="signal from channel %s" % chanName)
        return anasig
    # function to read spike trains
    def read_spiketrain(self,
                        lazy=False,
                        channel_index=0,
                        segment_duration=0.,
                        t_start=0.):
        """
        Function to read in spike trains. This API still does not support reading
        specific channels as they were recorded; rather, the function gets the
        entity set by 'channel_index', which is collected in the __init__
        function (all spike channels).
        """
        assert not lazy, 'Do not support lazy'
        # sampling rate
        sr = self.metadata["sampRate"]
        # create a list to store spiketrain times
        times = list()
        # get the spike data from a specific channel index
        tempSpks = self.fd.get_entity(channel_index)
        # transform t_start into an index (reading will start from this index)
        startat = tempSpks.get_index_by_time(t_start, 0)  # zero means closest index to value
        # get the last index to read, using segment duration and t_start
        # -1 means last index before time
        endat = tempSpks.get_index_by_time(float(segment_duration + t_start), -1)
        numIndx = endat - startat
        # create a numpy array of zeros to store the waveforms
        waveforms = np.zeros([numIndx, tempSpks.max_sample_count])
        # loop through the data from the specific channel index
        for i in range(startat, endat, 1):
            # get cutout, timestamp, cutout duration, and spike unit
            tempCuts, timeStamp, duration, unit = tempSpks.get_data(i)
            # save the cutout in the waveform matrix; rows are counted from
            # startat, so shift the entity index back to zero-based
            waveforms[i - startat] = tempCuts[0]
            # append the time stamp to the list
            times.append(timeStamp)
        # create a spike train object
        spiketr = SpikeTrain(times, units=pq.s,
                             t_stop=t_start + segment_duration,
                             t_start=t_start * pq.s,
                             name="spikes from electrode " + tempSpks.label[-3:],
                             waveforms=waveforms * pq.volt,
                             sampling_rate=sr * pq.Hz,
                             file_origin=self.filename,
                             annotate=("channel_index:" + str(channel_index)))
        return spiketr
    def read_event(self, lazy=False, channel_index=0,
                   t_start=0.,
                   segment_duration=0.):
        """
        Function to read digital timestamps. This function only reads the event
        onsets; to get digital event durations, use the read_epoch function.
        """
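        # Example (a sketch; trigger channel indexes are collected in
        # self.metadata["triggersId"] by __init__):
        #
        #     eva = reader.read_event(
        #         channel_index=reader.metadata["triggersId"][0],
        #         t_start=0., segment_duration=10.)
        #     print(eva.times)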
        assert not lazy, 'Do not support lazy'
        # create temporary empty lists to store data
        tempNames = list()
        tempTimeStamp = list()
        # get entity from file
        trigEntity = self.fd.get_entity(channel_index)
        # transform t_start into an index (reading will start from this index)
        startat = trigEntity.get_index_by_time(t_start, 0)  # zero means closest index to value
        # get the last index to read, using segment duration and t_start
        endat = trigEntity.get_index_by_time(
            float(segment_duration + t_start), -1)  # -1 means last index before time
        # run through the specified interval in the entity
        for i in range(startat, endat + 1, 1):
            # get the time stamps of onset events
            tempData, onOrOff = trigEntity.get_data(i)
            # if this was an onset event, save it to the list.
            # on triggered recordings it seems that only onset events are
            # recorded; on continuous recordings both onset (==1)
            # and offset (==255) seem to be recorded
            if onOrOff == 1:
                # append the time stamp to the temporary list
                tempTimeStamp.append(tempData)
                # record in which digital bit the trigger was detected, so that
                # labels and times stay the same length
                tempNames.append(trigEntity.label[-8:])
        # create an event array
        eva = Event(labels=np.array(tempNames, dtype="U"),
                    times=np.array(tempTimeStamp) * pq.s,
                    file_origin=self.filename,
                    description="the trigger events (without durations)")
        return eva
    def read_epoch(self, lazy=False,
                   channel_index=0,
                   t_start=0.,
                   segment_duration=0.):
        """
        Function to read digital timestamps. This function reads the event onset
        and offset and outputs the onset and the duration. To get only onsets,
        use the read_event function.
        """
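        # Example (a sketch; digital-epoch channel indexes are collected in
        # self.metadata["digiEpochId"] by __init__):
        #
        #     epa = reader.read_epoch(
        #         channel_index=reader.metadata["digiEpochId"][0],
        #         t_start=0., segment_duration=10.)
        #     print(epa.times, epa.durations)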
        assert not lazy, 'Do not support lazy'
        # create temporary empty lists to store data
        tempNames = list()
        tempTimeStamp = list()
        durations = list()
        # get entity from file
        digEntity = self.fd.get_entity(channel_index)
        # transform t_start into an index (reading will start from this index)
        startat = digEntity.get_index_by_time(t_start, 0)  # zero means closest index to value
        # get the last index to read, using segment duration and t_start
        # -1 means last index before time
        endat = digEntity.get_index_by_time(float(segment_duration + t_start), -1)
        # the events come in onset/offset pairs, so run through the entity
        # using only odd "i"s: index i holds the offset and i - 1 the onset
        for i in range(startat, endat + 1, 1):
            if i % 2 == 1:
                # get in which digital bit the event was detected
                tempNames.append(digEntity.label[-8:])
                # get the time stamp of the onset event (even index)
                tempData, onOrOff = digEntity.get_data(i - 1)
                # append the onset time stamp to the temporary list
                tempTimeStamp.append(tempData)
                # get the time stamp of the offset event (odd index)
                tempData1, onOrOff = digEntity.get_data(i)
                # the duration is the offset time minus the onset time
                durations.append(tempData1 - tempData)
        epa = Epoch(file_origin=self.filename,
                    times=np.array(tempTimeStamp) * pq.s,
                    durations=np.array(durations) * pq.s,
                    labels=np.array(tempNames, dtype="U"),
                    description="digital events with duration")
        return epa