neuroshareapiio.py

  1. """
  2. Class for "reading" data from Neuroshare compatible files (check neuroshare.org)
  3. It runs through the whole file and searches for: analog signals, spike cutouts,
  4. and trigger events (without duration)
  5. Depends on: Neuroshare API 0.9.1, numpy 1.6.1, quantities 0.10.1
  6. Supported: Read
  7. Author: Andre Maia Chagas
  8. """
# needed for python 3 compatibility
from __future__ import absolute_import
# note neo.core needs only numpy and quantities
import numpy as np
import quantities as pq
import os
# check to see if the neuroshare bindings are properly imported
try:
    import neuroshare as ns
except ImportError as err:
    print(err)
    # print('\n neuroshare library not found, loading data will not work!')
    # print('\n be sure to install the library found at:')
    # print('\n http://pythonhosted.org/neuroshare/')
else:
    pass
    # print('neuroshare library successfully imported')

# import BaseIO
from neo.io.baseio import BaseIO
# import objects from neo.core
from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch


# create an object based on BaseIO
class NeuroshareapiIO(BaseIO):
    # setting some class parameters
    is_readable = True  # This class can only read data
    is_writable = False  # write is not supported
    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
    has_header = False
    is_streameable = False
    readable_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
    # This class is not able to write objects
    writeable_objects = []

    # # This is for GUI stuff: a definition of the parameters used when reading.
    # # This dict should be keyed by object (`Block`). Each entry is a list
    # # of tuples. The first entry in each tuple is the parameter name; the
    # # second entry is a dict with keys 'value' (for the default value)
    # # and 'label' (for a descriptive name).
    # # Note that if the highest-level object requires parameters,
    # # common_io_test will be skipped.
    read_params = {
        Segment: [
            ("segment_duration", {"value": 0., "label": "Segment size (s.)"}),
            ("t_start", {"value": 0., "label": "start reading (s.)"}),
            # ("num_analogsignal",
            #  {"value": 8, "label": "Number of recording points"}),
            # ("num_spiketrain_by_channel",
            #  {"value": 3, "label": "Num of spiketrains"}),
        ],
    }

    # write is not supported, so no GUI stuff
    write_params = None

    name = "Neuroshare"
    extensions = []
    # This object operates on neuroshare files
    mode = "file"

    def __init__(self, filename=None, dllpath=None):
        """
        Arguments:
            filename : the filename
        The __init__ function runs automatically when the class is
        instantiated, as in: test = NeuroshareapiIO(filename="filetoberead.mcd"),
        so the first operations on the file happen here and the user doesn't
        have to remember to call any method other than the ones defined in the
        Neo library.
        """
        BaseIO.__init__(self)
        self.filename = filename
        # set the flags for each entity type
        eventID = 1
        analogID = 2
        epochID = 3
        # if a filename was given, create a dictionary with information that will
        # be needed later on.
        if self.filename is not None:
            if dllpath is not None:
                name = os.path.splitext(os.path.basename(dllpath))[0]
                library = ns.Library(name, dllpath)
            else:
                library = None
            self.fd = ns.File(self.filename, library=library)
            # get all the metadata from the file
            self.metadata = self.fd.metadata_raw
            # get sampling rate
            self.metadata["sampRate"] = 1. / self.metadata["TimeStampResolution"]  # Hz
            # create lists and counters for electrode, spike cutout and trigger channels
            self.metadata["elecChannels"] = list()
            self.metadata["elecChanId"] = list()
            self.metadata["num_analogs"] = 0
            self.metadata["spkChannels"] = list()
            self.metadata["spkChanId"] = list()
            self.metadata["num_spkChans"] = 0
            self.metadata["triggers"] = list()
            self.metadata["triggersId"] = list()
            self.metadata["num_trigs"] = 0
            self.metadata["digital epochs"] = list()
            self.metadata["digiEpochId"] = list()
            self.metadata["num_digiEpochs"] = 0
            # loop through all entities in the file to get the indexes for each
            # entity type, so that one can run through the indexes later, upon
            # reading the segment
            for entity in self.fd.entities:
                # if entity is analog and not the digital line recording
                # (stored as analog in neuroshare files)
                if entity.entity_type == analogID and entity.label[0:4] != "digi":
                    # get the electrode number
                    self.metadata["elecChannels"].append(entity.label[-4:])
                    # get the electrode index
                    self.metadata["elecChanId"].append(entity.id)
                    # increase the number of electrodes found
                    self.metadata["num_analogs"] += 1
                # if the entity is an event entity and a trigger
                if entity.entity_type == eventID and entity.label[0:4] == "trig":
                    # get the digital bit/trigger number
                    self.metadata["triggers"].append(entity.label[0:4] + entity.label[-4:])
                    # get the digital bit index
                    self.metadata["triggersId"].append(entity.id)
                    # increase the number of triggers found
                    self.metadata["num_trigs"] += 1
                # if the entity is a non-triggered digital event with duration
                if entity.entity_type == eventID and entity.label[0:4] == "digi":
                    # get the digital bit number
                    self.metadata["digital epochs"].append(entity.label[-5:])
                    # get the digital bit index
                    self.metadata["digiEpochId"].append(entity.id)
                    # increase the number of digital epochs found
                    self.metadata["num_digiEpochs"] += 1
                # if the entity is spike cutouts
                if entity.entity_type == epochID and entity.label[0:4] == "spks":
                    self.metadata["spkChannels"].append(entity.label[-4:])
                    self.metadata["spkChanId"].append(entity.id)
                    self.metadata["num_spkChans"] += 1
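
    # A minimal construction sketch (the file name and DLL path below are
    # hypothetical placeholders, not part of this module):
    #
    #     io = NeuroshareapiIO(filename="data.mcd",
    #                          dllpath="/usr/local/lib/nsMCDLibrary.so")
    #     print(io.metadata["num_analogs"], "analog channels found")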

    # function to create a block and read in a segment
    # def create_block(self,
    #                  ):
    #
    #     blk = Block(name=self.fileName + "_segment:",
    #                 file_datetime=str(self.metadata_raw["Time_Day"]) + "/" +
    #                               str(self.metadata_raw["Time_Month"]) + "/" +
    #                               str(self.metadata_raw["Time_Year"]) + "_" +
    #                               str(self.metadata_raw["Time_Hour"]) + ":" +
    #                               str(self.metadata_raw["Time_Min"]))
    #
    #     blk.rec_datetime = blk.file_datetime
    #     return blk

    # create function to read segment
    def read_segment(self,
                     lazy=False,
                     # all following arguments are decided by this IO and are free
                     t_start=0.,
                     segment_duration=0.,
                     ):
        """
        Return a Segment containing all analog and spike channels, as well as
        all trigger events.
        Parameters:
            segment_duration : the length of the segment to read, in seconds
            t_start : the time at which to start reading, in seconds
        """
        assert not lazy, 'Do not support lazy'

        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than the file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])
        # if the sum of the start point and the segment duration is bigger than
        # the file time span, cap it at the end
        if segment_duration + t_start > float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"]) - t_start

        # create an empty segment
        seg = Segment(name="segment from the NeuroshareapiIO")

        # read nested analogsignals
        if self.metadata["num_analogs"] == 0:
            print("no analog signals in this file!")
        else:
            # run through the number of analog channels found in the __init__ function
            for i in range(self.metadata["num_analogs"]):
                # create an analog signal object for each channel found
                ana = self.read_analogsignal(channel_index=self.metadata["elecChanId"][i],
                                             segment_duration=segment_duration, t_start=t_start)
                # add the analog signal to the segment object
                seg.analogsignals += [ana]

        # read triggers (in this case without any duration)
        for i in range(self.metadata["num_trigs"]):
            # create an event object for each trigger/bit found
            eva = self.read_event(channel_index=self.metadata["triggersId"][i],
                                  segment_duration=segment_duration,
                                  t_start=t_start)
            # add the event object to the segment
            seg.events += [eva]

        # read epochs (digital events with duration)
        for i in range(self.metadata["num_digiEpochs"]):
            # create an epoch object for each digital bit found
            epa = self.read_epoch(channel_index=self.metadata["digiEpochId"][i],
                                  segment_duration=segment_duration,
                                  t_start=t_start)
            # add the epoch object to the segment
            seg.epochs += [epa]

        # read nested spiketrains
        # run through all spike channels found
        for i in range(self.metadata["num_spkChans"]):
            # create a spiketrain object
            sptr = self.read_spiketrain(channel_index=self.metadata["spkChanId"][i],
                                        segment_duration=segment_duration,
                                        t_start=t_start)
            # add the spiketrain object to the segment
            seg.spiketrains += [sptr]

        seg.create_many_to_one_relationship()

        return seg
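
    # A usage sketch for read_segment (hypothetical file name; reads the first
    # 10 s of every channel type into one Segment):
    #
    #     io = NeuroshareapiIO(filename="data.mcd")
    #     seg = io.read_segment(t_start=0., segment_duration=10.)
    #     print(len(seg.analogsignals), len(seg.spiketrains), len(seg.events))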
  220. """
  221. With this IO AnalogSignal can be accessed directly with its channel number
  222. """
    def read_analogsignal(self,
                          lazy=False,
                          # channel index as given by the neuroshare API
                          channel_index=0,
                          # time in seconds to be read
                          segment_duration=0.,
                          # time in seconds to start reading from
                          t_start=0.,
                          ):
        assert not lazy, 'Do not support lazy'

        # some controls:
        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than the file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])

        # get the analog object
        sig = self.fd.get_entity(channel_index)
        # get the units (V, mV etc)
        sigUnits = sig.units
        # get the electrode number
        chanName = sig.label[-4:]
        # transform t_start into an index (reading will start from this index)
        startat = int(t_start * self.metadata["sampRate"])
        # get the number of bins to read in
        bins = int(segment_duration * self.metadata["sampRate"])
        # if the number of bins to read is bigger than
        # the total number of bins, read only till the end of the analog object
        if startat + bins > sig.item_count:
            bins = sig.item_count - startat
        # read the data from the sig object
        sig, _, _ = sig.get_data(index=startat, count=bins)
        # store it in the 'AnalogSignal' object
        anasig = AnalogSignal(sig, units=sigUnits, sampling_rate=self.metadata["sampRate"] * pq.Hz,
                              t_start=t_start * pq.s,
                              t_stop=(t_start + segment_duration) * pq.s,
                              channel_index=channel_index)
        # annotate which electrode the signal comes from
        anasig.annotate(info="signal from channel %s" % chanName)

        return anasig
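
    # A usage sketch for a direct single-channel read (assumes an existing IO
    # instance "io"; the channel index comes from the ids collected in __init__):
    #
    #     chan_id = io.metadata["elecChanId"][0]
    #     ana = io.read_analogsignal(channel_index=chan_id,
    #                                segment_duration=5., t_start=0.)
    #     print(ana.sampling_rate, ana.units)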

    # function to read spike trains
    def read_spiketrain(self,
                        lazy=False,
                        channel_index=0,
                        segment_duration=0.,
                        t_start=0.):
        """
        Function to read in spike trains. This API still does not support
        reading in specific channels as they were recorded; rather, the
        function gets the entity set by 'channel_index', which is collected
        in the __init__ function (all spike channels).
        """
        assert not lazy, 'Do not support lazy'

        # sampling rate
        sr = self.metadata["sampRate"]
        # create a list to store spiketrain times
        times = list()
        # get the spike data from a specific channel index
        tempSpks = self.fd.get_entity(channel_index)
        # transform t_start into an index (reading will start from this index)
        startat = tempSpks.get_index_by_time(t_start, 0)  # zero means closest index to value
        # get the last index to read, using segment duration and t_start
        # -1 means last index before time
        endat = tempSpks.get_index_by_time(float(segment_duration + t_start), -1)
        numIndx = endat - startat
        # create an empty numpy array to store the waveforms
        waveforms = np.zeros([numIndx, tempSpks.max_sample_count])
        # loop through the data from the specific channel index
        for i in range(startat, endat, 1):
            # get cutout, timestamp, cutout duration, and spike unit
            tempCuts, timeStamp, duration, unit = tempSpks.get_data(i)
            # save the cutout in the waveform matrix; row 0 corresponds to
            # the first index read, so offset by startat
            waveforms[i - startat] = tempCuts[0]
            # append the time stamp to the list
            times.append(timeStamp)

        # create a spike train object
        spiketr = SpikeTrain(times, units=pq.s,
                             t_stop=t_start + segment_duration,
                             t_start=t_start * pq.s,
                             name="spikes from electrode " + tempSpks.label[-3:],
                             waveforms=waveforms * pq.volt,
                             sampling_rate=sr * pq.Hz,
                             file_origin=self.filename,
                             annotate=("channel_index:" + str(channel_index)))
        return spiketr
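
    # A usage sketch for one spike channel (assumes an existing IO instance
    # "io"; the returned SpikeTrain carries the cutouts in sptr.waveforms):
    #
    #     spk_id = io.metadata["spkChanId"][0]
    #     sptr = io.read_spiketrain(channel_index=spk_id,
    #                               segment_duration=5., t_start=0.)
    #     print(len(sptr), sptr.waveforms.shape)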

    def read_event(self, lazy=False, channel_index=0,
                   t_start=0.,
                   segment_duration=0.):
        """
        Function to read digital timestamps. This function only reads the
        event onsets; to get digital event durations, use read_epoch.
        """
        assert not lazy, 'Do not support lazy'

        # create temporary empty lists to store data
        tempNames = list()
        tempTimeStamp = list()
        # get the entity from the file
        trigEntity = self.fd.get_entity(channel_index)
        # transform t_start into an index (reading will start from this index)
        startat = trigEntity.get_index_by_time(t_start, 0)  # zero means closest index to value
        # get the last index to read, using segment duration and t_start
        endat = trigEntity.get_index_by_time(
            float(segment_duration + t_start), -1)  # -1 means last index before time
        # numIndx = endat - startat
        # run through the specified interval in the entity
        for i in range(startat, endat + 1, 1):  # trigEntity.item_count):
            # get in which digital bit the trigger was detected
            tempNames.append(trigEntity.label[-8:])
            # get the time stamps of onset events
            tempData, onOrOff = trigEntity.get_data(i)
            # if this was an onset event, save it to the list.
            # On triggered recordings it seems that only onset events are
            # recorded; on continuous recordings both onset (==1)
            # and offset (==255) seem to be recorded
            if onOrOff == 1:
                # append the time stamp to the list
                tempTimeStamp.append(tempData)
        # create an event array
        eva = Event(labels=np.array(tempNames, dtype="S"),
                    times=np.array(tempTimeStamp) * pq.s,
                    file_origin=self.filename,
                    description="the trigger events (without durations)")
        return eva
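
    # A usage sketch for one trigger channel (assumes an existing IO instance "io"):
    #
    #     trig_id = io.metadata["triggersId"][0]
    #     eva = io.read_event(channel_index=trig_id,
    #                         segment_duration=5., t_start=0.)
    #     print(eva.times, eva.labels)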

    def read_epoch(self, lazy=False,
                   channel_index=0,
                   t_start=0.,
                   segment_duration=0.):
        """
        Function to read digital timestamps. This function reads the event
        onsets and offsets and outputs onsets and durations. To get only
        onsets, use read_event.
        """
        assert not lazy, 'Do not support lazy'

        # create temporary empty lists to store data
        tempNames = list()
        tempTimeStamp = list()
        durations = list()
        # get the entity from the file
        digEntity = self.fd.get_entity(channel_index)
        # transform t_start into an index (reading will start from this index)
        startat = digEntity.get_index_by_time(t_start, 0)  # zero means closest index to value
        # get the last index to read, using segment duration and t_start
        # -1 means last index before time
        endat = digEntity.get_index_by_time(float(segment_duration + t_start), -1)

        # run through the entity using only odd "i"s, so that each pass
        # handles one onset/offset pair
        for i in range(startat, endat + 1, 1):
            if i % 2 == 1:
                # get in which digital bit the trigger was detected
                tempNames.append(digEntity.label[-8:])
                # get the time stamp of the even-indexed event (the onset)
                tempData, onOrOff = digEntity.get_data(i - 1)
                # if this was an onset event, save it to the list.
                # On triggered recordings it seems that only onset events are
                # recorded; on continuous recordings both onset (==1)
                # and offset (==255) seem to be recorded
                # if onOrOff == 1:
                # append the time stamp to the list
                tempTimeStamp.append(tempData)
                # get the time stamp of the odd-indexed event (the offset)
                tempData1, onOrOff = digEntity.get_data(i)
                # if onOrOff == 255:
                #     pass
                durations.append(tempData1 - tempData)

        epa = Epoch(file_origin=self.filename,
                    times=np.array(tempTimeStamp) * pq.s,
                    durations=np.array(durations) * pq.s,
                    labels=np.array(tempNames, dtype="S"),
                    description="digital events with duration")
        return epa
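

# A minimal end-to-end sketch, guarded so it only runs when this module is
# executed directly. The file name and DLL path are hypothetical placeholders;
# point them at an existing Neuroshare-compatible recording and the matching
# vendor DLL on your system.
if __name__ == "__main__":
    io = NeuroshareapiIO(filename="data.mcd",
                         dllpath="/usr/local/lib/nsMCDLibrary.so")
    # read the first 10 seconds of the recording into a single Segment
    seg = io.read_segment(t_start=0., segment_duration=10.)
    print("analog signals:", len(seg.analogsignals))
    print("spike trains:  ", len(seg.spiketrains))
    print("trigger events:", len(seg.events))
    print("digital epochs:", len(seg.epochs))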