neuroshareapiio.py

  1. """
  2. Class for "reading" data from Neuroshare compatible files (check neuroshare.org)
  3. It runs through the whole file and searches for: analog signals, spike cutouts,
  4. and trigger events (without duration)
  5. Depends on: Neuroshare API 0.9.1, numpy 1.6.1, quantities 0.10.1
  6. Supported: Read
  7. Author: Andre Maia Chagas
  8. """
  9. # needed for python 3 compatibility
  10. from __future__ import absolute_import
  11. # note neo.core needs only numpy and quantities
  12. import numpy as np
  13. import quantities as pq
  14. import os
  15. #check to see if the neuroshare bindings are properly imported
  16. try:
  17. import neuroshare as ns
  18. except ImportError as err:
  19. print (err)
  20. #print('\n neuroshare library not found, loading data will not work!' )
  21. #print('\n be sure to install the library found at:')
  22. #print('\n www.http://pythonhosted.org/neuroshare/')
  23. else:
  24. pass
  25. #print('neuroshare library successfully imported')
  26. #import BaseIO
  27. from neo.io.baseio import BaseIO
  28. #import objects from neo.core
  29. from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch


# create an object based on BaseIO
class NeuroshareapiIO(BaseIO):
    # set some class parameters
    is_readable = True  # This class can only read data
    is_writable = False  # write is not supported
    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
    has_header = False
    is_streameable = False
    readable_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
    # This class is not able to write objects
    writeable_objects = []

    # # This is for GUI stuff: a definition for parameters when reading.
    # # This dict should be keyed by object (`Block`). Each entry is a list
    # # of tuples. The first entry in each tuple is the parameter name. The
    # # second entry is a dict with keys 'value' (for the default value)
    # # and 'label' (for a descriptive name).
    # # Note that if the highest-level object requires parameters,
    # # common_io_test will be skipped.
    read_params = {
        Segment: [
            ("segment_duration", {"value": 0., "label": "Segment size (s.)"}),
            ("t_start", {"value": 0., "label": "start reading (s.)"}),
            # ("lazy", {"value": False, "label": "load in lazy mode?"}),
            # ("cascade", {"value": True, "label": "Cascade?"}),
            # ("num_analogsignal",
            #  {"value": 8, "label": "Number of recording points"}),
            # ("num_spiketrain_by_channel",
            #  {"value": 3, "label": "Num of spiketrains"}),
        ],
    }

    # write is not supported, so there is no GUI stuff for it
    write_params = None

    name = "Neuroshare"
    extensions = []

    # This object operates on neuroshare files
    mode = "file"

    def __init__(self, filename=None, dllpath=None):
        """
        Arguments:
            filename : the filename

        The init function runs automatically when the class is instantiated, as
        in: test = NeuroshareapiIO(filename="filetoberead.mcd"). The first
        operations on the file are therefore done here, so that the user doesn't
        have to remember to call another method beyond the ones defined in the
        NEO library.
        """
        BaseIO.__init__(self)
        self.filename = filename
        # set the flags for each entity type
        eventID = 1
        analogID = 2
        epochID = 3
        # if a filename was given, create a dictionary with information that will
        # be needed later on
        if self.filename is not None:
            if dllpath is not None:
                name = os.path.splitext(os.path.basename(dllpath))[0]
                library = ns.Library(name, dllpath)
            else:
                library = None
            self.fd = ns.File(self.filename, library=library)
            # get all the metadata from the file
            self.metadata = self.fd.metadata_raw
            # get the sampling rate (Hz)
            self.metadata["sampRate"] = 1. / self.metadata["TimeStampResolution"]
            # create lists for electrode, spike cutout and trigger channels
            self.metadata["elecChannels"] = list()
            self.metadata["elecChanId"] = list()
            self.metadata["num_analogs"] = 0
            self.metadata["spkChannels"] = list()
            self.metadata["spkChanId"] = list()
            self.metadata["num_spkChans"] = 0
            self.metadata["triggers"] = list()
            self.metadata["triggersId"] = list()
            self.metadata["num_trigs"] = 0
            self.metadata["digital epochs"] = list()
            self.metadata["digiEpochId"] = list()
            self.metadata["num_digiEpochs"] = 0
            # loop through all entities in the file to get the indexes for each
            # entity type, so that one can run through the indexes later, upon
            # reading the segment
            for entity in self.fd.entities:
                # if the entity is analog and not the digital line recording
                # (stored as analog in neuroshare files)
                if entity.entity_type == analogID and entity.label[0:4] != "digi":
                    # get the electrode number
                    self.metadata["elecChannels"].append(entity.label[-4:])
                    # get the electrode index
                    self.metadata["elecChanId"].append(entity.id)
                    # increase the number of electrodes found
                    self.metadata["num_analogs"] += 1
                # if the entity is an event entity and a trigger
                if entity.entity_type == eventID and entity.label[0:4] == "trig":
                    # get the digital bit/trigger number
                    self.metadata["triggers"].append(entity.label[0:4] + entity.label[-4:])
                    # get the digital bit index
                    self.metadata["triggersId"].append(entity.id)
                    # increase the number of triggers found
                    self.metadata["num_trigs"] += 1
                # if the entity is non-triggered digital values with duration
                if entity.entity_type == eventID and entity.label[0:4] == "digi":
                    # get the digital bit number
                    self.metadata["digital epochs"].append(entity.label[-5:])
                    # get the digital bit index
                    self.metadata["digiEpochId"].append(entity.id)
                    # increase the number of digital epochs found
                    self.metadata["num_digiEpochs"] += 1
                # if the entity is spike cutouts
                if entity.entity_type == epochID and entity.label[0:4] == "spks":
                    self.metadata["spkChannels"].append(entity.label[-4:])
                    self.metadata["spkChanId"].append(entity.id)
                    self.metadata["num_spkChans"] += 1
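
    # Usage sketch (hypothetical file name, not shipped with this module; on
    # some platforms a vendor DLL must be passed via `dllpath`):
    #
    #     io = NeuroshareapiIO(filename="recording.mcd")
    #     io.metadata["num_analogs"]   # number of analog channels found above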

    # function to create a block and read in a segment
    # def create_block(self,
    #                  lazy = False,
    #                  cascade = True,
    #                  ):
    #
    #     blk = Block(name = self.fileName+"_segment:",
    #                 file_datetime = str(self.metadata_raw["Time_Day"])+"/"+
    #                                 str(self.metadata_raw["Time_Month"])+"/"+
    #                                 str(self.metadata_raw["Time_Year"])+"_"+
    #                                 str(self.metadata_raw["Time_Hour"])+":"+
    #                                 str(self.metadata_raw["Time_Min"]))
    #
    #     blk.rec_datetime = blk.file_datetime
    #     return blk

    # create function to read a segment
    def read_segment(self,
                     # the first 2 keyword arguments are imposed by the neo.io API
                     lazy=False,
                     cascade=True,
                     # all following arguments are decided by this IO and are free
                     t_start=0.,
                     segment_duration=0.,
                     ):
        """
        Return a Segment containing all analog and spike channels, as well as
        all trigger events.

        Parameters:
            segment_duration : the size in seconds of the segment
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment
        """
        # if no segment duration is given, use the complete file
        if segment_duration == 0.:
            segment_duration = float(self.metadata["TimeSpan"])
        # if the segment duration is bigger than the file, use the complete file
        if segment_duration >= float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"])
        # if the sum of the start point and the segment duration is bigger than
        # the file time span, cap the duration at the end of the file
        if segment_duration + t_start > float(self.metadata["TimeSpan"]):
            segment_duration = float(self.metadata["TimeSpan"]) - t_start

        # create an empty segment
        seg = Segment(name="segment from the NeuroshareapiIO")

        if cascade:
            # read nested analog signals
            if self.metadata["num_analogs"] == 0:
                print("no analog signals in this file!")
            else:
                # run through the analog channels found in the __init__ function
                for i in range(self.metadata["num_analogs"]):
                    # create an analog signal object for each channel found
                    ana = self.read_analogsignal(lazy=lazy, cascade=cascade,
                                                 channel_index=self.metadata["elecChanId"][i],
                                                 segment_duration=segment_duration,
                                                 t_start=t_start)
                    # add the analog signal to the segment object
                    seg.analogsignals += [ana]

            # read triggers (in this case without any duration)
            for i in range(self.metadata["num_trigs"]):
                # create an event object for each trigger/bit found
                eva = self.read_eventarray(lazy=lazy,
                                           cascade=cascade,
                                           channel_index=self.metadata["triggersId"][i],
                                           segment_duration=segment_duration,
                                           t_start=t_start)
                # add the event object to the segment
                seg.events += [eva]

            # read epochs (digital events with duration)
            for i in range(self.metadata["num_digiEpochs"]):
                # create an epoch object for each digital bit found
                epa = self.read_epocharray(lazy=lazy,
                                           cascade=cascade,
                                           channel_index=self.metadata["digiEpochId"][i],
                                           segment_duration=segment_duration,
                                           t_start=t_start)
                # add the epoch object to the segment
                seg.epochs += [epa]

            # read nested spike trains
            # run through all spike channels found
            for i in range(self.metadata["num_spkChans"]):
                # create a spike train object
                sptr = self.read_spiketrain(lazy=lazy, cascade=cascade,
                                            channel_index=self.metadata["spkChanId"][i],
                                            segment_duration=segment_duration,
                                            t_start=t_start)
                # add the spike train object to the segment
                seg.spiketrains += [sptr]

        seg.create_many_to_one_relationship()
        return seg
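
    # Worked example of the capping above (comment sketch, numbers assumed):
    # for a file whose metadata["TimeSpan"] is 60 s, a call like
    #     seg = io.read_segment(t_start=50., segment_duration=20.)
    # is clipped to a 10 s segment, so that reading never runs past the end
    # of the recording.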
  232. """
  233. With this IO AnalogSignal can be accessed directly with its channel number
  234. """
  235. def read_analogsignal(self,
  236. # the 2 first key arguments are imposed by neo.io
  237. lazy = False,
  238. cascade = True,
  239. #channel index as given by the neuroshare API
  240. channel_index = 0,
  241. #time in seconds to be read
  242. segment_duration = 0.,
  243. #time in seconds to start reading from
  244. t_start = 0.,
  245. ):
  246. #some controls:
  247. #if no segment duration is given, use the complete file
  248. if segment_duration ==0.:
  249. segment_duration=float(self.metadata["TimeSpan"])
  250. #if the segment duration is bigger than file, use the complete file
  251. if segment_duration >=float(self.metadata["TimeSpan"]):
  252. segment_duration=float(self.metadata["TimeSpan"])
  253. if lazy:
  254. anasig = AnalogSignal([], units="V", sampling_rate = self.metadata["sampRate"] * pq.Hz,
  255. t_start=t_start * pq.s,
  256. )
  257. #create a dummie time vector
  258. tvect = np.arange(t_start, t_start+ segment_duration , 1./self.metadata["sampRate"])
  259. # we add the attribute lazy_shape with the size if loaded
  260. anasig.lazy_shape = tvect.shape
  261. else:
  262. #get the analog object
  263. sig = self.fd.get_entity(channel_index)
  264. #get the units (V, mV etc)
  265. sigUnits = sig.units
  266. #get the electrode number
  267. chanName = sig.label[-4:]
  268. #transform t_start into index (reading will start from this index)
  269. startat = int(t_start*self.metadata["sampRate"])
  270. #get the number of bins to read in
  271. bins = int(segment_duration * self.metadata["sampRate"])
  272. #if the number of bins to read is bigger than
  273. #the total number of bins, read only till the end of analog object
  274. if startat+bins > sig.item_count:
  275. bins = sig.item_count-startat
  276. #read the data from the sig object
  277. sig,_,_ = sig.get_data(index = startat, count = bins)
  278. #store it to the 'AnalogSignal' object
  279. anasig = AnalogSignal(sig, units = sigUnits, sampling_rate=self.metadata["sampRate"] * pq.Hz,
  280. t_start=t_start * pq.s,
  281. t_stop = (t_start+segment_duration)*pq.s,
  282. channel_index=channel_index)
  283. # annotate from which electrode the signal comes from
  284. anasig.annotate(info = "signal from channel %s" %chanName )
  285. return anasig
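
    # Lazy-mode sketch (assumed channel index, mirroring the lazy convention
    # used by other neo IOs): no samples are loaded, only lazy_shape is set.
    #     sig = io.read_analogsignal(lazy=True, channel_index=2,
    #                                segment_duration=10.)
    #     sig.lazy_shape   # shape the data would have if it were loaded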

    # function to read spike trains
    def read_spiketrain(self,
                        # the first 2 keyword arguments are imposed by the neo.io API
                        lazy=False,
                        cascade=True,
                        channel_index=0,
                        segment_duration=0.,
                        t_start=0.):
        """
        Function to read in spike trains. This API still does not support
        reading in specific channels as they are recorded; rather, the function
        gets the entity set by 'channel_index', which is collected in the
        __init__ function (all spike channels).
        """
        # sampling rate
        sr = self.metadata["sampRate"]
        # create a list to store spike train times
        times = list()

        if lazy:
            # we add the attribute lazy_shape with the size if lazy
            spiketr = SpikeTrain(times, units=pq.s,
                                 t_stop=t_start + segment_duration,
                                 t_start=t_start * pq.s, lazy_shape=40)
        else:
            # get the spike data from a specific channel index
            tempSpks = self.fd.get_entity(channel_index)
            # transform t_start into an index (reading will start from this index)
            startat = tempSpks.get_index_by_time(t_start, 0)  # zero means closest index to the value
            # get the last index to read, using segment duration and t_start
            endat = tempSpks.get_index_by_time(float(segment_duration + t_start), -1)  # -1 means last index before the time
            numIndx = endat - startat
            # create an empty numpy array to store the waveforms
            waveforms = np.zeros([numIndx, tempSpks.max_sample_count])
            # loop through the data from the specific channel index
            for i in range(startat, endat, 1):
                # get cutout, timestamp, cutout duration, and spike unit
                tempCuts, timeStamp, duration, unit = tempSpks.get_data(i)
                # save the cutout in the waveform matrix; indexing is relative
                # to startat, so the first spike read lands in row 0
                waveforms[i - startat] = tempCuts[0]
                # append the time stamp to the list
                times.append(timeStamp)
            # create a spike train object
            spiketr = SpikeTrain(times, units=pq.s,
                                 t_stop=t_start + segment_duration,
                                 t_start=t_start * pq.s,
                                 name="spikes from electrode" + tempSpks.label[-3:],
                                 waveforms=waveforms * pq.volt,
                                 sampling_rate=sr * pq.Hz,
                                 file_origin=self.filename,
                                 annotate=("channel_index:" + str(channel_index)))
        return spiketr
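
    # Usage sketch (assumes at least one "spks" entity was found in __init__):
    #     st = io.read_spiketrain(channel_index=io.metadata["spkChanId"][0],
    #                             segment_duration=10., t_start=0.)
    #     st.waveforms.shape   # (n_spikes, max_sample_count)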

    def read_eventarray(self, lazy=False, cascade=True,
                        channel_index=0,
                        t_start=0.,
                        segment_duration=0.):
        """
        Function to read digital timestamps. This function only reads the event
        onsets. To get digital event durations, use the epoch function
        (read_epocharray).
        """
        if lazy:
            eva = Event(file_origin=self.filename)
        else:
            # create temporary empty lists to store the data
            tempNames = list()
            tempTimeStamp = list()
            # get the entity from the file
            trigEntity = self.fd.get_entity(channel_index)
            # transform t_start into an index (reading will start from this index)
            startat = trigEntity.get_index_by_time(t_start, 0)  # zero means closest index to the value
            # get the last index to read, using segment duration and t_start
            endat = trigEntity.get_index_by_time(float(segment_duration + t_start), -1)  # -1 means last index before the time
            # numIndx = endat - startat
            # run through the specified interval in the entity
            for i in range(startat, endat + 1, 1):
                # get in which digital bit the trigger was detected
                tempNames.append(trigEntity.label[-8:])
                # get the time stamps of the onset events
                tempData, onOrOff = trigEntity.get_data(i)
                # if this was an onset event, save it to the list.
                # on triggered recordings it seems that only onset events are
                # recorded; on continuous recordings both onset (==1)
                # and offset (==255) seem to be recorded
                if onOrOff == 1:
                    # append the time stamp to the list
                    tempTimeStamp.append(tempData)
            # create an event array
            eva = Event(labels=np.array(tempNames, dtype="S"),
                        times=np.array(tempTimeStamp) * pq.s,
                        file_origin=self.filename,
                        description="the trigger events (without durations)")
        return eva
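
    # Usage sketch (assumes at least one "trig" entity exists in the file):
    #     ev = io.read_eventarray(channel_index=io.metadata["triggersId"][0],
    #                             segment_duration=10.)
    #     ev.times   # onset timestamps, in seconds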

    def read_epocharray(self, lazy=False, cascade=True,
                        channel_index=0,
                        t_start=0.,
                        segment_duration=0.):
        """
        Function to read digital timestamps. This function reads the event
        onsets and offsets and outputs onsets and durations. To get only
        onsets, use the read_eventarray function.
        """
        if lazy:
            epa = Epoch(file_origin=self.filename,
                        times=None, durations=None, labels=None)
        else:
            # create temporary empty lists to store the data
            tempNames = list()
            tempTimeStamp = list()
            durations = list()
            # get the entity from the file
            digEntity = self.fd.get_entity(channel_index)
            # transform t_start into an index (reading will start from this index)
            startat = digEntity.get_index_by_time(t_start, 0)  # zero means closest index to the value
            # get the last index to read, using segment duration and t_start
            endat = digEntity.get_index_by_time(float(segment_duration + t_start), -1)  # -1 means last index before the time
            # run through the entity using only odd "i"s, pairing each offset
            # (odd index) with the onset that precedes it (even index)
            for i in range(startat, endat + 1, 1):
                if i % 2 == 1:
                    # get in which digital bit the trigger was detected
                    tempNames.append(digEntity.label[-8:])
                    # get the time stamp of the even (onset) event
                    tempData, onOrOff = digEntity.get_data(i - 1)
                    # on triggered recordings it seems that only onset events are
                    # recorded; on continuous recordings both onset (==1)
                    # and offset (==255) seem to be recorded
                    # if onOrOff == 1:
                    # append the time stamp to the list
                    tempTimeStamp.append(tempData)
                    # get the time stamp of the odd (offset) event
                    tempData1, onOrOff = digEntity.get_data(i)
                    # if onOrOff == 255:
                    #     pass
                    durations.append(tempData1 - tempData)
            epa = Epoch(file_origin=self.filename,
                        times=np.array(tempTimeStamp) * pq.s,
                        durations=np.array(durations) * pq.s,
                        labels=np.array(tempNames, dtype="S"),
                        description="digital events with duration")
        return epa
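

# A minimal end-to-end sketch (hypothetical path "recording.mcd", not shipped
# with this module; requires the neuroshare bindings and, on some setups, a
# vendor DLL passed via `dllpath`):
if __name__ == "__main__":
    io = NeuroshareapiIO(filename="recording.mcd")
    # read the first 10 seconds of the file into a Segment
    seg = io.read_segment(t_start=0., segment_duration=10.)
    print(len(seg.analogsignals), "analog signals")
    print(len(seg.spiketrains), "spike trains")
    print(len(seg.events), "event channels")
    print(len(seg.epochs), "epoch channels")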