RunSimulation_hyperparameterize.py
############
'''import packages'''
from tqdm import tqdm
import h5py
import tables as tb
import platform
import sys

os_name = platform.system()
if os_name == 'Darwin':
    sys.path.append('/Users/kperks/mnt/engram_share/home/kep2142/scripts/Python/Analysis/')
if os_name == 'Linux':
    sys.path.append('/mnt/engram/scripts/Python/Analysis/')
from ClassDef_AmplitudeShift_Stable import AmpShift_Stable
from Plotting import figsave, create_fig, create_fig_tuning, set_fig_style

from brian2 import *
import sympy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
import seaborn as sns
from scipy import signal
from scipy import optimize
from scipy import stats
from scipy.stats import gaussian_kde
import pickle
import random
from sklearn.linear_model import LinearRegression
from sklearn import datasets, linear_model
# from sklearn.cross_validation import train_test_split
import matplotlib
# matplotlib.rcParams['pdf.fonttype'] = 42
############
'''set up parameters and filepaths'''
rootpath = Path('/Users/kperks/mnt/')
savepath = rootpath / 'engram_share/locker/GranularCellPaperResources/Draft_For_Submission/Code/Results'
exptpath = rootpath / 'engram_share/home/kep2142/spikedata'
data_folder = exptpath / 'data_raw'
df_folder = exptpath / 'data_processed' / 'Figures_GRC_properties' / 'Unsubtracted_CvsU' / 'df_cmdintact'
meta_data_folder = exptpath / 'data_processed' / 'GRC_properties_Meta'
figure_folder = Path('/Users/kperks/mnt/engram_share') / 'locker' / 'GranularCellPaperResources' / 'Figure_RawEPScomponents'

# for storing simulation states
sim_filename = 'grc_model_init.pickle'
sim_filepath = savepath / sim_filename

n_runs = 20
n_inputs_list = [4, 5, 6, 7]  # afferent population sizes to sweep
x = np.asarray([-40, -30, -20, -10, -5, 0, 5, 10, 20, 30, 40])  # stimulus amplitude (ampshift) values
sim_dt = 0.1
# xtime = np.linspace(0,50,int(0.05/sim_dt*1000))

meta_params = {
    'N_inputs': 4 * 4,  # n afferent inputs x 4 possible spikes each (overwritten per sweep value in MAIN)
    'N_runs': n_runs,
    'duration': 0.05 * second,
    'onset_offset': 0,  # 5 msec is for figure making, because data are plotted with 5 msec pre-stim onset  # 4.5,
    'tau_e1': 4 * ms,
    'tau_e2': 1 * ms,
    'e_lmi_delay': 4 * ms
}
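# 'invpeak' rescales the difference-of-exponentials synapse defined in
# initialize_model so that 'w_e' sets the PEAK conductance: the conductance rises
# with tau_e2 and decays with tau_e1, and without this factor the peak of the
# biexponential waveform would be smaller than the weight increment. This is the
# standard peak normalization for a biexponential synapse (cf. the Brian2 docs).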
invpeak = (meta_params['tau_e2'] / meta_params['tau_e1']) ** \
          (meta_params['tau_e1'] / (meta_params['tau_e2'] - meta_params['tau_e1']))

namespace_sim = {
    'sim_dt': 0.1 * ms,
    'Cm': 12 * pF,
    'E_l': -70 * mV,
    'g_l': 1 * nS,  # a 1 GOhm cell has g_l = 1 nS
    'E_e': 0 * mV,
    'E_e_lmi': -90 * mV,
    'V_th': 0 * mV,
    'V_r': -70 * mV,
    'w_e': 0.1 * nS,
    'w_e_lmi': 0 * nS,  # inhibitory (lmi) weight; 0 nS = lmi pathway off in these runs
    'tau_e1': 4 * ms,
    'tau_e2': 1 * ms,
    'tau_e_lmi': 5 * ms,
    'invpeak': invpeak
}
############
'''Define Functions'''
def exp_fit(x, a, k, b):
    return a * np.exp(x * k) + b
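# exp_fit is the latency tuning model used throughout: first-spike latency as a
# function of stimulus amplitude, fsl = a*exp(k*x) + b, with the parameters
# referred to below as 'stretch' (a), 'tau' (k), and 'offset' (b).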
def exclude_HighThreshAff(meta_df):
    x = np.unique(meta_df.ampshift)  # array([-40,-30,-20,-10,-5,0,5,10,20,30,40])
    spk_thresh = 11
    n_excluded = 0
    n_included = 0
    expt_excluded = []
    meta_params_df = pd.DataFrame()
    colors = ['blue', 'orange', 'brown', 'green']
    for i, animal in enumerate(np.unique(meta_df['animalID'])):
        animal_df = meta_df[meta_df['animalID'] == animal]
        # print(str(animal) + '(' + colors[i] + ')' + ' has a total of ' + str(len(np.unique(animal_df['exptname']))) + ' afferents recorded')
        params_all = []
        sse_all = []
        df_all = []
        for name in np.unique(animal_df['exptname']):
            expt_df = animal_df[animal_df['exptname'] == name]
            try:
                x_data = expt_df.dropna(axis=0).ampshift.values
                y_data = expt_df.dropna(axis=0).fsl.values
                if len(np.unique(x_data)) > 5:
                    n_included += 1
                    spk_thresh = np.max(y_data)
                    params, params_covariance = optimize.curve_fit(
                        exp_fit, x_data, y_data,
                        p0=[1, -0.05, 3],
                        bounds=([0, -1, 1], [np.inf, 0, 10]),
                        maxfev=1000)
                    params_all.append(params)
                    c = []
                    for x_, y_ in zip(x_data, y_data):
                        yh = exp_fit(x_, params[0], params[1], params[2])
                        s = np.std(y_data[x_data == x_])  # std of y at each x to adjust chi2 (currently unused)
                        c.append((y_ - yh) ** 2)
                    sse_all.append(np.sum(c))
                    df_all.append(len(y_data) - 3)
                if len(np.unique(x_data)) <= 5:
                    n_excluded += 1
                    expt_excluded.append(name)
                    params = np.array([np.nan, np.nan, np.nan])
                    params_all.append(params)
                    sse_all.append(np.nan)
                    df_all.append(len(y_data) - 3)
            except Exception:
                n_excluded += 1
                expt_excluded.append(name)
                params = np.array([np.nan, np.nan, np.nan])
                params_all.append(params)
                sse_all.append(np.nan)
                df_all.append(len(y_data) - 3)
        exptname = animal_df.groupby('exptname').exptname.describe().top.values
        max_fsl = animal_df.groupby('exptname').fsl.max().values
        a = [p[0] for p in params_all]
        k = [p[1] for p in params_all]
        b = [p[2] for p in params_all]
        meta_params = {
            'exptname': exptname,
            'animal': animal,
            'stretch': a,
            'tau': k,
            'offset': b,
            'max_fsl': max_fsl,
            'sse': sse_all,
            'df': df_all  # degrees of freedom: n data points - 3 fit parameters
        }
        animal_df = pd.DataFrame(meta_params)
        meta_params_df = pd.concat([meta_params_df, animal_df], sort=False, ignore_index=False)
    # print('number afferents excluded because first fsl threshold too high or could not be fit: ' + str(n_excluded))
    # print('number afferents included (spike thresh at least 0%): ' + str(n_included))
    # plt.ylim(1.8,15)
    # plt.legend(bbox_to_anchor=(1.1, 1.05))
    return meta_params_df, expt_excluded
def assess_fits(meta_params_df):
    un_fit = meta_params_df[meta_params_df['sse'].gt(meta_params_df['sse'].quantile(.9))]
    well_fit = meta_params_df[meta_params_df['sse'].lt(meta_params_df['sse'].quantile(.9))]
    # plt.figure(figsize = (2,4))
    # well_fit.boxplot('sse')
    print('unfit afferents')
    print(un_fit)
    return well_fit, un_fit
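# assess_fits splits afferents at the 90th percentile of the fit SSE: the worst
# ~10% are reported as un_fit and excluded downstream. Note that the strict gt/lt
# comparisons place an afferent whose SSE equals the quantile exactly in neither
# group, and afferents with NaN SSE (unfittable) fail both tests and are dropped.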
def get_afferents_subsampled(n_inputs):
    # get spike times for afferent population:
    # sample n_inputs real afferents with replacement (each contributes up to 4 spikes)
    subset = random.choices(np.unique(meta_df['exptname']), k=n_inputs)
    multispike = []
    fsl = []
    # get spike times across stimamps
    for exptname in subset:
        expt_df = meta_df[meta_df['exptname'] == exptname]
        multispike.append(expt_df.groupby('ampshift').mean(numeric_only=True)['s0t'].values)
        fsl.append(expt_df.groupby('ampshift').mean(numeric_only=True)['s0t'].values)
        multispike.append(expt_df.groupby('ampshift').mean(numeric_only=True)['s1t'].values)
        multispike.append(expt_df.groupby('ampshift').mean(numeric_only=True)['s2t'].values)
        multispike.append(expt_df.groupby('ampshift').mean(numeric_only=True)['s3t'].values)
    multispike = np.asarray(multispike).T
    fsl = np.asarray(fsl).T
    return multispike, fsl
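# The returned arrays are stacked per stimulus amplitude: multispike has one row
# per ampshift value (11 here) and 4 columns per sampled afferent (mean spike
# times s0t-s3t, NaN where that spike did not occur); fsl keeps only the
# first-spike (s0t) column for each afferent.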
def generate_y_fit_multispike_gauss(x, n_inputs, dataset, rv):
    # generate y_fit array on each iteration
    stretch_min = np.min(dataset[:, 0])
    stretch_max = np.max(dataset[:, 0])
    tau_min = np.min(dataset[:, 1])
    tau_max = np.max(dataset[:, 1])
    offset_min = np.min(dataset[:, 2])
    offset_max = np.max(dataset[:, 2])
    # max_fsl = np.max(dataset[:,3])
    max_fsl = 11
    offset_2 = 1.74  # these are averages from empirical data, in ms from first spikes
    offset_3 = 3.66
    offset_4 = 6.37
    i = 0
    y_fit = []
    fsl = []
    while i < n_inputs:
        [s, t, o] = rv.resample(1)
        # rejection sampling: redraw until the sample lies within the empirical parameter bounds
        while (s < stretch_min) \
                | (s > stretch_max) \
                | (t < tau_min) \
                | (t > tau_max) \
                | (o < offset_min) \
                | (o > offset_max):
            [s, t, o] = rv.resample(1)
        i += 1
        y = exp_fit(x, s, t, o)
        spk_thresh = max_fsl
        y[y > spk_thresh] = np.nan  # censor spikes later than the latency threshold
        y_fit.append(y)
        fsl.append(y)
        y2 = y + offset_2
        y2[y2 > spk_thresh] = np.nan
        y_fit.append(y2)
        y3 = y + offset_3
        y3[y3 > spk_thresh] = np.nan
        y_fit.append(y3)
        y4 = y + offset_4
        y4[y4 > spk_thresh] = np.nan
        y_fit.append(y4)
    y_fit = np.asarray(y_fit).T
    fsl = np.asarray(fsl).T
    return y_fit, fsl
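# This generator draws heterogeneous latency tuning: each model afferent gets its
# own (stretch, tau, offset) sampled from the 3-D Gaussian KDE fit to the well-fit
# empirical afferents ('rv', built in MAIN), with the 2nd-4th spikes placed at
# fixed mean inter-spike offsets and censored at the shared latency threshold.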
def generate_y_fit_multispike_homogisi(x, n_inputs, dataset, rv):
    basis, _ = generate_y_fit_multispike_gauss(x, 1, dataset, rv)
    # select a subset of real afferents whose p(spike) pattern is mimicked
    subset = random.choices(np.unique(meta_df['exptname']), k=n_inputs)
    multispike = []
    # get spike times across stimamps
    for expt_ in subset:
        spkt = meta_df[meta_df['exptname'].isin([expt_])].groupby(['ampshift']).mean(numeric_only=True)[['s0t', 's1t', 's2t', 's3t']].values
        spk_mask = np.isnan(spkt)
        # every afferent shares the same spike-time basis but keeps its own empirical NaN (no-spike) mask
        y = basis.copy()
        y[spk_mask] = np.nan
        multispike.extend(y.T)
    multispike = np.asarray(multispike).T
    return multispike
def generate_y_fit_multispike_homog(x, n_inputs, dataset, rv):
    # generate y_fit array on each iteration; every afferent uses the median empirical parameters
    stretch_static = np.median(dataset[:, 0])
    tau_static = np.median(dataset[:, 1])
    offset_static = np.median(dataset[:, 2])
    max_fsl = 11
    # offset_min = np.min(dataset[:,2])
    # offset_max = np.max(dataset[:,2])
    offset_2 = 1.74  # these are averages from empirical data, in ms from first spikes
    offset_3 = 3.66
    offset_4 = 6.37
    y_fit = []
    fsl = []
    for i in range(n_inputs):
        y = exp_fit(x, stretch_static, tau_static, offset_static)
        spk_thresh = max_fsl
        y[y > spk_thresh] = np.nan
        y_fit.append(y)
        fsl.append(y)
        y2 = y + offset_2
        y2[y2 > spk_thresh] = np.nan
        y_fit.append(y2)
        y3 = y + offset_3
        y3[y3 > spk_thresh] = np.nan
        y_fit.append(y3)
        y4 = y + offset_4
        y4[y4 > spk_thresh] = np.nan
        y_fit.append(y4)
    y_fit = np.asarray(y_fit).T
    fsl = np.asarray(fsl).T
    return y_fit, fsl
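# Together with get_afferents_subsampled, these generators supply the four model
# conditions saved below (this reading follows the dataset names used in MAIN):
#   'subsamp'      - spike times subsampled from real afferents
#   'homog_isi'    - one shared KDE-drawn latency curve, per-afferent empirical spike masks
#   'homog_pspike' - per-afferent KDE-drawn latency curves with fixed mean ISI offsets
#   'homog_all'    - all afferents share the median latency parameters (fully homogeneous)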
def initialize_model(namespace_sim, meta_params):
    start_scope()
    ################################################################################
    # Model definition
    ################################################################################
    ### Neurons
    neuron_eq = '''
    dv/dt = (g_l*(E_l-v) + g_e*(E_e-v) + g_e_lmi*(E_e_lmi-v))/Cm : volt
    dg_e/dt = (invpeak * s - g_e)/tau_e1 : siemens
    ds/dt = -s/tau_e2 : siemens
    dg_e_lmi/dt = -g_e_lmi/tau_e_lmi : siemens # post-synaptic electrosensory inhibitory conductance
    '''
    neurons = NeuronGroup(1, model=neuron_eq, method='euler', name='grc', namespace=namespace_sim)
    neurons.v = -70*mV
    # set up monitor of post-synaptic neuron parameters
    state_mon = StateMonitor(neurons, ['v'], record=0, name='state_mon')
    n_aff = meta_params['N_inputs']
    aff_ind = np.arange(n_aff)
    aff_t = sorted(np.zeros(n_aff))*ms
    G_e = SpikeGeneratorGroup(n_aff, aff_ind, aff_t, name='aff_input')
    lmi_t = np.zeros(1)*ms
    G_e_lmi = SpikeGeneratorGroup(1, np.array([0]), lmi_t, name='lmi_input')
    syn_e = Synapses(G_e, neurons, on_pre='s += w_e', namespace=namespace_sim, name='aff_syn')
    syn_e_lmi = Synapses(G_e_lmi, neurons, on_pre='g_e_lmi_post += w_e_lmi', delay=meta_params['e_lmi_delay'], namespace=namespace_sim, name='aff_lmi_syn')
    # Now connect the synapses...
    syn_e.connect(i=aff_ind, j=0)
    syn_e_lmi.connect(i=0, j=0)
    net = Network(collect())
    return net
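# Note: the NeuronGroup above has no threshold/reset, so 'V_th' and 'V_r' in
# namespace_sim are unused and the model is purely subthreshold - state_mon records
# the granule cell's summed EPSP waveform rather than output spikes. With
# 'w_e_lmi' = 0 nS, the inhibitory (lmi) pathway is wired up but silent unless
# that weight is changed.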
############
'''MAIN'''
exptdate = 'all'
# meta_df.to_csv('DF_AfferentPopulation_' + exptdate + '.csv')
meta_df = pd.read_csv('/Users/kperks/mnt/engram_share/home/kep2142/spikedata/data_processed/DF_AfferentPopulation_all.csv')
# meta_df = pd.read_csv('DF_AfferentPopulation_' + exptdate + '.csv')
meta_params_df, expt_excluded = exclude_HighThreshAff(meta_df)
well_fit, un_fit = assess_fits(meta_params_df)
data_df = well_fit
dataset = list(zip(
    data_df['stretch'],
    data_df['tau'],
    data_df['offset'],
    data_df['max_fsl']
))
dataset = np.asarray(dataset)
rv = gaussian_kde(dataset[:, 0:3].T)  # 3-D KDE over (stretch, tau, offset) of the well-fit afferents
max_fsl_global = np.max(meta_params_df['max_fsl'])
filtered_df = meta_df[meta_df.exptname.str.match('|'.join(well_fit.exptname.values))]
meta_df = pd.read_csv('/Users/kperks/mnt/engram_share/home/kep2142/spikedata/data_processed/DF_Afferent_MultiSpikePop_ALL.csv')
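# Note: 'filtered_df' and 'max_fsl_global' are computed but not used below; the
# multispike population DataFrame loaded into 'meta_df' on the line above is what
# get_afferents_subsampled and generate_y_fit_multispike_homogisi read (both
# reference the module-level name 'meta_df').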
for a in n_inputs_list:
    filename = 'simulation_results_12pF_' + str(a) + 'inputs.h5'
    model_h5_resultspath = savepath / filename
    meta_params['N_inputs'] = a * 4
    with h5py.File(model_h5_resultspath, 'a') as h5file:
        h5file.require_group('results')  # ensure the results group exists
        group = h5file.require_group('metadata')
        group.create_dataset('stimamp', data=x)
        # group.create_dataset('xtime',data=xtime) # get this from subsamp simulation
        group.create_dataset('n_inputs', data=a)
        group.create_dataset('n_runs', data=n_runs)
    ############# def run_4_model_conditions(x, n_runs, n_inputs):
    # get a canonical multispike tuning function from the fsl generative model and offsets, with a basic spike-latency threshold
    net = initialize_model(namespace_sim, meta_params)
    net.store('initialized', filename=sim_filepath)

    ### subsample from data
    R_wavs = []
    onset = []
    print('doing subsampled simulation')
    for _ in tqdm(range(n_runs)):
        # get spike times for afferent population
        multispike, fsl = get_afferents_subsampled(a)
        R_wavs_ = []  # initialize array to store responses across the stimulus set
        onset_ = []
        for y in multispike:
            miny = np.nan
            net.restore('initialized', filename=sim_filepath)  # resets the simulation clock to 0;
            # the state of the system at the time of restore carries into the network operation accordingly
            y = y[~np.isnan(y)]  # remove nan values from the array of spike times
            spk_t_aff = np.asarray(y)*ms  # spike time array for the SpikeGeneratorGroup
            ind_aff = np.empty(0)  # default afferent index array in case all spikes were nan
            spk_t_lmi = np.empty(0)*ms  # default lmi spike time in case of no afferent input (all nan)
            ind_lmi = np.empty(0)  # default lmi index array in case all spikes were nan
            if len(y) != 0:  # if not all nan, create index arrays for the afferent spikes and the lmi spike
                ind_aff = np.arange(len(y))
                ind_lmi = np.arange(1)
                spk_t_lmi = [np.min(y)]*ms
                miny = np.min(y)
            # update SpikeGeneratorGroup neuron indices and spikes
            net['aff_input'].set_spikes(ind_aff, spk_t_aff)
            net['lmi_input'].set_spikes(ind_lmi, spk_t_lmi)
            net.run(duration=meta_params['duration'])
            r = net['state_mon'].v/mV  # voltage trace from this simulation trial
            xtime = net['state_mon'].t/ms
            R_wavs_.append(r.reshape(-1))  # append the response for this stimamp to the response mat across all stimamps
            onset_.append(miny)
        R_wavs.append(np.asarray(R_wavs_).T)  # append the result from this run across stimamps
        onset.append(np.asarray(onset_).T)
    R_wavs = np.asarray(R_wavs).T
    onset = np.asarray(onset).T
    with h5py.File(model_h5_resultspath, 'a') as h5file:
        group = h5file.get('metadata')
        group.create_dataset('xtime', data=xtime)
        group = h5file.get('results')
        group.create_dataset('subsamp', data=R_wavs)
        group.create_dataset('subsamp_onset', data=onset)
    # homogeneous isi
    R_wavs = []
    onset = []
    print('doing homog_isi simulation')
    for _ in tqdm(range(n_runs)):
        # get spike times for afferent population
        multispike = generate_y_fit_multispike_homogisi(x, a, dataset, rv)
        R_wavs_ = []
        onset_ = []
        for y in multispike:  # same per-trial procedure as the subsampled block above
            miny = np.nan
            net.restore('initialized', filename=sim_filepath)  # resets the simulation clock to 0
            y = y[~np.isnan(y)]
            spk_t_aff = np.asarray(y)*ms
            ind_aff = np.empty(0)
            spk_t_lmi = np.empty(0)*ms
            ind_lmi = np.empty(0)
            if len(y) != 0:
                ind_aff = np.arange(len(y))
                ind_lmi = np.arange(1)
                spk_t_lmi = [np.min(y)]*ms
                miny = np.min(y)
            net['aff_input'].set_spikes(ind_aff, spk_t_aff)
            net['lmi_input'].set_spikes(ind_lmi, spk_t_lmi)
            net.run(duration=meta_params['duration'])
            r = net['state_mon'].v/mV
            R_wavs_.append(r.reshape(-1))
            onset_.append(miny)
        R_wavs.append(np.asarray(R_wavs_).T)
        onset.append(np.asarray(onset_).T)
    R_wavs = np.asarray(R_wavs).T
    onset = np.asarray(onset).T
    with h5py.File(model_h5_resultspath, 'a') as h5file:
        group = h5file.get('results')
        group.create_dataset('homog_isi', data=R_wavs)
        group.create_dataset('homog_isi_onset', data=onset)
    # homog pspike
    R_wavs = []
    onset = []
    print('doing homog_pspike simulation')
    for _ in tqdm(range(n_runs)):
        # get spike times for afferent population
        multispike, fsl = generate_y_fit_multispike_gauss(x, a, dataset, rv)
        R_wavs_ = []
        onset_ = []
        for y in multispike:  # same per-trial procedure as the subsampled block above
            miny = np.nan
            net.restore('initialized', filename=sim_filepath)  # resets the simulation clock to 0
            y = y[~np.isnan(y)]
            spk_t_aff = np.asarray(y)*ms
            ind_aff = np.empty(0)
            spk_t_lmi = np.empty(0)*ms
            ind_lmi = np.empty(0)
            if len(y) != 0:
                ind_aff = np.arange(len(y))
                ind_lmi = np.arange(1)
                spk_t_lmi = [np.min(y)]*ms
                miny = np.min(y)
            net['aff_input'].set_spikes(ind_aff, spk_t_aff)
            net['lmi_input'].set_spikes(ind_lmi, spk_t_lmi)
            net.run(duration=meta_params['duration'])
            r = net['state_mon'].v/mV
            R_wavs_.append(r.reshape(-1))
            onset_.append(miny)
        R_wavs.append(np.asarray(R_wavs_).T)
        onset.append(np.asarray(onset_).T)
    R_wavs = np.asarray(R_wavs).T
    onset = np.asarray(onset).T
    with h5py.File(model_h5_resultspath, 'a') as h5file:
        group = h5file.get('results')
        group.create_dataset('homog_pspike', data=R_wavs)
        group.create_dataset('homog_pspike_onset', data=onset)
    # homog all
    R_wavs = []
    onset = []
    print('doing homog_all simulation')
    for _ in tqdm(range(n_runs)):
        # get spike times for afferent population
        multispike, fsl = generate_y_fit_multispike_homog(x, a, dataset, rv)
        R_wavs_ = []
        onset_ = []
        for y in multispike:  # same per-trial procedure as the subsampled block above
            miny = np.nan
            net.restore('initialized', filename=sim_filepath)  # resets the simulation clock to 0
            y = y[~np.isnan(y)]
            spk_t_aff = np.asarray(y)*ms
            ind_aff = np.empty(0)
            spk_t_lmi = np.empty(0)*ms
            ind_lmi = np.empty(0)
            if len(y) != 0:
                ind_aff = np.arange(len(y))
                ind_lmi = np.arange(1)
                spk_t_lmi = [np.min(y)]*ms
                miny = np.min(y)
            net['aff_input'].set_spikes(ind_aff, spk_t_aff)
            net['lmi_input'].set_spikes(ind_lmi, spk_t_lmi)
            net.run(duration=meta_params['duration'])
            r = net['state_mon'].v/mV
            R_wavs_.append(r.reshape(-1))
            onset_.append(miny)
        R_wavs.append(np.asarray(R_wavs_).T)
        onset.append(np.asarray(onset_).T)
    R_wavs = np.asarray(R_wavs).T
    onset = np.asarray(onset).T
    with h5py.File(model_h5_resultspath, 'a') as h5file:
        group = h5file.get('results')
        group.create_dataset('homog_all', data=R_wavs)
        group.create_dataset('homog_all_onset', data=onset)
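
############
'''Example: reading results back'''
# A minimal sketch (not part of the original pipeline) showing how the HDF5 files
# written above can be read back; the dataset names match those created in MAIN,
# and the function name/signature is an illustrative assumption. It is defined but
# never called here, so it does not change what this script does when run.
def load_simulation_results(h5_path, condition='subsamp'):
    """Return (xtime, stimamp, R_wavs, onset) for one model condition.

    condition is one of 'subsamp', 'homog_isi', 'homog_pspike', 'homog_all'.
    """
    with h5py.File(h5_path, 'r') as h5file:
        xtime = h5file['metadata/xtime'][:]      # time base (ms) of the voltage traces
        stimamp = h5file['metadata/stimamp'][:]  # stimulus amplitude values
        R_wavs = h5file['results/' + condition][:]             # voltage traces per stimamp/run
        onset = h5file['results/' + condition + '_onset'][:]   # earliest afferent spike per trial
    return xtime, stimamp, R_wavs, onset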