#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 15:18:09 2021
@author: Oscar Portoles
"""
import numpy as np
import numba as nb
import scipy.stats as st
import scipy.io as sio
import time
from itertools import combinations
import copy


class WBLSmodel():
    """ Whole-brain large-scale model and analysis methods. """

    def __init__(self, stageIx=0, vel=5, zh=None):
        """ @ stageIx: int or list, indexes of the stages to simulate
            @ vel: float, spike-propagation velocity [m/s]
            @ zh: ndim array, initial history of model phases and amplitudes
        """
        self.zhist = zh                     # history of initial conditions
        self.localC = np.float64(0.6)       # default local coupling, L
        self.globalC = np.float64(0.15)     # default global coupling, G
        self.velocity = np.float32(vel)     # velocity [meter / second]
        self.interHsc = np.float32(1.0)     # scaling factor between hemispheres
        self.tMax = np.float32(1.15)        # simulated time [seconds]
        self.fs = np.float32(100.0)         # sampling frequency for FC [Hz]
        self.omega = np.float64(6)          # central natural frequency of ROIs [Hz]
        self.dlt = np.float64(1)            # spread of natural frequencies
        self.dt = np.float64(1e-3)          # integration step [seconds]
        self.stageSim = self._setStageSim(stageIx)
        self.iniCond = 2    # initial conditions - 1: random (fixed seed), 2: empirical initial state,
                            # 3: nModel identical models (testing), 4: empirical initial state over 3rd dimension
        self.log = []
        self.widthPulse = 0.030     # duration of the pulse [seconds]
        self.offMidBump = 0.01      # offset relative to the middle point of a bump [seconds]
        self.preBumpWin = 0.05      # pre-bump window, as long as the bump [seconds]
        self.bumpDur = 0.05         # duration of a bump [seconds]
        self.pathData = './data/'
        self.nameDTIm = 'mCons9Anato.mat'           # structural connectome
        self.nameIni = 'OptiNoisy0p01IniState.npz'  # initial condition of the first stage, from pre-visual encoding + noise
        self.efcData = 'FCstagesDTIrois.mat'        # MEG within-stage dpFC
        self.mapFile = 'mapFS2DTI.mat'              # mapping from the FreeSurfer layout of ROIs to our visualization
        self.nameEnvs = 'meanEnvelopeByStage.mat'   # relative change of envelope amplitude at the onset of stages
        self.nModel = np.int32(25)      # number of models with different initial conditions to run simultaneously
        self.oneModel = False
        self.iniSeed = 1                # seed for random initial conditions
        self._getAnatomicNetw()
        self._getEmpiricalInitialStateNP()
        self._importFCstage()
        self.setLocalParam(omega=self.omega, dlt=self.dlt)
        self.setNumbaThreaths()
    def setNumbaThreaths(self, n=1):
        nb.config.NUMBA_NUM_THREADS = n

    def get_mylogs(self):
        return self.log
    def _setStageSim(self, stSim):
        if isinstance(stSim, list):
            return np.array(stSim)
        else:
            return np.array([stSim])
    def _getAnatomicNetw(self):
        # load anatomical data
        loaddata = self.pathData + self.nameDTIm
        dti = sio.loadmat(loaddata)     # C: structural connectivity, D: distance between areas
        self.D = dti['mcdA']                            # distances between nodes
        self.anato = dti['mcwA'].astype(np.float64)     # structural/anatomical network
        self.C = np.shape(self.D)[1]                    # number of nodes/brain areas
        self.pairs = np.array(list(combinations(range(self.C), 2)), dtype=np.int32)
        self.nFCs = self.pairs.shape[0]
        self.pairsC = np.vstack((self.pairs, np.fliplr(self.pairs)))
        # distances from millimeters to meters
        self.D = self.D / 1000.0
    def _getEmpiricalInitialStateNP(self):
        # load initial conditions
        loaddata = self.pathData + self.nameIni
        iniSta = np.load(loaddata, allow_pickle=True)
        self.eIniPhi = iniSta['iniState'][:, 0, :]
        self.eIniAmp1 = iniSta['iniState'][:, 1, :]
        self.eIniAmp2 = iniSta['iniState'][:, 2, :]
        self.statesT = iniSta['edgeSt'][self.stageSim, :2]
        bump = iniSta['edgeSt'][self.stageSim, 2]
        preBu = np.array([bump - self.bumpDur/2 - self.preBumpWin, bump - self.bumpDur/2])
        atBu = np.array([bump - self.bumpDur/2, bump + self.bumpDur/2])
        if self.stageSim.size == 1:
            if self.stageSim > 0:
                offset_stage_before = iniSta['edgeSt'][self.stageSim-1, 1] - self.dt
                self.statesT = self.statesT - offset_stage_before
                bump = bump - offset_stage_before
                preBu = preBu - offset_stage_before
                atBu = atBu - offset_stage_before
        self.edges_n = np.int32(self.fs * self.statesT)
        self.edges_dt = np.int32((1/self.dt) * self.statesT)
        self.nStates = self.statesT.shape[0]
        self.tMax = self.statesT.max() + 1/self.fs
        self.bump_n = np.int32(self.fs * bump)
        self.bump_dt = np.int32((1/self.dt) * bump)
        self.preBu_dt = np.int32(preBu / self.dt)
        self.atBu_dt = np.int32(atBu / self.dt)
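    # Stage edges are given in seconds (statesT); edges_n indexes the downsampled
    # signal at fs and edges_dt indexes the integration grid at dt. E.g., with the
    # defaults fs = 100 Hz and dt = 1 ms, a stage edge at 0.30 s maps to sample 30
    # in edges_n and to step 300 in edges_dt.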
    def _getEmpiReativEnv(self):
        loaddata = self.pathData + self.nameEnvs
        dti = sio.loadmat(loaddata)
        self.eRelEnv = dti['envRel'][:, self.stageSim]      # [rois, stages]

    def setLocalParam(self, omega=6, dlt=1):
        omga_ = 2*np.pi * np.float64(omega) * self.dt
        dlt_ = np.float64(dlt) * self.dt
        self.locaDyn = np.complex64(-dlt_ + 1j*omga_)

    def _importFCstage(self):
        filepath = self.pathData + self.efcData
        self.eFC = np.float32(sio.loadmat(filepath)['fc1d'][2 + self.stageSim, :])
        self.eFCs = np.sign(self.eFC)
        self.neFCs = np.sum(np.abs(self.eFCs), axis=-1)
        self.inv_neFCs = 1.0 / self.neFCs
    @nb.vectorize([nb.float32(nb.complex64, nb.complex64)], cache=True, target='cpu', nopython=True)
    def _csd(zi, zj):
        csd = zi * np.conjugate(zj)     # cross-spectral density; its imaginary part carries the sign of the phase difference
        csdi = np.imag(csd)
        return np.sign(csdi)

    def dPLI3d_v(self, z):
        # PLI from the cross-spectral density
        z_i = z[self.pairs[:, 0], self.edges_n[0, 0]:self.edges_n[-1, -1], :]
        z_j = z[self.pairs[:, 1], self.edges_n[0, 0]:self.edges_n[-1, -1], :]
        # print('z_i shape: ', z_i.shape[1])
        # print('state chop: ', (self.edges_n[0, 0], self.edges_n[-1, -1]))
        edges_n_0 = self.edges_n - self.edges_n[0, 0]
        # sign of the phase difference
        siCsd = WBLSmodel._csd(z_i, z_j)
        self.pli = np.empty((self.nStates, self.nFCs), dtype=np.float32)
        # PLI over stages
        for i in range(self.nStates):
            # mean PLI over stage samples and models
            self.pli[i, :] = np.mean(siCsd[:, edges_n_0[i, 0]:edges_n_0[i, 1], :], axis=(1, 2))
    def similarityAllPLIs(self):
        # faster than a numba-compiled version
        sigSimFC = np.sign(self.pli)
        return np.sum(self.eFCs * sigSimFC, axis=1) / self.neFCs

    def similarityAllPLIsWenergy(self):
        # faster than a numba-compiled version
        sigSimFC = np.sign(self.pli)
        simil = np.sum(self.eFCs * sigSimFC, axis=1) / self.neFCs
        energy = np.sum(np.abs(self.pulse))
        return simil - energy * self.inv_neFCs / self.maxEner
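    # Both similarity scores count sign agreement between empirical and simulated
    # dPLI over the significant connections:
    #   simil = sum_k sign(eFC_k) * sign(pli_k) / neFCs, bounded in [-1, 1].
    # The energy-penalized variant subtracts sum(|pulse|) / (neFCs * maxEner);
    # note that self.pulse and self.maxEner are not set in __init__ and are
    # expected to be assigned by the caller (see the __main__ block).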
    def getPreAtBumpEnv(self):
        # TODO: change to a z-score of the whole trial?
        self.preBu_env = np.zeros((self.C, self.nModel, self.nStates))
        self.atBu_env = np.zeros((self.C, self.nModel, self.nStates))
        for iXs in range(self.nStates):
            z_pre = self.z[:, int(self.preBu_dt[0, iXs]+self.maxDlay):int(self.preBu_dt[1, iXs]+self.maxDlay), :]
            if self.preBu_dt[0, iXs] + self.maxDlay < 0:
                z_pre = self.z[:, :int(self.preBu_dt[1, iXs]+self.maxDlay), :]
            if z_pre.shape[1] < 5:
                z_pre = np.nan
            z_at = self.z[:, int(self.atBu_dt[0, iXs]+self.maxDlay):int(self.atBu_dt[1, iXs]+self.maxDlay), :]
            self.preBu_env[:, :, iXs] = np.mean(WBLSmodel._absZ(z_pre), axis=1)
            self.atBu_env[:, :, iXs] = np.mean(WBLSmodel._absZ(z_at), axis=1)
    def getScorrelRelEnv(self):
        self.sCor = np.zeros(self.nStates)
        for iXs in range(self.nStates):
            self.sCor[iXs], pval = st.spearmanr(self.eRelEnv[:, iXs], self.relEnv[:, iXs])

    def similarityPLIsWenergySperm(self):
        similFC = np.sum(self.eFCs * np.sign(self.pli), axis=1) / self.neFCs
        energyCoef = np.sum(np.abs(self.pulse)) / self.maxEner
        # print('fc: ', similFC[0], ' energy: ', energyCoef, ' sCor: ', self.sCor)
        return similFC[0] - energyCoef * (1-self.sCor) * self.inv_neFCs

    def relativeEnvBuPre(self):
        self.getPreAtBumpEnv()
        # mean over models of the relative envelope change at the bump
        self.relEnv = np.mean((self.atBu_env - self.preBu_env) / self.preBu_env, axis=1)
    def getAnatoCoupling(self, kG, kL):
        """Get the anatomical network with couplings."""
        # normalize the adjacency matrix so that its off-diagonal mean is 1
        self.anatoNorm = self.anato / np.mean(self.anato[~np.identity(self.C, dtype=bool)])
        kS = self.anatoNorm * kG / self.C       # global coupling
        np.fill_diagonal(kS, kL)                # local coupling
        return np.float64(kS)
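    # Illustrative values: with C = 68 ROIs and the defaults kG = globalC = 0.15,
    # kL = localC = 0.6, the normalized off-diagonal entries of kS average
    # kG/C ≈ 0.0022 while the diagonal (local) entries are 0.6.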
    def getAnatoCouplingIHS(self, kG, kL, ihs):
        """Get the anatomical network with couplings and inter-hemisphere scaling."""
        self.anatoScaleNormali(ihs)             # IHS and normalization of the adjacency matrix
        kS = self.anatoSC * kG / self.C         # global coupling
        np.fill_diagonal(kS, kL)                # local coupling
        return np.float64(kS)

    def anatoScaleNormali(self, h):
        self.anatoSC = np.zeros((self.C, self.C))
        hC = int(self.C/2)
        # scale the connections from one hemisphere to the other
        self.anatoSC[hC:, :hC] = self.anato[hC:, :hC] * h
        self.anatoSC[:hC, hC:] = self.anato[:hC, hC:] * h
        # within-hemisphere connections
        self.anatoSC[:hC, :hC] = self.anato[:hC, :hC]
        self.anatoSC[hC:, hC:] = self.anato[hC:, hC:]
        # normalize the structural network to an off-diagonal mean of 1
        self.anatoSC = self.anatoSC / np.mean(self.anatoSC[~np.identity(self.C, dtype=bool)])
    def _doCouplingLSBMsimu(self):
        """ Get the coupling matrix with the predefined local/global couplings and
        scale it by dt and 1/2, so it is ready to be used in the simulations. """
        if self.interHsc == 1.0:
            K_ = self.getAnatoCoupling(self.globalC, self.localC)
        else:
            K_ = self.getAnatoCouplingIHS(self.globalC, self.localC, self.interHsc)
        ksim = np.float32(0.5 * K_ * self.dt)
        self.Ksim = np.repeat(ksim[:, :, np.newaxis], self.nModel, axis=2)

    def getDelays(self, vel):
        """Return the maximum delay and the delay steps in samples."""
        # dlay = self.D / (1000.0 * vel)  # if D were still in millimeters
        self.dlay = self.D / vel                                            # delays [seconds]; D is already in meters
        self.dlayStep = np.around(self.dlay / self.dt).astype(np.int32)    # delays as integration steps backwards
        self.maxDlay = np.int64(np.max(self.dlayStep))                     # number of time steps of the longest delay
        self.iXc = np.tile(np.arange(self.C, dtype=np.int32), (self.C, 1)).T   # row index for ROIs
    @nb.vectorize([nb.float32(nb.complex64)], cache=True, target='cpu', nopython=True)
    def _absZ(z):
        return np.abs(z)    # modulus (envelope) of z

    # @profile
    def doKuramotoOrder(self, z):
        z_norm = z / self.z_abs     # make all moduli equal to one
        # global order with local order == 1 at time t
        orderD = np.abs(np.mean(z_norm, axis=0))
        # temporal mean and std, averaged over models
        orderSD = np.mean(np.std(orderD, axis=0)).astype(np.float16)
        order1 = np.mean(np.mean(orderD, axis=0)).astype(np.float16)
        # mean local order over ROIs, then over time and models
        orderL = np.mean(np.mean(np.mean(self.z_abs, axis=0), axis=0)).astype(np.float16)
        # mean local variation (metastability), over models
        metaL = np.float16(np.mean(np.std(self.z_abs, axis=1), axis=1))
        orders = np.array([orderL, order1, orderSD])
        return orders, metaL
    def impulsePerturb(self):
        """ Local coupling perturbation:
        (L + pulse) * z_() = Z'_(), where z_() is the coupling variable in the model,
        becomes -> (1 + pulse/L) * z_() = Z'_(); reshaped to 2 dimensions. """
        # print('impulse ID: ', np.sum(self.pulse**2))
        self.impulse = (1 + self.pulse.reshape(self.C, self.nStates) / self.localC).astype(np.float32)
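    # Worked example of the reparameterization above: with the default local coupling
    # L = localC = 0.6 and a pulse entry of 10 (as in the __main__ demo), the local
    # (diagonal) coupling term is multiplied by 1 + 10/0.6 ≈ 17.7 while the pulse is active.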
    def _edgesImpulse(self):
        self.edgePulse = np.zeros((self.nStates, 2))
        for i in range(self.nStates):
            self.edgePulse[i, 0] = self.bump_dt[i] - (self.offMidBump + 0.5*self.widthPulse) * 1/self.dt
            self.edgePulse[i, 1] = self.bump_dt[i] - (self.offMidBump - 0.5*self.widthPulse) * 1/self.dt
    @nb.vectorize([nb.complex64(nb.complex64, nb.complex64, nb.complex64)], cache=True, target='cpu', nopython=True)
    def _sumPartsASdt(param, z_n, couplingSum):
        return z_n + param * z_n - couplingSum

    @nb.vectorize([nb.complex64(nb.float32, nb.complex64, nb.complex64)], cache=True, target='cpu', nopython=True)
    def _coupling(kS, zn2, zp_del):
        z_pro = np.conjugate(zp_del) * zn2
        return kS * (z_pro - zp_del)

    @nb.vectorize([nb.complex64(nb.complex64)], cache=True, target='cpu', nopython=True)
    def _zn2(z):
        return z * z
    def _KMAOcommuParZnV(z, maxDlay, dlayStep, fs, dt, kS, iXc, localPara):
        # time-delayed integration loop (best timing so far)
        for n in range(maxDlay, z.shape[1]-1):
            zn = z[:, n, :]
            iXdel = n - dlayStep
            zp_del = z[iXc, iXdel, :]
            zn2 = WBLSmodel._zn2(zn)
            zn2_rep = np.repeat(zn2[np.newaxis, :, :], zn.shape[0], axis=0)
            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_del)
            kzpn = np.sum(couplin, axis=0)
            # add the differential step
            z[:, n+1, :] = WBLSmodel._sumPartsASdt(localPara, zn, kzpn)
        return z
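    # For reference, the loop above is an Euler step of the coupled amplitude-phase
    # oscillators; for node i (and each model) it computes
    #   z_i[n+1] = z_i[n] + dt*(1j*2*pi*omega - dlt)*z_i[n]
    #              - (dt/2) * sum_j K[j, i] * (conj(z_j[n - tau_ji]) * z_i[n]**2 - z_j[n - tau_ji])
    # where K is built by getAnatoCoupling() (Ksim already carries the 0.5*dt factor)
    # and tau are the axonal delays in integration steps (dlayStep).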
    def _KMAOcommuParZnV_NoDel(z, fs, dt, kS, localPara):
        nMo = z.shape[2]
        # integration loop without delays (best timing so far)
        for n in range(z.shape[1]-1):
            zn = z[:, n, :]
            zn2 = WBLSmodel._zn2(zn)
            zn2_rep = np.lib.stride_tricks.as_strided(zn2, (68, 68, nMo), (0, zn2.strides[0], zn2.strides[1]))
            zp_rep = np.lib.stride_tricks.as_strided(zn, (68, 68, nMo), (zn.strides[0], 0, zn.strides[1]))
            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_rep)
            kzpn = np.sum(couplin, axis=0)
            # add the differential step
            z[:, n+1, :] = WBLSmodel._sumPartsASdt(localPara, zn, kzpn)
        return z
    def _KMAOcommuParZnV_in(z, maxDlay, dlayStep, fs, dt, kS, iXc, localPara, impulse, edges_p):
        # time-delayed integration loop with a local pulse (best timing so far)
        iXdiag = np.diag_indices(68, 2)
        for n in range(maxDlay, z.shape[1]-1):
            zn = z[:, n, :]
            iXdel = n - dlayStep
            zp_del = z[iXc, iXdel, :]
            zn2 = WBLSmodel._zn2(zn)
            zn2_rep = np.repeat(zn2[np.newaxis, :, :], zn.shape[0], axis=0)
            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_del)
            # scale the diagonal (local) couplings by the pulse, in place
            if (n >= edges_p[0, 0]) and (n <= edges_p[0, 1]):
                couplin[iXdiag] *= impulse[:, 0, np.newaxis]
            kzpn = np.sum(couplin, axis=0)
            # add the differential step
            z[:, n+1, :] = WBLSmodel._sumPartsASdt(localPara, zn, kzpn)
        return z
    def _KMAOcommuParZnV_NoDel_in(z, fs, dt, kS, localPara, impulse, edges_p):
        nMo = z.shape[2]
        iXdiag = np.diag_indices(68, 2)
        # integration loop without delays, with a local pulse (best timing so far)
        for n in range(z.shape[1]-1):
            zn = z[:, n, :]
            zn2 = WBLSmodel._zn2(zn)
            zn2_rep = np.lib.stride_tricks.as_strided(zn2, (68, 68, nMo), (0, zn2.strides[0], zn2.strides[1]))
            zp_rep = np.lib.stride_tricks.as_strided(zn, (68, 68, nMo), (zn.strides[0], 0, zn.strides[1]))
            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_rep)
            # scale the diagonal (local) couplings by the pulse, in place
            if (n >= edges_p[0, 0]) and (n <= edges_p[0, 1]):
                couplin[iXdiag] *= impulse[:, 0, np.newaxis]
            kzpn = np.sum(couplin, axis=0)
            # add the differential step
            z[:, n+1, :] = WBLSmodel._sumPartsASdt(localPara, zn, kzpn)
        return z
    def _KMAOcommuParZnV_in_sequen(z, maxDlay, dlayStep, fs, dt, kS, iXc, localPara, impulse, edges_p):
        # time-delayed integration with one pulse per stage, applied sequentially
        iXdiag = np.diag_indices(68, 2)
        for n in range(maxDlay, z.shape[1]-1):
            zn = z[:, n, :]
            iXdel = n - dlayStep
            zp_del = z[iXc, iXdel, :]
            zn2 = WBLSmodel._zn2(zn)
            zn2_rep = np.repeat(zn2[np.newaxis, :, :], zn.shape[0], axis=0)
            couplin = WBLSmodel._coupling(kS, zn2_rep, zp_del)
            # scale the diagonal (local) couplings by the pulse of each stage, in place
            for s in range(edges_p.shape[0]):
                try:
                    if (n >= edges_p[s, 0]) and (n <= edges_p[s, 1]):
                        couplin[iXdiag] *= impulse[:, s, np.newaxis]
                except IndexError:
                    pass
            kzpn = np.sum(couplin, axis=0)
            # add the differential step
            z[:, n+1, :] = WBLSmodel._sumPartsASdt(localPara, zn, kzpn)
        return z
    def downsampleModel(self, z):
        # simple decimation (there may be aliasing); with fs = 100 Hz and dt = 1 ms,
        # every 10th integration sample is kept
        z = z[:, self.maxDlay+1:, :]    # remove the history samples used at the beginning
        return z[:, ::np.int32(1./(self.fs*self.dt)), :]
    def _setHistDelayAndContainers(self):
        if self.velocity != 0:
            self.getDelays(self.velocity)
        else:
            self.maxDlay = 0
        if self.stageSim.size > 1:
            self.zIni = self._doNodeContainers(self.maxDlay)
        elif self.stageSim == 0:
            self.zIni = self._doNodeContainers(self.maxDlay)
        elif self.stageSim > 0:
            self.zIni = self._doNodeContainerSimHistory()
    def _doNodeContainers(self, maxDlay):
        """ Containers with shape [number of ROIs * number of models, number of samples];
        models are concatenated along rows (vertically).
        """
        self.nCM = self.C*self.nModel   # number of models times communities (ROIs)
        eIniAmp1 = self.eIniAmp1[:, :self.nModel].T.ravel()
        eIniAmp2 = self.eIniAmp2[:, :self.nModel].T.ravel()
        eIniPhi = self.eIniPhi[:, :self.nModel].T.ravel()
        # node variables: amplitude and phase [nCM, Nsamples to integrate]
        r = np.zeros((self.nCM, int(self.tMax/self.dt + maxDlay)), dtype=np.float32)
        phi = np.zeros((self.nCM, int(self.tMax/self.dt + maxDlay)), dtype=np.float32)
        # initial conditions as history for the time delays
        omegaT = self.omega * np.tile(np.linspace(0, (maxDlay+1)*self.dt, maxDlay+1, dtype=np.float32), (self.nCM, 1))
        if self.iniCond == 1:       # phase: random with a fixed seed
            np.random.seed(self.iniSeed)
            phiRa = np.tile(np.random.uniform(-np.pi, np.pi, self.nCM), (maxDlay+1, 1)).T
            phi[:, 0:maxDlay+1] = np.float32(np.remainder(omegaT + phiRa, 2*np.pi))
            np.random.seed(self.iniSeed+1)
            r[:, 0:maxDlay+1] = np.tile(np.random.uniform(0.05, 0.95, self.nCM), (maxDlay+1, 1)).T
        elif self.iniCond == 2:     # phase: measured initial conditions
            # phase
            phiIni = np.tile(eIniPhi, (maxDlay+1, 1)).T
            phi[:, 0:maxDlay+1] = np.float32(np.remainder(omegaT + phiIni, 2*np.pi))
            # amplitude: linear ramp built from the two measured amplitudes
            slope = (eIniAmp2 - eIniAmp1) / (-0.11/self.dt)     # a = (y2 - y1) / (x2 - x1)
            interc = eIniAmp1 - slope*(-0.11/self.dt)           # b = y1 - a * x1
            point1 = interc + slope*(maxDlay+1)
            rh = np.zeros((self.nCM, maxDlay+1), dtype=np.float32)
            for i in range(self.nCM):
                rh[i, :] = np.linspace(point1[i], eIniAmp2[i], maxDlay+1)
            rh[rh < 0.05] = 0.05
            r[:, 0:maxDlay+1] = rh
        elif self.iniCond == 3:     # phase: half plane; amplitude: stable 0.5
            phiOff = np.tile(np.linspace(0, np.pi, self.C), (int(maxDlay+1), self.nModel)).T
            phi[:, 0:maxDlay+1] = np.float32(np.remainder(omegaT + phiOff, 2*np.pi))
            r[:, 0:maxDlay+1] = 0.5*np.ones((self.nCM, maxDlay+1), dtype=np.float32)
        elif self.iniCond == 4:     # phase: random; amplitude: stable 0.2
            r = 0.2*np.ones((self.C, int(self.tMax/self.dt + maxDlay), self.nModel), dtype=np.float32)
            phi = np.float32(np.random.randn(self.C, int(self.tMax/self.dt + maxDlay), self.nModel))
        else:
            raise ValueError('self.iniCond is not defined for this value')
        if self.oneModel:   # for testing
            r = r[:self.C, :]
            phi = phi[:self.C, :]
        else:
            z = np.zeros((self.C, r.shape[1], self.nModel), dtype=np.complex64)
            for i in range(self.nModel):
                z[:, :, i] = r[i*self.C:(i+1)*self.C, :] * np.exp(1j*phi[i*self.C:(i+1)*self.C, :])
        self.N = r.shape[1]
        return z
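    # The returned container z is complex64 with shape
    # (C ROIs, tMax/dt + maxDlay samples, nModel models), built as z = r * exp(1j*phi);
    # its first maxDlay+1 columns hold the history needed by the delayed coupling.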
    def _doNodeContainerSimHistory(self):
        """ Takes a delay history as input, typically the previous stage. """
        nh = self.zhist.shape[1]    # length of the history [samples]
        z = np.zeros((self.C, int(nh + self.tMax/self.dt), self.nModel), dtype=np.complex64)
        z[:, :nh, :] = self.zhist
        return z
    def modelBehavior(self, z):
        self.z = copy.deepcopy(z)
        z = self.downsampleModel(z)
        self.z_abs = WBLSmodel._absZ(z)
        self.dPLI3d_v(z)
        self.orders, self.metaL = self.doKuramotoOrder(z)

    def meanEnvBuStage(self):
        # mean envelope over stages
        self.msEnv = np.zeros((self.C, self.nStates), dtype=np.float32)
        self.mbEnv = np.zeros((self.C, self.nStates), dtype=np.float32)
        for i in range(self.nStates):
            # mean envelope amplitude over stage samples and models
            self.msEnv[:, i] = np.mean(self.z_abs[:, self.edges_n[i, 0]:self.edges_n[i, 1], :], axis=(1, 2))
            self.mbEnv[:, i] = np.mean(self.z_abs[:, int(self.bump_n[i]-0.025*self.fs):int(self.bump_n[i]+0.025*self.fs), :], axis=(1, 2))
    def runModel(self):
        self._doCouplingLSBMsimu()
        self.getDelays(self.velocity)
        z = self._doNodeContainers(self.maxDlay)
        z = WBLSmodel._KMAOcommuParZnV(z, self.maxDlay, self.dlayStep, self.fs, self.dt, self.Ksim, self.iXc, self.locaDyn)
        self.modelBehavior(z)

    def runModelNoDel(self):
        self._doCouplingLSBMsimu()
        z = WBLSmodel._KMAOcommuParZnV_NoDel(self.zIni, self.fs, self.dt, self.Ksim, self.locaDyn)
        self.modelBehavior(z)

    def runModelNoDel_in(self):
        """ Pulse created in the model instance. """
        z = WBLSmodel._KMAOcommuParZnV_NoDel_in(self.zIni, self.fs, self.dt, self.Ksim, self.locaDyn, self.impulse, self.edgePulse)
        self.modelBehavior(z)

    def runModel_Pulsein(self):
        self._doCouplingLSBMsimu()
        self.impulsePerturb()
        edgePulse = self.edgePulse + self.maxDlay
        z = WBLSmodel._KMAOcommuParZnV_in(self.zIni, self.maxDlay, self.dlayStep, self.fs, self.dt, self.Ksim, self.iXc, self.locaDyn, self.impulse, edgePulse)
        self.modelBehavior(z)

    def runModelNoDel_Pulsein(self):
        self._doCouplingLSBMsimu()
        self.impulsePerturb()
        z = WBLSmodel._KMAOcommuParZnV_NoDel_in(self.zIni, self.fs, self.dt, self.Ksim, self.locaDyn, self.impulse, self.edgePulse)
        self.modelBehavior(z)

    def runModel_Pulsein_sequence(self):
        self._doCouplingLSBMsimu()
        self.impulsePerturb()
        edgePulse = self.edgePulse + self.maxDlay
        z = WBLSmodel._KMAOcommuParZnV_in_sequen(self.zIni, self.maxDlay, self.dlayStep, self.fs, self.dt, self.Ksim, self.iXc, self.locaDyn, self.impulse, edgePulse)
        self.modelBehavior(z)


if __name__ == "__main__":
    # stages = [0, 1]
    stages = 0
    kao = WBLSmodel(stageIx=stages)
    # kao.iniCond = 1
    # kao.iniSeed = 1
    kao._setHistDelayAndContainers()
    kao._edgesImpulse()
    kao._getEmpiReativEnv()
    # doNoise() and runModel_Pulsein_noise() are not defined in this file; they are
    # assumed to come from an extended (noisy) variant of the model.
    kao.doNoise(variance=0)
    try:
        kao.pulse = np.ones((68, len(stages)))*10
    except TypeError:
        kao.pulse = np.ones(68)*10
    kao.runModel_Pulsein_noise()
    kao.similarityAllPLIs()
    kao.relativeEnvBuPre()
    kao.getScorrelRelEnv()