# -*- coding: utf-8 -*-
"""
Unitary Event (UE) analysis is a statistical method that enables the
analysis of excess spike correlation between simultaneously recorded
neurons in a time-resolved manner, by comparing the empirical spike
coincidences (precision of a few ms) to the number expected from the
firing rates of the neurons.

References:
  - Gruen, Diesmann, Grammont, Riehle, Aertsen (1999) J Neurosci Methods,
    94(1): 67-79.
  - Gruen, Diesmann, Aertsen (2002a,b) Neural Comput, 14(1): 43-80; 81-119.
  - Gruen S, Riehle A, and Diesmann M (2003) Effect of cross-trial
    nonstationarity on joint-spike events. Biological Cybernetics 88(5):
    335-351.
  - Gruen S (2009) Data-driven significance estimation of precise spike
    correlation. J Neurophysiology 101: 1126-1140 (invited review).

:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import warnings

import neo
import numpy as np
import quantities as pq
import scipy.special

import elephant.conversion as conv


def hash_from_pattern(m, N, base=2):
    """
    Calculate for a spike pattern or a matrix of spike patterns
    (provide each pattern as a column) composed of N neurons a
    unique number.

    Parameters:
    -----------
    m: 2-dim ndarray
        spike patterns represented as a binary matrix (i.e., matrix of
        0's and 1's). Rows and columns correspond to neurons and
        patterns, respectively.
    N: integer
        number of neurons; required to be equal to the number of rows
        of `m`
    base: integer
        base for the calculation of hash values from binary
        sequences (= patterns).
        Default is 2

    Returns:
    --------
    list of integers:
        An array containing the hash values of each pattern,
        shape: (number of patterns)

    Raises:
    -------
    ValueError: if matrix m has wrong orientation

    Examples:
    ---------
    descriptive example:
        m = [0
             1
             1]
        N = 3
        base = 2
        hash = 0*2^2 + 1*2^1 + 1*2^0 = 3

    second example:

    >>> import numpy as np
    >>> m = np.array([[0, 1, 0, 0, 1, 1, 0, 1],
    ...               [0, 0, 1, 0, 1, 0, 1, 1],
    ...               [0, 0, 0, 1, 0, 1, 1, 1]])
    >>> hash_from_pattern(m, N=3)
    array([0, 4, 2, 1, 6, 5, 3, 7])
    """
    # check the consistency between the shape of m and the number of neurons N
    if N != np.shape(m)[0]:
        raise ValueError('patterns in the matrix should be column entries')
    # check that the matrix is binary
    if not np.all((np.array(m) == 0) + (np.array(m) == 1)):
        raise ValueError('patterns should be zero or one')
    # generate the representation; the first row (neuron 0) carries the
    # most significant digit
    v = np.array([base ** x for x in range(N)])
    # reverse the order
    v = v[::-1]
    # calculate the hash values as a scalar product
    return np.dot(v, m)


def inverse_hash_from_pattern(h, N, base=2):
    """
    Calculate the 0-1 spike patterns (matrix) from hash values

    Parameters:
    -----------
    h: list of integers
        list or array of hash values, length: number of patterns
    N: integer
        number of neurons
    base: integer
        base which was used for the calculation of the hash values from
        binary sequences (= patterns).
        Default is 2

    Raises:
    -------
    ValueError: if a hash value is not compatible with the number of
        neurons; a hash value must not be larger than the largest
        possible hash value for the given number of neurons
        (e.g. for N = 2, max(hash) = 2^1 + 2^0 = 3,
        or for N = 4, max(hash) = 2^3 + 2^2 + 2^1 + 2^0 = 15)

    Returns:
    --------
    numpy.array:
        A matrix of shape: (N, number of patterns)

    Examples
    ---------
    >>> import numpy as np
    >>> h = np.array([3, 7])
    >>> N = 4
    >>> inverse_hash_from_pattern(h, N)
    array([[0, 0],
           [0, 1],
           [1, 1],
           [1, 1]])
    """
    # check that the hash values are not greater than the greatest possible
    # value for N neurons with the given base
    if np.any(h > np.sum([base ** x for x in range(N)])):
        raise ValueError(
            "hash value is not compatible with the number of neurons N")
    # check that the hash values are integers
    if not np.all(np.int64(h) == h):
        raise ValueError("hash values are not integers")
    m = np.zeros((N, len(h)), dtype=int)
    for j, hh in enumerate(h):
        # fill the pattern from the least significant digit (last row)
        # upwards; integer division is required here, otherwise the loop
        # does not terminate under Python 3's true division
        i = N - 1
        while i >= 0 and hh != 0:
            m[i, j] = hh % base
            hh //= base
            i -= 1
    return m
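
# A quick round-trip sanity check (illustrative sketch, not part of the
# original module): hashing a pattern matrix and inverting the hashes
# recovers the original patterns.
#
#     >>> m = np.array([[0, 1],
#     ...               [1, 0],
#     ...               [1, 1]])
#     >>> h = hash_from_pattern(m, N=3)
#     >>> h
#     array([3, 5])
#     >>> np.array_equal(inverse_hash_from_pattern(h, N=3), m)
#     True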


def n_emp_mat(mat, N, pattern_hash, base=2):
    """
    Count the occurrences of spike coincidence patterns
    in the given spike trains.

    Parameters:
    -----------
    mat: 2-dim ndarray
        binned spike trains of N neurons. Rows and columns correspond
        to neurons and temporal bins, respectively.
    N: integer
        number of neurons
    pattern_hash: list of integers
        hash values representing the spike coincidence patterns
        of which occurrences are counted.
    base: integer
        base which was used to generate the hash values.
        Default is 2

    Returns:
    --------
    N_emp: list of integers
        number of occurrences of the given patterns in the given
        spike trains
    indices: list of lists of integers
        indices indexing the bins where the given spike patterns are
        found in `mat`. Same length as `pattern_hash`, with
        len(indices[i]) == N_emp[i] for each pattern_hash[i]

    Raises:
    -------
    ValueError: if mat is not a zero-one matrix

    Examples:
    ---------
    >>> mat = np.array([[1, 0, 0, 1, 1],
    ...                 [1, 0, 0, 1, 0]])
    >>> pattern_hash = np.array([1, 3])
    >>> n_emp, n_emp_indices = n_emp_mat(mat, N=2, pattern_hash=pattern_hash)
    >>> print(n_emp)
    [ 0.  2.]
    >>> print(n_emp_indices)
    [array([], dtype=int64), array([0, 3])]
    """
    # check that mat is a zero-one matrix
    if not np.all((np.array(mat) == 0) + (np.array(mat) == 1)):
        raise ValueError("entries of mat should be either one or zero")
    h = hash_from_pattern(mat, N, base=base)
    N_emp = np.zeros(len(pattern_hash))
    indices = []
    for idx_ph, ph in enumerate(pattern_hash):
        indices_tmp = np.where(h == ph)[0]
        indices.append(indices_tmp)
        N_emp[idx_ph] = len(indices_tmp)
    return N_emp, indices


def n_emp_mat_sum_trial(mat, N, pattern_hash):
    """
    Calculates the empirical number of observed patterns, summed across
    trials

    Parameters:
    -----------
    mat: 3d numpy array or elephant BinnedSpikeTrain object
        Binned spike trains represented as a binary matrix (i.e., matrix
        of 0's and 1's), segmented into trials. Trials should contain an
        identical number of neurons and an identical number of time bins.
        the entries are zero or one
        0-axis --> trials
        1-axis --> neurons
        2-axis --> time bins
    N: integer
        number of neurons
    pattern_hash: list of integers
        array of hash values, length: number of patterns

    Returns:
    --------
    N_emp: list of integers
        numbers of occurrences of the given spike patterns in the given
        spike trains, summed across trials. Same length as `pattern_hash`.
    idx_trials: list of lists of integers
        list of indices of mat for each trial in which
        the specific pattern has been observed.
        0-axis --> trial
        1-axis --> list of indices for the chosen trial per
        entry of `pattern_hash`

    Raises:
    -------
    ValueError: if matrix mat has wrong orientation
    ValueError: if mat is not a zero-one matrix

    Examples:
    ---------
    >>> mat = np.array([[[1, 1, 1, 1, 0],
    ...                  [0, 1, 1, 1, 0],
    ...                  [0, 1, 1, 0, 1]],
    ...                 [[1, 1, 1, 1, 1],
    ...                  [0, 1, 1, 1, 1],
    ...                  [1, 1, 0, 1, 0]]])
    >>> pattern_hash = np.array([4, 6])
    >>> N = 3
    >>> n_emp_sum_trial, n_emp_sum_trial_idx = n_emp_mat_sum_trial(
    ...     mat, N, pattern_hash)
    >>> n_emp_sum_trial
    array([ 1.,  3.])
    >>> n_emp_sum_trial_idx
    [[array([0]), array([3])], [array([], dtype=int64), array([2, 4])]]
    """
    # check the consistency between the shape of mat and the number of
    # neurons N
    if N != np.shape(mat)[1]:
        raise ValueError('the entries of mat should be a list of a '
                         'list where 0-axis is trials and 1-axis is neurons')
    num_patt = len(pattern_hash)
    N_emp = np.zeros(num_patt)
    idx_trials = []
    # check that mat is a zero-one matrix
    if not np.all((np.array(mat) == 0) + (np.array(mat) == 1)):
        raise ValueError("entries of mat should be either one or zero")
    for mat_tr in mat:
        N_emp_tmp, indices_tmp = n_emp_mat(mat_tr, N, pattern_hash, base=2)
        idx_trials.append(indices_tmp)
        N_emp += N_emp_tmp
    return N_emp, idx_trials


def _n_exp_mat_analytic(mat, N, pattern_hash):
    """
    Calculates the expected joint probability for each spike pattern
    analytically
    """
    marg_prob = np.mean(mat, 1, dtype=float)
    # marg_prob needs to be a column vector, so we
    # build a two dimensional array with 1 column
    # and len(marg_prob) rows
    marg_prob = np.reshape(marg_prob, (len(marg_prob), 1))
    m = inverse_hash_from_pattern(pattern_hash, N)
    nrep = np.shape(m)[1]
    # multiplying the marginal probability of the neurons according to the
    # pattern
    pmat = np.multiply(m, np.tile(marg_prob, (1, nrep))) + \
        np.multiply(1 - m, np.tile(1 - marg_prob, (1, nrep)))
    return np.prod(pmat, axis=0) * float(np.shape(mat)[1])
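
# Under the independence assumption, the expectation computed above is, for
# marginal firing probabilities p = (p_1, ..., p_N) and a binary pattern
# b = (b_1, ..., b_N),
#
#     n_exp(b) = n_bins * prod_i p_i**b_i * (1 - p_i)**(1 - b_i)
#
# For example (cf. the docstring of n_exp_mat below), p = (1.0, 0.5, 0.25),
# b = (1, 0, 1) and n_bins = 4 give 4 * 1.0 * 0.5 * 0.25 = 0.5.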


def _n_exp_mat_surrogate(mat, N, pattern_hash, n_surr=1):
    """
    Calculates the expected joint probability for each spike pattern with
    spike time randomization surrogates
    """
    if len(pattern_hash) > 1:
        raise ValueError('surrogate method works only for one pattern!')
    N_exp_array = np.zeros(n_surr)
    for surr_idx in range(n_surr):
        # shuffle the bins within each row independently; this preserves
        # each neuron's spike count but destroys coincidences
        mat_surr = np.array(mat)
        for row in mat_surr:
            np.random.shuffle(row)
        N_exp_array[surr_idx] = n_emp_mat(mat_surr, N, pattern_hash)[0][0]
    return N_exp_array


def n_exp_mat(mat, N, pattern_hash, method='analytic', n_surr=1):
    """
    Calculates the expected joint probability for each spike pattern

    Parameters:
    -----------
    mat: 2d numpy array
        the entries are zero or one
        0-axis --> neurons
        1-axis --> time bins
    N: integer
        number of neurons
    pattern_hash: list of integers
        array of hash values, length: number of patterns
    method: string
        method with which the expectancy should be calculated
        'analytic' --> analytically
        'surr' --> with surrogates (spike time randomization)
        Default is 'analytic'
    n_surr: integer
        number of surrogates for constructing the distribution of expected
        joint probability. Default is 1; this number is needed only when
        method = 'surr'

    Raises:
    -------
    ValueError: if matrix mat has wrong orientation

    Returns:
    --------
    if method is 'analytic':
        numpy.array:
            An array containing the expected joint probability of each
            pattern, shape: (number of patterns,)
    if method is 'surr':
        numpy.ndarray:
            distribution of expected coincidence counts over surrogates,
            shape: (n_surr,). Note that the surrogate method supports
            only a single pattern per call.

    Examples:
    ---------
    >>> mat = np.array([[1, 1, 1, 1],
    ...                 [0, 1, 0, 1],
    ...                 [0, 0, 1, 0]])
    >>> pattern_hash = np.array([5, 6])
    >>> N = 3
    >>> n_exp_anal = n_exp_mat(mat, N, pattern_hash, method='analytic')
    >>> n_exp_anal
    array([ 0.5,  1.5])
    >>> n_exp_surr = n_exp_mat(
    ...     mat, N, pattern_hash=np.array([5]), method='surr', n_surr=5000)
    >>> n_exp_surr.shape
    (5000,)
    """
    # check that mat is a zero-one matrix
    if np.any(mat > 1) or np.any(mat < 0):
        raise ValueError("entries of mat should be either one or zero")
    if method == 'analytic':
        return _n_exp_mat_analytic(mat, N, pattern_hash)
    if method == 'surr':
        return _n_exp_mat_surrogate(mat, N, pattern_hash, n_surr)


def n_exp_mat_sum_trial(
        mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
    """
    Calculates the expected joint probability
    for each spike pattern, summed over trials

    Parameters:
    -----------
    mat: 3d numpy array or elephant BinnedSpikeTrain object
        Binned spike trains represented as a binary matrix (i.e., matrix
        of 0's and 1's), segmented into trials. Trials should contain an
        identical number of neurons and an identical number of time bins.
        the entries are zero or one
        0-axis --> trials
        1-axis --> neurons
        2-axis --> time bins
    N: integer
        number of neurons
    pattern_hash: list of integers
        array of hash values, length: number of patterns
    method: string
        method with which the unitary events should be computed
        'analytic_TrialByTrial' --> calculate the expectancy
        (analytically) on each trial, then sum over all trials.
        'analytic_TrialAverage' --> calculate the expectancy
        by averaging over trials.
        (cf. Gruen et al. 2003)
        'surrogate_TrialByTrial' --> calculate the distribution
        of expected coincidences by spike time randomization in
        each trial and sum over trials.
        Default is 'analytic_TrialByTrial'

    kwargs:
    -------
    n_surr: integer
        number of surrogates to be used
        Default is 1

    Returns:
    --------
    numpy.array:
        An array containing the expected joint probability of
        each pattern summed over trials, shape: (number of patterns,)

    Examples:
    --------
    >>> mat = np.array([[[1, 1, 1, 1, 0],
    ...                  [0, 1, 1, 1, 0],
    ...                  [0, 1, 1, 0, 1]],
    ...                 [[1, 1, 1, 1, 1],
    ...                  [0, 1, 1, 1, 1],
    ...                  [1, 1, 0, 1, 0]]])
    >>> pattern_hash = np.array([5, 6])
    >>> N = 3
    >>> n_exp_anal = n_exp_mat_sum_trial(mat, N, pattern_hash)
    >>> n_exp_anal
    array([ 1.56,  2.56])
    """
    # check the consistency between the shape of mat and the number of
    # neurons N
    if N != np.shape(mat)[1]:
        raise ValueError('the entries of mat should be a list of a '
                         'list where 0-axis is trials and 1-axis is neurons')
    if method == 'analytic_TrialByTrial':
        n_exp = np.zeros(len(pattern_hash))
        for mat_tr in mat:
            n_exp += n_exp_mat(mat_tr, N, pattern_hash, method='analytic')
    elif method == 'analytic_TrialAverage':
        n_exp = n_exp_mat(
            np.mean(mat, 0), N, pattern_hash,
            method='analytic') * np.shape(mat)[0]
    elif method == 'surrogate_TrialByTrial':
        n_surr = kwargs.get('n_surr', 1)
        n_exp = np.zeros(n_surr)
        for mat_tr in mat:
            n_exp += n_exp_mat(mat_tr, N, pattern_hash,
                               method='surr', n_surr=n_surr)
    else:
        raise ValueError("unknown method: " + str(method))
    return n_exp


def gen_pval_anal(
        mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
    """
    Computes the expected coincidences and returns a function to
    calculate the p-value for given empirical coincidences.

    This function generates a Poisson distribution with the expected
    value calculated from mat. It returns a function which takes
    the empirical coincidences, `n_emp`, and calculates the p-value
    as the area under the Poisson distribution from `n_emp` to infinity

    Parameters:
    -----------
    mat: 3d numpy array or elephant BinnedSpikeTrain object
        Binned spike trains represented as a binary matrix (i.e., matrix
        of 0's and 1's), segmented into trials. Trials should contain an
        identical number of neurons and an identical number of time bins.
        the entries are zero or one
        0-axis --> trials
        1-axis --> neurons
        2-axis --> time bins
    N: integer
        number of neurons
    pattern_hash: list of integers
        array of hash values, length: number of patterns
    method: string
        method with which the unitary events should be computed
        'analytic_TrialByTrial' --> calculate the expectancy
        (analytically) on each trial, then sum over all trials.
        'analytic_TrialAverage' --> calculate the expectancy
        by averaging over trials.
        (cf. Gruen et al. 2003)
        Default is 'analytic_TrialByTrial'

    kwargs:
    -------
    n_surr: integer
        number of surrogates to be used
        Default is 1

    Returns:
    --------
    pval_anal:
        a function which calculates the p-value for
        the given empirical coincidences
    n_exp: list of floats
        expected coincidences

    Examples:
    --------
    >>> mat = np.array([[[1, 1, 1, 1, 0],
    ...                  [0, 1, 1, 1, 0],
    ...                  [0, 1, 1, 0, 1]],
    ...                 [[1, 1, 1, 1, 1],
    ...                  [0, 1, 1, 1, 1],
    ...                  [1, 1, 0, 1, 0]]])
    >>> pattern_hash = np.array([5, 6])
    >>> N = 3
    >>> pval_anal, n_exp = gen_pval_anal(mat, N, pattern_hash)
    >>> n_exp
    array([ 1.56,  2.56])
    """
    if method == 'analytic_TrialByTrial' or method == 'analytic_TrialAverage':
        n_exp = n_exp_mat_sum_trial(mat, N, pattern_hash, method=method)

        def pval(n_emp):
            # P(X >= n_emp) for X ~ Poisson(n_exp)
            p = 1. - scipy.special.gammaincc(n_emp, n_exp)
            return p
    elif method == 'surrogate_TrialByTrial':
        n_surr = kwargs.get('n_surr', 1)
        n_exp = n_exp_mat_sum_trial(
            mat, N, pattern_hash, method=method, n_surr=n_surr)

        def pval(n_emp):
            if len(n_emp) > 1:
                raise ValueError(
                    'in the surrogate method the p-value can be '
                    'calculated only for one pattern!')
            # empirical distribution of the surrogate counts
            hist = np.bincount(np.int64(n_exp))
            exp_dist = hist / float(np.sum(hist))
            return np.sum(exp_dist[int(n_emp[0]):])
    else:
        raise ValueError("unknown method: " + str(method))
    return pval, n_exp
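
# Usage sketch for the returned p-value function (illustrative, not part of
# the original module): with the docstring example above, the empirical
# counts for pattern_hash = [5, 6] are n_emp = [1., 3.], so
#
#     >>> n_emp, _ = n_emp_mat_sum_trial(mat, N, pattern_hash)
#     >>> pval_anal(n_emp)    # P(X >= n_emp), X ~ Poisson(n_exp)
#     array([ 0.7899...,  0.4714...])    # approximate values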


def jointJ(p_val):
    """Surprise measure

    Logarithmic transformation of the joint-p-value into a surprise
    measure for better visualization, as highly significant events are
    indicated by very low joint-p-values

    Parameters:
    -----------
    p_val: list of floats
        p-values of statistical tests for different patterns.

    Returns:
    --------
    J: list of floats
        list of surprise measures

    Examples:
    ---------
    >>> p_val = np.array([0.31271072, 0.01175031])
    >>> jointJ(p_val)
    array([0.3419968 , 1.92481736])
    """
    p_arr = np.array(p_val)
    # Js = log10((1 - p) / p); suppress the divide-by-zero warning for
    # p-values of exactly 0 or 1
    with np.errstate(divide='ignore'):
        Js = np.log10(1 - p_arr) - np.log10(p_arr)
    return Js
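
# Interpretation note (illustrative): the surprise is the log-odds of the
# joint-p-value, Js = log10((1 - p) / p). p = 0.5 maps to Js = 0, p = 0.01
# to Js = log10(99) ~ 2, and p > 0.5 (fewer coincidences than expected by
# chance) to negative surprise.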


def _rate_mat_avg_trial(mat):
    """
    Calculates the average firing rate of each neuron across trials
    """
    num_tr, N, nbins = np.shape(mat)
    psth = np.zeros(N)
    for mat_tr in mat:
        psth += np.sum(mat_tr, axis=1)
    return psth / float(nbins) / float(num_tr)
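
# For example, for the two-trial matrix used in the docstring of
# n_emp_mat_sum_trial (per-neuron spike counts of [4, 3, 3] and [5, 4, 3]
# over 5 bins), this returns array([0.9, 0.7, 0.6]) spikes per bin.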


def _bintime(t, binsize):
    """
    Converts a time quantity to an integer bin index
    """
    t_dl = t.rescale('ms').magnitude
    binsize_dl = binsize.rescale('ms').magnitude
    return np.floor(np.array(t_dl) / binsize_dl).astype(int)
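
# For example, _bintime(1.23 * pq.s, 10 * pq.ms) returns 123, and an array
# of times maps elementwise to an array of bin indices.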


def _winpos(t_start, t_stop, winsize, winstep, position='left-edge'):
    """
    Calculates the positions of the analysis windows
    """
    t_start_dl = t_start.rescale('ms').magnitude
    t_stop_dl = t_stop.rescale('ms').magnitude
    winsize_dl = winsize.rescale('ms').magnitude
    winstep_dl = winstep.rescale('ms').magnitude
    # left edge of each window
    if position == 'left-edge':
        ts_winpos = np.arange(
            t_start_dl, t_stop_dl - winsize_dl + winstep_dl,
            winstep_dl) * pq.ms
    else:
        raise ValueError(
            'the current version only returns left-edge of the window')
    return ts_winpos
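
# For example, _winpos(0 * pq.ms, 100 * pq.ms, 50 * pq.ms, 25 * pq.ms)
# returns array([ 0., 25., 50.]) * pq.ms: every left edge for which the
# full window still fits into [t_start, t_stop].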


def _UE(mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
    """
    Returns the default results of unitary events analysis
    (surprise, empirical coincidences and the indices where they occurred
    in the given mat, n_exp and the average rates of the neurons)
    """
    rate_avg = _rate_mat_avg_trial(mat)
    n_emp, indices = n_emp_mat_sum_trial(mat, N, pattern_hash)
    if method == 'surrogate_TrialByTrial':
        n_surr = kwargs.get('n_surr', 1)
        dist_exp, n_exp = gen_pval_anal(
            mat, N, pattern_hash, method, n_surr=n_surr)
        n_exp = np.mean(n_exp)
    elif method == 'analytic_TrialByTrial' or \
            method == 'analytic_TrialAverage':
        dist_exp, n_exp = gen_pval_anal(mat, N, pattern_hash, method)
    pval = dist_exp(n_emp)
    Js = jointJ(pval)
    return Js, rate_avg, n_exp, n_emp, indices


def jointJ_window_analysis(
        data, binsize, winsize, winstep, pattern_hash,
        method='analytic_TrialByTrial', t_start=None,
        t_stop=None, binary=True, **kwargs):
    """
    Calculates the joint surprise in a sliding window fashion

    Parameters:
    ----------
    data: list of neo.SpikeTrain objects
        list of spike trains in different trials
        0-axis --> Trials
        1-axis --> Neurons
        2-axis --> Spike times
    binsize: Quantity scalar with dimension time
        size of bins for discretizing spike trains
    winsize: Quantity scalar with dimension time
        size of the window of analysis
    winstep: Quantity scalar with dimension time
        size of the window step
    pattern_hash: list of integers
        list of patterns of interest in hash values
        (see the hash_from_pattern and inverse_hash_from_pattern functions)
    method: string
        method with which the unitary events should be computed
        'analytic_TrialByTrial' --> calculate the expectancy
        (analytically) on each trial, then sum over all trials.
        'analytic_TrialAverage' --> calculate the expectancy
        by averaging over trials.
        (cf. Gruen et al. 2003)
        'surrogate_TrialByTrial' --> calculate the distribution
        of expected coincidences by spike time randomization in
        each trial and sum over trials.
        Default is 'analytic_TrialByTrial'
    t_start: float or Quantity scalar, optional
        The start time to use for the time points.
        If not specified, retrieved from the `t_start`
        attribute of the spike trains.
    t_stop: float or Quantity scalar, optional
        The stop time to use for the time points.
        If not specified, retrieved from the `t_stop`
        attribute of the spike trains.

    kwargs:
    -------
    n_surr: integer
        number of surrogates to be used
        Default is 100

    Returns:
    -------
    result: dictionary
        Js: numpy array of floats
            joint surprise of the given pattern within each window,
            shape: (number of windows,)
            (the sliding-window analysis supports a single entry in
            `pattern_hash` per call)
        indices: dictionary
            one key per trial ('trial0', 'trial1', ...); each entry
            contains the bin positions, relative to the whole recording,
            at which the first pattern in `pattern_hash` occurred in
            that trial
        n_emp: numpy array
            empirical number of occurrences of the pattern in each
            window, shape: (number of windows,)
        n_exp: numpy array of floats
            expected number of occurrences of the pattern in each
            window, shape: (number of windows,)
        rate_avg: numpy array of floats
            average firing rate of each neuron per window,
            shape: (number of windows, number of neurons)
    """
    if not isinstance(data[0][0], neo.SpikeTrain):
        raise ValueError(
            "structure of the data is not correct: 0-axis should be trials, "
            "1-axis units and 2-axis neo spike trains")
    if t_start is None:
        t_start = data[0][0].t_start.rescale('ms')
    if t_stop is None:
        t_stop = data[0][0].t_stop.rescale('ms')
    # position of all windows (left edges)
    t_winpos = _winpos(t_start, t_stop, winsize, winstep,
                       position='left-edge')
    t_winpos_bintime = _bintime(t_winpos, binsize)
    winsize_bintime = _bintime(winsize, binsize)
    winstep_bintime = _bintime(winstep, binsize)
    if winsize_bintime * binsize != winsize:
        warnings.warn(
            "ratio between winsize and binsize is not integer -- "
            "the actual window size is " + str(winsize_bintime * binsize))
    if winstep_bintime * binsize != winstep:
        warnings.warn(
            "ratio between winstep and binsize is not integer -- "
            "the actual window step is " + str(winstep_bintime * binsize))
    num_tr, N = np.shape(data)[:2]
    n_bins = int((t_stop - t_start) / binsize)
    mat_tr_unit_spt = np.zeros((len(data), N, n_bins))
    for tr, sts in enumerate(data):
        bs = conv.BinnedSpikeTrain(
            sts, t_start=t_start, t_stop=t_stop, binsize=binsize)
        if binary is True:
            mat = bs.to_bool_array()
        else:
            raise ValueError(
                "The method only works on the zero_one matrix at the moment")
        mat_tr_unit_spt[tr] = mat
    num_win = len(t_winpos)
    Js_win, n_exp_win, n_emp_win = (np.zeros(num_win) for _ in range(3))
    rate_avg = np.zeros((num_win, N))
    indices_win = {}
    for i in range(num_tr):
        indices_win['trial' + str(i)] = []
    for i, win_pos in enumerate(t_winpos_bintime):
        mat_win = mat_tr_unit_spt[:, :, win_pos:win_pos + winsize_bintime]
        if method == 'surrogate_TrialByTrial':
            n_surr = kwargs.get('n_surr', 100)
            (Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[i],
             indices_lst) = _UE(mat_win, N, pattern_hash, method,
                                n_surr=n_surr)
        else:
            (Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[i],
             indices_lst) = _UE(mat_win, N, pattern_hash, method)
        for j in range(num_tr):
            if len(indices_lst[j][0]) > 0:
                indices_win['trial' + str(j)] = np.append(
                    indices_win['trial' + str(j)],
                    indices_lst[j][0] + win_pos)
    return {'Js': Js_win, 'indices': indices_win, 'n_emp': n_emp_win,
            'n_exp': n_exp_win, 'rate_avg': rate_avg / binsize}
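

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original
    # module): two trials of three independent, Poisson-like synthetic
    # spike trains, analyzed for the full coincidence pattern [1, 1, 1],
    # i.e. hash 7 for N = 3 and base 2 (cf. hash_from_pattern). Assumes
    # neo, quantities and elephant.conversion are installed in versions
    # matching this module's BinnedSpikeTrain(binsize=...) call.
    np.random.seed(0)
    data = [[neo.SpikeTrain(np.sort(np.random.uniform(0, 1000, 50)) * pq.ms,
                            t_start=0 * pq.ms, t_stop=1000 * pq.ms)
             for _ in range(3)]
            for _ in range(2)]
    result = jointJ_window_analysis(
        data, binsize=5 * pq.ms, winsize=100 * pq.ms, winstep=50 * pq.ms,
        pattern_hash=[7])
    # windows in which no triple coincidence was observed typically yield
    # p ~ 1 and hence a strongly negative (down to -inf) surprise
    print('window-wise joint surprise:', result['Js'])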