cell_assembly_detection.py
  1. """
  2. CAD [1] is a method aimed to capture structures of higher-order correlation in
  3. massively parallel spike trains. In particular, it is able to extract
  4. patterns of spikes with arbitrary configuration of time lags (time interval
  5. between spikes in a pattern), and at multiple time scales,
  6. e.g. from synchronous patterns to firing rate co-modulations.
  7. CAD consists of a statistical parametric testing done on the level of pairs
  8. of neurons, followed by an agglomerative recursive algorithm, in order to
  9. detect and test statistically precise repetitions of spikes in the data.
  10. In particular, pairs of neurons are tested for significance under the null
  11. hypothesis of independence, and then the significant pairs are agglomerated
  12. into higher order patterns.
  13. The method was published in Russo et al. 2017 [1]. The original
  14. code is in Matlab language.
  15. Given a list of discretized (binned) spike trains by a given temporal
  16. scale (bin_size), assumed to be recorded in parallel, the CAD analysis can be
  17. applied as demonstrated in this short toy example of 5 parallel spike trains
  18. that exhibit fully synchronous events of order 5.
  19. Examples
  20. --------
  21. >>> import matplotlib.pyplot as plt
  22. >>> import elephant.conversion as conv
  23. >>> import elephant.spike_train_generation
  24. >>> import quantities as pq
  25. >>> import numpy as np
  26. >>> import elephant.cell_assembly_detection as cad
  27. >>> np.random.seed(30)
  28. >>> # Generate correlated data and bin it with a bin_size of 10ms
  29. >>> sts = elephant.spike_train_generation.cpp(
  30. >>> rate=15*pq.Hz, A=[0]+[0.95]+[0]*4+[0.05], t_stop=10*pq.s)
  31. >>> bin_size = 10*pq.ms
  32. >>> spM = conv.BinnedSpikeTrain(sts, bin_size=bin_size)
  33. >>> # Call of the method
  34. >>> patterns = cad.cell_assembly_detection(spM=spM, max_lag=2)[0]
  35. >>> # Plotting
  36. >>> plt.figure()
  37. >>> for neu in patterns['neurons']:
  38. >>> if neu == 0:
  39. >>> plt.plot(
  40. >>> patterns['times']*bin_size, [neu]*len(patterns['times']),
  41. >>> 'ro', label='pattern')
  42. >>> else:
  43. >>> plt.plot(
  44. >>> patterns['times']*bin_size, [neu] * len(patterns['times']),
  45. >>> 'ro')
  46. >>> # Raster plot of the data
  47. >>> for st_idx, st in enumerate(sts):
  48. >>> if st_idx == 0:
  49. >>> plt.plot(st.rescale(pq.ms), [st_idx] * len(st), 'k.',
  50. >>> label='spikes')
  51. >>> else:
  52. >>> plt.plot(st.rescale(pq.ms), [st_idx] * len(st), 'k.')
  53. >>> plt.ylim([-1, len(sts)])
  54. >>> plt.xlabel('time (ms)')
  55. >>> plt.ylabel('neurons ids')
  56. >>> plt.legend()
  57. >>> plt.show()
  58. References
  59. ----------
  60. [1] Russo, E., & Durstewitz, D. (2017).
  61. Cell assemblies at multiple time scales with arbitrary lag constellations.
  62. Elife, 6.
  63. """
from __future__ import division, print_function, unicode_literals

import copy
import math
import time

import numpy as np
from scipy.stats import f

import elephant.conversion as conv
from elephant.utils import deprecated_alias

__all__ = [
    "cell_assembly_detection"
]


@deprecated_alias(data='binned_spiketrain', maxlag='max_lag',
                  min_occ='min_occurrences',
                  same_config_cut='same_configuration_pruning')
def cell_assembly_detection(binned_spiketrain, max_lag, reference_lag=2,
                            alpha=0.05, min_occurrences=1, size_chunks=100,
                            max_spikes=np.inf, significance_pruning=True,
                            subgroup_pruning=True,
                            same_configuration_pruning=False,
                            bool_times_format=False, verbose=False):
  84. """
  85. The function performs the CAD analysis for the binned (discretized) spike
  86. trains given in input. The method looks for candidate
  87. significant patterns with lags (number of bins between successive spikes
  88. in the pattern) going from `-max_lag` to `max_lag` (second parameter of the
  89. function). Thus, between two successive spikes in the pattern there can
  90. be at most `max_lag`*`bin_size` units of time.
  91. The method agglomerates pairs of units (or a unit and a preexisting
  92. assembly), tests their significance by a statistical test
  93. and stops when the detected assemblies reach their maximal dimension
  94. (parameter `max_spikes`).
  95. At every agglomeration size step (e.g. from triplets to quadruplets), the
  96. method filters patterns having the same neurons involved, and keeps only
  97. the most significant one. This pruning is optional and the choice is
  98. identified by the parameter 'significance_pruning'.
  99. Assemblies already included in a bigger assembly are eliminated in a final
  100. pruning step. Also this pruning is optional, and the choice is identified
  101. by the parameter `subgroup_pruning`.
  102. Parameters
  103. ----------
  104. binned_spiketrain : elephant.conversion.BinnedSpikeTrain
  105. Binned spike trains containing data to be analyzed.
  106. max_lag : int
  107. Maximal lag to be tested. For a binning dimension of bin_size the
  108. method will test all pairs configurations with a time
  109. shift between '-max_lag' and 'max_lag'.
  110. reference_lag : int, optional
  111. Reference lag (in bins) for the non-stationarity correction in the
  112. statistical test.
  113. Default: 2.
  114. alpha : float, optional
  115. Significance level for the statistical test.
  116. Default: 0.05.
  117. min_occurrences : int, optional
  118. Minimal number of occurrences required for an assembly
  119. (all assemblies, even if significant, with fewer occurrences
  120. than min_occurrences are discarded).
  121. Default: 0.
  122. size_chunks : int, optional
  123. Size (in bins) of chunks in which the spike trains are divided
  124. to compute the variance (to reduce non stationarity effects
  125. on variance estimation).
  126. Default: 100.
  127. max_spikes : int, optional
  128. Maximal assembly order (the algorithm will return assemblies
  129. composed of maximum `max_spikes` elements).
  130. Default: `np.inf`.
  131. significance_pruning : bool, optional
  132. If True, the method performs significance pruning among
  133. the detected assemblies.
  134. Default: True.
  135. subgroup_pruning : bool, optional
  136. If True, the method performs subgroup pruning among
  137. the detected assemblies.
  138. Default: True.
  139. same_configuration_pruning : bool, optional
  140. If True, performs pruning (not present in the original code and more
  141. efficient), not testing assemblies already formed
  142. if they appear in the very same configuration.
  143. Default: False.
  144. bool_times_format : bool, optional
  145. If True, the activation time series is a list of 0/1 elements, where
  146. 1 indicates the first spike of the pattern.
  147. Otherwise, the activation times of the assemblies are indicated by the
  148. indices of the bins in which the first spike of the pattern
  149. is happening.
  150. Default: False.
  151. verbose : bool, optional
  152. Regulates the number of prints given by the method. If true all prints
  153. are given, otherwise the method does give any prints.
  154. Default: False.
  155. Returns
  156. -------
  157. assembly : list of dict
  158. Contains the assemblies detected for the bin size chosen. Each
  159. assembly is a dictionary with attributes:
  160. 'neurons' : list
  161. Vector of units taking part to the assembly (unit order correspond
  162. to the agglomeration order).
  163. 'lag' : list
  164. Vector of time lags.
  165. `lag[z]` is the activation delay between `neurons[1]` and
  166. `neurons[z+1]`.
  167. 'pvalue' : list
  168. Vector containing p-values.
  169. `pvalue[z]` is the p-value of the statistical test between
  170. performed adding `neurons[z+1]` to the `neurons[1:z]`.
  171. 'times' : list
  172. Assembly activation time. It reports how many times the
  173. complete assembly activates in that bin. Time always refers to the
  174. activation of the first listed assembly element (`neurons[1]`),
  175. that doesn't necessarily corresponds to the first unit firing.
  176. The format is identified by the variable `bool_times_format`.
  177. 'signature' : list of list
  178. Array of two entries `(z,c)`. The first is the number of neurons
  179. participating in the assembly (size), and the second is number of
  180. assembly occurrences.
  181. Raises
  182. ------
  183. TypeError
  184. If `binned_spiketrain` is not an instance of
  185. `elephant.conv.BinnedSpikeTrain`.
  186. ValueError
  187. If the parameters are out of bounds.
  188. Notes
  189. -----
  190. Alias: cad
  191. References
  192. ----------
  193. [1] Russo, E., & Durstewitz, D. (2017). Cell assemblies at multiple time
  194. scales with arbitrary lag constellations. Elife, 6.
  195. Examples
  196. --------
  197. >>> import elephant.conversion as conv
  198. >>> import elephant.spike_train_generation
  199. >>> import quantities as pq
  200. >>> import numpy as np
  201. >>> import elephant.cell_assembly_detection as cad
  202. ...
  203. >>> np.random.seed(30)
  204. ...
  205. >>> # Generate correlated data and bin it with a bin_size of 10ms
  206. >>> sts = elephant.spike_train_generation.cpp(
  207. >>> rate=15*pq.Hz, A=[0]+[0.95]+[0]*4+[0.05], t_stop=10*pq.s)
  208. >>> bin_size = 10*pq.ms
  209. >>> spM = conv.BinnedSpikeTrain(sts, bin_size=bin_size)
  210. ...
  211. >>> # Call of the method
  212. >>> patterns = cad.cell_assembly_detection(spM=spM, max_lag=2)[0]
  213. """
    initial_time = time.time()

    # check parameter input and raise errors if necessary
    _raise_errors(binned_spiketrain=binned_spiketrain,
                  max_lag=max_lag,
                  alpha=alpha,
                  min_occurrences=min_occurrences,
                  size_chunks=size_chunks,
                  max_spikes=max_spikes)

    # transform the binned spiketrain into an array
    binned_spiketrain = binned_spiketrain.to_array()

    # zero order
    n_neurons = len(binned_spiketrain)

    # initialize empty assembly
    assembly_in = [{'neurons': None,
                    'lags': None,
                    'pvalue': None,
                    'times': None,
                    'signature': None} for _ in range(n_neurons)]

    # initializing the dictionaries
    if verbose:
        print('Initializing the dictionaries...')
    for w1 in range(n_neurons):
        assembly_in[w1]['neurons'] = [w1]
        assembly_in[w1]['lags'] = []
        assembly_in[w1]['pvalue'] = []
        assembly_in[w1]['times'] = binned_spiketrain[w1]
        assembly_in[w1]['signature'] = [[1, sum(binned_spiketrain[w1])]]

    # first order = test over pairs
    # denominator of the Bonferroni correction:
    # divide alpha by the number of tests performed in the first
    # pairwise testing loop
    number_test_performed = n_neurons * (n_neurons - 1) * (2 * max_lag + 1)
    alpha = alpha * 2 / float(number_test_performed)
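    # Worked example of the correction above (illustrative, not from the
    # original code): with n_neurons = 5 and max_lag = 2 there are
    # 5 * 4 * (2*2 + 1) = 100 ordered pair/lag combinations, but only
    # 5 * 4 / 2 = 10 unordered pairs are actually tested, so the corrected
    # level is alpha * 2 / 100 = alpha / 50.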
    if verbose:
        print('actual significance_level', alpha)

    # sign_pairs_matrix is the matrix with entries set to 1 for the
    # significant pairs
    sign_pairs_matrix = np.zeros((n_neurons, n_neurons), dtype=int)
    assembly = []
    if verbose:
        print('Testing on pairs...')

    # nns: count of the existing assemblies
    nns = 0

    # initialize the structure existing_patterns, storing the patterns
    # determined by neurons and lags:
    # if the pattern already exists, don't do the test
    existing_patterns = []

    # for loop for the pairwise testing
    for w1 in range(n_neurons - 1):
        for w2 in range(w1 + 1, n_neurons):
            spiketrain2 = binned_spiketrain[w2]
            n2 = w2
            assembly_flag = 0

            # call of the function that does the pairwise testing
            call_tp = _test_pair(
                ensemble=assembly_in[w1],
                spiketrain2=spiketrain2,
                n2=n2,
                max_lag=max_lag,
                size_chunks=size_chunks,
                reference_lag=reference_lag,
                existing_patterns=existing_patterns,
                same_configuration_pruning=same_configuration_pruning)
            if same_configuration_pruning:
                assem_tp = call_tp[0]
            else:
                assem_tp = call_tp

            # if the assembly given in output is significant and the number
            # of occurrences is higher than the minimum requested number
            if assem_tp['pvalue'][-1] < alpha and \
                    assem_tp['signature'][-1][1] > min_occurrences:
                # save the assembly in the output
                assembly.append(assem_tp)
                sign_pairs_matrix[w1][w2] = 1
                assembly_flag = 1  # flag: it is indeed an assembly
                # put the item_candidate into the existing_patterns list
                if same_configuration_pruning:
                    item_candidate = call_tp[1]
                    if not existing_patterns:
                        existing_patterns = [item_candidate]
                    else:
                        existing_patterns.append(item_candidate)
            if assembly_flag:
                nns += 1  # count of the existing assemblies

    # making sign_pairs_matrix symmetric
    sign_pairs_matrix = sign_pairs_matrix + sign_pairs_matrix.T
    sign_pairs_matrix[sign_pairs_matrix == 2] = 1
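    # Note (illustrative, not from the original code): after symmetrization,
    # sign_pairs_matrix[i][j] == 1 exactly when the pair (i, j) was found
    # significant in either order, e.g. a significant pair (0, 3) sets both
    # entries [0][3] and [3][0] to 1, so row i lists all candidate partners
    # of unit i for the agglomeration step below.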
    # print(sign_pairs_matrix)

    # second order and more: increase the assembly size by adding a new unit
    # the algorithm will return assemblies composed of at most
    # max_spikes elements
    if verbose:
        print()
        print('Testing on higher order assemblies...')
        print()

    # keep the count of the current size of the assembly
    current_size_agglomeration = 2

    # number of groups previously found
    n_as = len(assembly)

    # w2_to_test_v : contains the elements to test together with the
    # elements that are in the assembly in input
    w2_to_test_v = np.zeros(n_neurons)

    # testing for higher order assemblies
    w1 = 0
    while w1 < n_as:
        w1_elements = assembly[w1]['neurons']

        # Add only neurons that have significant first order
        # co-occurrences with members of the assembly.
        # Find indices and values of nonzero elements
        for i in range(len(w1_elements)):
            w2_to_test_v += sign_pairs_matrix[w1_elements[i]]

        # w2_to_test_p : vector with the indices of nonzero elements
        w2_to_test_p = np.nonzero(w2_to_test_v)[0]

        # list with the elements to test
        # that are not already in the assembly
        w2_to_test = [item for item in w2_to_test_p
                      if item not in w1_elements]
        pop_flag = 0

        # check that there are candidate neurons for agglomeration
        if w2_to_test:
            # Bonferroni correction only for the tests actually performed
            alpha = alpha / float(len(w2_to_test) * n_as * (2 * max_lag + 1))

            # testing for the elements in w2_to_test
            for ww2 in range(len(w2_to_test)):
                w2 = w2_to_test[ww2]
                spiketrain2 = binned_spiketrain[w2]
                assembly_flag = 0
                pop_flag = max(assembly_flag, 0)

                # testing for the assembly and the new neuron
                call_tp = _test_pair(
                    ensemble=assembly[w1],
                    spiketrain2=spiketrain2,
                    n2=w2,
                    max_lag=max_lag,
                    size_chunks=size_chunks,
                    reference_lag=reference_lag,
                    existing_patterns=existing_patterns,
                    same_configuration_pruning=same_configuration_pruning)
                if same_configuration_pruning:
                    assem_tp = call_tp[0]
                else:
                    assem_tp = call_tp

                # if it is significant and
                # the number of occurrences is sufficient and
                # the length of the assembly is less than the input limit
                if assem_tp['pvalue'][-1] < alpha and \
                        assem_tp['signature'][-1][1] > min_occurrences and \
                        assem_tp['signature'][-1][0] <= max_spikes:
                    # the assembly is saved in the output list of
                    # assemblies
                    assembly.append(assem_tp)
                    assembly_flag = 1
                    if len(assem_tp['neurons']) > current_size_agglomeration:
                        # up to the next agglomeration level
                        current_size_agglomeration += 1
                        # Pruning step 1
                        # between two assemblies with the same unit set
                        # arranged into different
                        # configurations, choose the most significant one
                        if significance_pruning is True and \
                                current_size_agglomeration > 3:
                            assembly, n_filtered_assemblies = \
                                _significance_pruning_step(
                                    pre_pruning_assembly=assembly)
                    if same_configuration_pruning:
                        item_candidate = call_tp[1]
                        existing_patterns.append(item_candidate)
                if assembly_flag:
                    # count one more assembly
                    nns += 1
                    n_as = len(assembly)

        # if at least once the assembly was agglomerated into a bigger one,
        # pop the smaller one
        if pop_flag:
            assembly.pop(w1)
        w1 = w1 + 1
    # Pruning step 1
    # between two assemblies with the same unit set arranged into different
    # configurations, choose the most significant one.
    # Last call for pruning at the last order of agglomeration
    if significance_pruning:
        assembly = _significance_pruning_step(pre_pruning_assembly=assembly)[0]

    # Pruning step 2
    # Remove assemblies whose elements are already
    # ALL included in a bigger assembly
    if subgroup_pruning:
        assembly = _subgroup_pruning_step(pre_pruning_assembly=assembly)

    # Reformat of the activation times
    if not bool_times_format:
        for pattern in assembly:
            pattern['times'] = np.where(pattern['times'] > 0)[0]

    # Give as output only the maximal groups
    if verbose:
        print()
        print('Giving outputs of the method...')
        print()
        print('final_assembly')
        for item in assembly:
            print(item['neurons'],
                  item['lags'],
                  item['signature'])

    # Time needed for the computation
    if verbose:
        print()
        print('time', time.time() - initial_time)

    return assembly
def _chunking(binned_pair, size_chunks, max_lag, best_lag):
    """
    Chunk the object binned_pair into parts of the same bin length.

    Parameters
    ----------
    binned_pair : np.array
        Vector of the binned spike trains for the pair being analyzed.
    size_chunks : int
        Desired size of the chunks.
    max_lag : int
        Maximal number of lags for the chosen bin_size.
    best_lag : int
        Lag with the highest number of coincidences.

    Returns
    -------
    chunked : list
        List with the object binned_pair cut into n_chunks parts.
    n_chunks : int
        Number of chunks.
    """
    length = len(binned_pair[0])

    # number of chunks
    n_chunks = math.ceil((length - max_lag) / size_chunks)

    # new chunk size, this is to have all chunks of roughly the same size
    size_chunks = math.floor((length - max_lag) / n_chunks)

    n_chunks = int(n_chunks)
    size_chunks = int(size_chunks)

    chunked = [[[], []] for _ in range(n_chunks)]

    # cut the time series according to best_lag
    binned_pair_cut = np.array([np.zeros(length - max_lag, dtype=int),
                                np.zeros(length - max_lag, dtype=int)])

    # choose which entries to consider according to the best lag chosen
    if best_lag == 0:
        binned_pair_cut[0] = binned_pair[0][0:length - max_lag]
        binned_pair_cut[1] = binned_pair[1][0:length - max_lag]
    elif best_lag > 0:
        binned_pair_cut[0] = binned_pair[0][0:length - max_lag]
        binned_pair_cut[1] = binned_pair[1][
            best_lag:length - max_lag + best_lag]
    else:
        binned_pair_cut[0] = binned_pair[0][
            -best_lag:length - max_lag - best_lag]
        binned_pair_cut[1] = binned_pair[1][0:length - max_lag]

    # put the cut data into the chunked object
    for iii in range(n_chunks - 1):
        chunked[iii][0] = binned_pair_cut[0][
            size_chunks * iii:size_chunks * (iii + 1)]
        chunked[iii][1] = binned_pair_cut[1][
            size_chunks * iii:size_chunks * (iii + 1)]

    # the last chunk can be of a slightly different size
    chunked[n_chunks - 1][0] = binned_pair_cut[0][
        size_chunks * (n_chunks - 1):length]
    chunked[n_chunks - 1][1] = binned_pair_cut[1][
        size_chunks * (n_chunks - 1):length]

    return chunked, n_chunks
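
# A minimal sketch of the chunking arithmetic above (illustrative only; the
# helper `_example_chunking` is not part of the original code and is never
# called by the method):
def _example_chunking():
    # a pair of 1005-bin spike counts with max_lag=5 leaves 1000 usable bins,
    # which _chunking splits into ceil(1000 / 100) = 10 chunks of
    # floor(1000 / 10) = 100 bins each
    binned_pair = np.vstack([np.random.poisson(1, size=1005),
                             np.random.poisson(1, size=1005)])
    chunked, n_chunks = _chunking(binned_pair=binned_pair, size_chunks=100,
                                  max_lag=5, best_lag=1)
    return chunked, n_chunks
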
def _assert_same_pattern(item_candidate, existing_patterns, max_lag):
    """
    Test if a particular pattern has already been tested and retrieved as
    significant.

    Parameters
    ----------
    item_candidate : list of list with two components
        In the first component there are the neurons involved in the
        assembly, in the second there are the corresponding lags.
    existing_patterns : list
        List of the already significant patterns.
    max_lag : int
        Maximum lag to be tested.

    Returns
    -------
    True if the pattern was already tested and retrieved as significant,
    False otherwise.
    """
    # unique representation of the pattern in terms of lags, max_lag and
    # participating neurons
    item_candidate = sorted(item_candidate[0] * 2 * max_lag +
                            item_candidate[1] + max_lag)
    if item_candidate in existing_patterns:
        return True
    else:
        return False
def _test_pair(ensemble, spiketrain2, n2, max_lag, size_chunks, reference_lag,
               existing_patterns, same_configuration_pruning):
    """
    Test if two spike trains have repetitive patterns occurring more
    frequently than chance.

    Parameters
    ----------
    ensemble : dict
        Structure with the previously formed assembly and its spike train.
    spiketrain2 : list
        Spike train of the new unit to be tested for significance
        (candidate to be a new assembly member).
    n2 : int
        New unit tested.
    max_lag : int
        Maximum lag to be tested.
    size_chunks : int
        Size (in bins) of the chunks in which the spike trains are divided
        to compute the variance (to reduce non-stationarity effects
        on variance estimation).
    reference_lag : int
        Reference lag; if zero or negative, the lag opposite to the best
        lag (-l) is taken as reference.
    existing_patterns : list
        List of the already significant patterns.
    same_configuration_pruning : bool
        If True (not present in the original code and more efficient),
        does not test assemblies already formed if they appear in the very
        same configuration.
        Default: False.

    Returns
    -------
    assembly : dict
        Assembly formed by the method (can be empty), with attributes:

        'neurons' : vector of units taking part in the assembly
            (the unit order corresponds to the agglomeration order)
        'lags' : vector of time lags (lags[z] is the activation delay
            between neurons[1] and neurons[z+1])
        'pvalue' : vector of p-values. pvalue[z] is the p-value of the
            statistical test performed when adding neurons[z+1] to
            neurons[1:z]
        'times' : assembly activation times. It reports how many times the
            complete assembly activates in each bin.
            Time always refers to the activation of the first listed
            assembly element (neurons[1]), which does not necessarily
            correspond to the first unit firing.
        'signature' : array of two entries (z, c). The first is the number
            of neurons participating in the assembly (size),
            while the second is the number of assembly occurrences.
    item_candidate : list of list with two components
        In the first component there are the neurons involved in the
        assembly, in the second there are the corresponding lags.
    """
    # list with the binned spike trains of the two neurons
    binned_pair = [ensemble['times'], spiketrain2]

    # For large bin_sizes, the binned spike counts may potentially fluctuate
    # around a high mean level and never fall below some minimum count
    # considerably larger than zero for the whole time series.
    # Entries up to this minimum count would contribute
    # to the coincidence count although they are completely
    # uninformative, so we subtract the minima.
    binned_pair = np.array([binned_pair[0] - min(binned_pair[0]),
                            binned_pair[1] - min(binned_pair[1])])

    ntp = len(binned_pair[0])  # trial length

    # Divide into parallel trials with 0/1 elements
    # max number of spikes in one bin for both neurons
    maxrate = int(max(max(binned_pair[0]), max(binned_pair[1])))

    # creation of the parallel processes, one for each rate up to maxrate,
    # and computation of the coincidence count for both neurons
    par_processes = np.zeros((maxrate, 2, ntp), dtype=int)
    par_proc_expectation = np.zeros(maxrate, dtype=int)

    for i in range(maxrate):
        par_processes[i] = np.array(binned_pair > i, dtype=int)
        par_proc_expectation[i] = (np.sum(par_processes[i][0]) * np.sum(
            par_processes[i][1])) / float(ntp)

    # Decide which is the lag with most coincidences (l_ : best lag).
    # We are calculating the joint spike count of units A and B at lag l.
    # It is computed by counting the number of times we have a spike in A
    # and a corresponding spike in unit B l bins later; for every lag,
    # we select the one corresponding to the highest count.

    # structure with the coincidence counts for each lag
    fwd_coinc_count = np.array([0 for _ in range(max_lag + 1)])
    bwd_coinc_count = np.array([0 for _ in range(max_lag + 1)])

    for lag in range(max_lag + 1):
        time_fwd_cc = np.array(
            [binned_pair[0][0:len(binned_pair[0]) - max_lag],
             binned_pair[1][lag:len(binned_pair[1]) - max_lag + lag]])
        time_bwd_cc = np.array(
            [binned_pair[0][lag:len(binned_pair[0]) - max_lag + lag],
             binned_pair[1][0:len(binned_pair[1]) - max_lag]])

        # taking the minimum, place by place, for the coincidences
        fwd_coinc_count[lag] = np.sum(np.minimum(time_fwd_cc[0],
                                                 time_fwd_cc[1]))
        bwd_coinc_count[lag] = np.sum(np.minimum(time_bwd_cc[0],
                                                 time_bwd_cc[1]))
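    # Worked example of the count above (illustrative, not from the original
    # code): for binned_pair = [[1, 0, 2, 0], [0, 1, 0, 2]] and max_lag = 2,
    # the forward count at lag 1 compares bins [1, 0] of unit A with bins
    # [1, 0] of unit B shifted by one bin, giving min(1, 1) + min(0, 0) = 1
    # coincidence, i.e. unit B tends to fire one bin after unit A.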
    # choice of the best lag, taking into account the reference lag
    if reference_lag <= 0:
        # if the global maximum is in the forward process (A to B)
        if np.amax(fwd_coinc_count) > np.amax(bwd_coinc_count):
            # fwd_flag indicates whether we are in time_fwd_cc or time_bwd_cc
            fwd_flag = 1
            global_maximum_index = np.argmax(fwd_coinc_count)
        else:
            fwd_flag = 2
            global_maximum_index = np.argmax(bwd_coinc_count)
        best_lag = (fwd_flag == 1) * global_maximum_index - (
            fwd_flag == 2) * global_maximum_index
        max_coinc_count = max(np.amax(fwd_coinc_count),
                              np.amax(bwd_coinc_count))
    else:
        # reverse bwd_coinc_count and do not take into account its first
        # entry (the zero-lag count)
        bwd_coinc_count_rev = bwd_coinc_count[1:len(bwd_coinc_count)][::-1]
        hab_l = np.append(bwd_coinc_count_rev, fwd_coinc_count)
        lags = range(-max_lag, max_lag + 1)
        max_coinc_count = np.amax(hab_l)
        best_lag = lags[np.argmax(hab_l)]
        if best_lag < 0:
            lag_ref = best_lag + reference_lag
            coinc_count_ref = hab_l[lags.index(lag_ref)]
        else:
            lag_ref = best_lag - reference_lag
            coinc_count_ref = hab_l[lags.index(lag_ref)]

    # now check whether the pattern, with those neurons and that particular
    # configuration of lags,
    # is already in the list of the significant patterns:
    # if it is, don't do the testing,
    # if it is not, continue
    previous_neu = ensemble['neurons']
    pattern_candidate = copy.copy(previous_neu)
    pattern_candidate.append(n2)
    pattern_candidate = np.array(pattern_candidate)

    # add both the new lag and zero
    previous_lags = ensemble['lags']
    lags_candidate = copy.copy(previous_lags)
    lags_candidate.append(best_lag)
    lags_candidate[:0] = [0]
    pattern_candidate = list(pattern_candidate)
    lags_candidate = list(lags_candidate)
    item_candidate = [[pattern_candidate], [lags_candidate]]
    if same_configuration_pruning and _assert_same_pattern(
            item_candidate=item_candidate,
            existing_patterns=existing_patterns,
            max_lag=max_lag):
        # the pattern in this very same configuration was already tested:
        # return an empty (non-significant) assembly without re-testing
        en_neurons = copy.copy(ensemble['neurons'])
        en_neurons.append(n2)
        en_lags = copy.copy(ensemble['lags'])
        en_lags.append(np.inf)
        en_pvalue = copy.copy(ensemble['pvalue'])
        en_pvalue.append(1)
        en_n_occ = copy.copy(ensemble['signature'])
        en_n_occ.append([0, 0])
        item_candidate = []
        assembly = {'neurons': en_neurons,
                    'lags': en_lags,
                    'pvalue': en_pvalue,
                    'times': [],
                    'signature': en_n_occ}
        return assembly, item_candidate
    else:
        # go on with the testing
        pair_expectation = np.sum(par_proc_expectation)

        # case of no coincidences or limit for the asymptotic F
        # distribution (too few coincidences)
        if max_coinc_count == 0 or pair_expectation <= 5 or \
                pair_expectation >= (min(np.sum(binned_pair[0]),
                                         np.sum(binned_pair[1])) - 5):
            en_neurons = copy.copy(ensemble['neurons'])
            en_neurons.append(n2)
            en_lags = copy.copy(ensemble['lags'])
            en_lags.append(np.inf)
            en_pvalue = copy.copy(ensemble['pvalue'])
            en_pvalue.append(1)
            en_n_occ = copy.copy(ensemble['signature'])
            en_n_occ.append([0, 0])
            assembly = {'neurons': en_neurons,
                        'lags': en_lags,
                        'pvalue': en_pvalue,
                        'times': [],
                        'signature': en_n_occ}
            if same_configuration_pruning:
                item_candidate = []
                return assembly, item_candidate
            else:
                return assembly
        else:  # construct the activation series for binned_pair
            length = len(binned_pair[0])  # trial length
            activation_series = np.zeros(length)
            if reference_lag <= 0:
                if best_lag == 0:  # synchrony case
                    for i in range(maxrate):  # for all parallel processes
                        par_processes_a = par_processes[i][0]
                        par_processes_b = par_processes[i][1]
                        activation_series = \
                            np.add(activation_series,
                                   np.multiply(par_processes_a,
                                               par_processes_b))
                    coinc_count_matrix = np.array([[0, fwd_coinc_count[0]],
                                                   [bwd_coinc_count[2], 0]])
                    # matrix with #AB and #BA;
                    # here we specifically choose
                    # 'l* = -2' for the synchrony case
                elif best_lag > 0:
                    for i in range(maxrate):
                        par_processes_a = par_processes[i][0]
                        par_processes_b = par_processes[i][1]
                        # multiplication between the two binned time series
                        # shifted by best_lag
                        activation_series[0:length - best_lag] = \
                            np.add(activation_series[0:length - best_lag],
                                   np.multiply(
                                       par_processes_a[0:length - best_lag],
                                       par_processes_b[best_lag:length]))
                    coinc_count_matrix = \
                        np.array([[0, fwd_coinc_count[global_maximum_index]],
                                  [bwd_coinc_count[global_maximum_index], 0]])
                else:
                    for i in range(maxrate):
                        par_processes_a = par_processes[i][0]
                        par_processes_b = par_processes[i][1]
                        activation_series[-best_lag:length] = \
                            np.add(activation_series[-best_lag:length],
                                   np.multiply(
                                       par_processes_a[-best_lag:length],
                                       par_processes_b[0:length + best_lag]))
                    coinc_count_matrix = \
                        np.array([[0, fwd_coinc_count[global_maximum_index]],
                                  [bwd_coinc_count[global_maximum_index], 0]])
            else:
                if best_lag == 0:
                    for i in range(maxrate):
                        par_processes_a = par_processes[i][0]
                        par_processes_b = par_processes[i][1]
                        activation_series = \
                            np.add(activation_series,
                                   np.multiply(par_processes_a,
                                               par_processes_b))
                elif best_lag > 0:
                    for i in range(maxrate):
                        par_processes_a = par_processes[i][0]
                        par_processes_b = par_processes[i][1]
                        activation_series[0:length - best_lag] = \
                            np.add(activation_series[0:length - best_lag],
                                   np.multiply(
                                       par_processes_a[0:length - best_lag],
                                       par_processes_b[best_lag:length]))
                else:
                    for i in range(maxrate):
                        par_processes_a = par_processes[i][0]
                        par_processes_b = par_processes[i][1]
                        activation_series[-best_lag:length] = \
                            np.add(activation_series[-best_lag:length],
                                   np.multiply(
                                       par_processes_a[-best_lag:length],
                                       par_processes_b[0:length + best_lag]))
                coinc_count_matrix = np.array([[0, max_coinc_count],
                                               [coinc_count_ref, 0]])
            # chunking
            chunked, nch = _chunking(binned_pair=binned_pair,
                                     size_chunks=size_chunks,
                                     max_lag=max_lag,
                                     best_lag=best_lag)
            # for every chunk, a vector with, in each entry, the sum of the
            # elements of each parallel binary process, for each unit
            marginal_counts = np.zeros((nch, maxrate, 2), dtype=int)

            # maxrate_t : contains the maxrates of both neurons in each chunk
            maxrate_t = np.zeros(nch, dtype=int)

            # ch_nn : contains the lengths of the different chunks
            ch_nn = np.zeros(nch, dtype=int)

            count_sum = 0

            # for every chunk build the parallel processes
            # and the coincidence counts
            for iii in range(nch):
                binned_pair_chunked = np.array(chunked[iii])
                maxrate_t[iii] = max(max(binned_pair_chunked[0]),
                                     max(binned_pair_chunked[1]))
                ch_nn[iii] = len(chunked[iii][0])
                par_processes_chunked = [None for _ in range(
                    int(maxrate_t[iii]))]
                for i in range(int(maxrate_t[iii])):
                    par_processes_chunked[i] = np.zeros(
                        (2, len(binned_pair_chunked[0])), dtype=int)
                    par_processes_chunked[i] = np.array(
                        binned_pair_chunked > i, dtype=int)
                for i in range(int(maxrate_t[iii])):
                    par_processes_a = par_processes_chunked[i][0]
                    par_processes_b = par_processes_chunked[i][1]
                    marginal_counts[iii][i][0] = int(np.sum(par_processes_a))
                    marginal_counts[iii][i][1] = int(np.sum(par_processes_b))
                    count_sum = count_sum + min(marginal_counts[iii][i][0],
                                                marginal_counts[iii][i][1])
                    # marginal_counts[iii][i] has in its entries
                    # '[ #_a^{\alpha,c} , #_b^{\alpha,c}]'
                    # where '\alpha' goes from 1 to maxrate and c goes
                    # from 1 to nch
            # calculation of the variance for each chunk
            n = ntp - max_lag  # used in the calculation of the p-value
            var_x = [np.zeros((2, 2)) for _ in range(nch)]
            var_tot = 0
            cov_abab = [0 for _ in range(nch)]
            cov_abba = [0 for _ in range(nch)]
            var_t = [np.zeros((2, 2)) for _ in range(nch)]
            cov_x = [np.zeros((2, 2)) for _ in range(nch)]

            for iii in range(nch):  # for every chunk
                ch_size = ch_nn[iii]

                # evaluation of AB + variance and covariance
                cov_abab[iii] = [[0 for _ in range(maxrate_t[iii])]
                                 for _ in range(maxrate_t[iii])]

                # for every rate up to the maxrate in that chunk
                for i in range(maxrate_t[iii]):
                    par_marg_counts_i = \
                        np.outer(marginal_counts[iii][i], np.ones(2))
                    cov_abab[iii][i][i] = \
                        np.multiply(
                            np.multiply(par_marg_counts_i,
                                        par_marg_counts_i.T)
                            / float(ch_size),
                            np.multiply(ch_size - par_marg_counts_i,
                                        ch_size - par_marg_counts_i.T)
                            / float(ch_size * (ch_size - 1)))

                    # calculation of the variance
                    var_t[iii] = var_t[iii] + cov_abab[iii][i][i]

                    # cross covariance terms
                    if maxrate_t[iii] > 1:
                        for j in range(i + 1, maxrate_t[iii]):
                            par_marg_counts_j = \
                                np.outer(marginal_counts[iii][j], np.ones(2))
                            cov_abab[iii][i][j] = \
                                2 * np.multiply(
                                    np.multiply(par_marg_counts_j,
                                                par_marg_counts_j.T)
                                    / float(ch_size),
                                    np.multiply(
                                        ch_size - par_marg_counts_i,
                                        ch_size - par_marg_counts_i.T)
                                    / float(ch_size * (ch_size - 1)))

                            # update of the variance
                            var_t[iii] = var_t[iii] + cov_abab[iii][i][j]

                # evaluation of coinc_count_matrix = #AB - #BA
                cov_abba[iii] = [[0 for _ in range(maxrate_t[iii])]
                                 for _ in range(maxrate_t[iii])]

                for i in range(maxrate_t[iii]):
                    par_marg_counts_i = \
                        np.outer(marginal_counts[iii][i], np.ones(2))
                    cov_abba[iii][i][i] = \
                        np.multiply(
                            np.multiply(par_marg_counts_i,
                                        par_marg_counts_i.T)
                            / float(ch_size),
                            np.multiply(ch_size - par_marg_counts_i,
                                        ch_size - par_marg_counts_i.T)
                            / float(ch_size * (ch_size - 1) ** 2))
                    cov_x[iii] = cov_x[iii] + cov_abba[iii][i][i]
                    if maxrate_t[iii] > 1:
                        for j in range((i + 1), maxrate_t[iii]):
                            par_marg_counts_j = \
                                np.outer(marginal_counts[iii][j], np.ones(2))
                            cov_abba[iii][i][j] = \
                                2 * np.multiply(
                                    np.multiply(par_marg_counts_j,
                                                par_marg_counts_j.T)
                                    / float(ch_size),
                                    np.multiply(
                                        ch_size - par_marg_counts_i,
                                        ch_size - par_marg_counts_i.T)
                                    / float(ch_size * (ch_size - 1) ** 2))
                            cov_x[iii] = cov_x[iii] + cov_abba[iii][i][j]

                var_x[iii] = var_t[iii] + var_t[iii].T - cov_x[iii] - \
                    cov_x[iii].T
                var_tot = var_tot + var_x[iii]
            # Yates correction
            coinc_count_matrix = coinc_count_matrix - coinc_count_matrix.T
            if abs(coinc_count_matrix[0][1]) > 0:
                coinc_count_matrix = abs(coinc_count_matrix) - 0.5
            if var_tot[0][1] == 0:
                pr_f = 1
            else:
                # p-value obtained through approximation to a Fisher F
                # distribution (here we employ the survival function)
                fstat = coinc_count_matrix ** 2 / var_tot
                pr_f = f.sf(fstat[0][1], 1, n)
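            # Note (illustrative, not from the original code): the test
            # statistic is the squared, Yates-corrected difference between
            # forward and backward coincidence counts, (#AB - #BA - 0.5)**2,
            # divided by its estimated variance, and it is compared against
            # an F distribution with (1, n) degrees of freedom. For example,
            # a corrected difference of 10 with variance 4 gives fstat = 25
            # and pr_f = f.sf(25, 1, n).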
            # Creation of the dictionary with the results
            en_neurons = copy.copy(ensemble['neurons'])
            en_neurons.append(n2)
            en_lags = copy.copy(ensemble['lags'])
            en_lags.append(best_lag)
            en_pvalue = copy.copy(ensemble['pvalue'])
            en_pvalue.append(pr_f)
            en_n_occ = copy.copy(ensemble['signature'])
            en_n_occ.append([len(en_neurons), sum(activation_series)])
            assembly = {'neurons': en_neurons,
                        'lags': en_lags,
                        'pvalue': en_pvalue,
                        'times': activation_series,
                        'signature': en_n_occ}
            if same_configuration_pruning:
                return assembly, item_candidate
            else:
                return assembly
def _significance_pruning_step(pre_pruning_assembly):
    """
    Between two assemblies with the same unit set arranged into different
    configurations, the most significant one is chosen.

    Parameters
    ----------
    pre_pruning_assembly : list
        Contains the whole set of significant assemblies (unfiltered).

    Returns
    -------
    assembly : list
        Contains the filtered assemblies.
    n_filtered_assemblies : int
        Number of assemblies filtered out by the function.
    """
    # number of assemblies before pruning
    nns = len(pre_pruning_assembly)

    # list used for the selection of the assemblies to keep
    selection = []

    # list storing the kept assemblies
    assembly = []

    for i in range(nns):
        # sort the units of the assembly, so that membership can be checked
        elem = sorted(pre_pruning_assembly[i]['neurons'])
        if elem in selection:
            # find the element that was already in the list
            pre = selection.index(elem)
            if pre_pruning_assembly[i]['pvalue'][-1] <= \
                    assembly[pre]['pvalue'][-1]:
                # the new element has a p-value that is smaller than
                # the one stored previously:
                # substitute the previous element in the selection
                selection[pre] = elem
                # substitute also in the list of the new assemblies
                assembly[pre] = pre_pruning_assembly[i]
        if elem not in selection:
            selection.append(elem)
            assembly.append(pre_pruning_assembly[i])

    # the number of assemblies filtered out is equal to the difference
    # between the pre- and post-pruning sizes
    n_filtered_assemblies = nns - len(assembly)

    return assembly, n_filtered_assemblies
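
# Illustrative example of the pruning above (not part of the original code):
# if two detected assemblies involve the same units, e.g. neurons {2, 5, 7}
# once with lags [1, 3] and once with lags [2, 1], only the configuration
# whose final p-value is smaller is kept.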
def _subgroup_pruning_step(pre_pruning_assembly):
    """
    Remove assemblies whose elements are already all included in a bigger
    assembly.

    Parameters
    ----------
    pre_pruning_assembly : list
        Contains the assemblies filtered by significance.

    Returns
    -------
    assembly : list
        Contains the assemblies filtered by inclusion.
    """
    # reversing the pre_pruning_assembly makes the computation quicker,
    # since the assemblies are formed by agglomeration
    pre_pruning_assembly_r = list(reversed(pre_pruning_assembly))
    nns = len(pre_pruning_assembly_r)

    # boolean list with the selected assemblies
    selection = [True for _ in range(nns)]

    for i in range(nns):
        # check only in the range of the already selected assemblies
        if selection[i]:
            a = pre_pruning_assembly_r[i]['neurons']
            for j in range(i + 1, nns):
                if selection[j]:
                    b = pre_pruning_assembly_r[j]['neurons']
                    # check if a is included in b or vice versa
                    if set(a).issuperset(set(b)):
                        selection[j] = False
                    if set(b).issuperset(set(a)):
                        selection[i] = False
                    # only for the case in which significance_pruning=False
                    if set(a) == set(b):
                        selection[i] = True
                        selection[j] = True

    assembly_r = []

    # put into the final assembly only the selected ones
    for i in range(nns):
        if selection[i]:
            assembly_r.append(pre_pruning_assembly_r[i])

    assembly = list(reversed(assembly_r))

    return assembly
def _raise_errors(binned_spiketrain, max_lag, alpha, min_occurrences,
                  size_chunks, max_spikes):
    """
    Raise errors if the parameters given in input are not correct.

    Parameters
    ----------
    binned_spiketrain : elephant.conversion.BinnedSpikeTrain
        Binned spike trains containing the data to be analyzed.
    max_lag : int
        Maximal lag to be tested. For a binning dimension of bin_size the
        method will test all pair configurations with a time
        shift between -max_lag and max_lag.
    alpha : float
        Significance level.
    min_occurrences : int
        Minimal number of occurrences required for an assembly
        (all assemblies, even if significant, with fewer occurrences
        than min_occurrences are discarded).
    size_chunks : int
        Size (in bins) of the chunks in which the spike trains are divided
        to compute the variance (to reduce non-stationarity effects
        on variance estimation).
    max_spikes : int
        Maximal assembly order (the algorithm will return assemblies
        composed of at most max_spikes elements).

    Raises
    ------
    TypeError
        If the data is not an elephant.conversion.BinnedSpikeTrain object.
    ValueError
        If the maximum lag considered is 1 or less.
        If the significance level is not in [0, 1].
        If the minimal number of occurrences for an assembly is less than 1.
        If the length of the chunks for the variance computation is 1 or
        less.
        If the maximal assembly order is smaller than 2.
        If the time series is too short (fewer than 100 bins).
    """
    if not isinstance(binned_spiketrain, conv.BinnedSpikeTrain):
        raise TypeError(
            'data must be in BinnedSpikeTrain format')

    if max_lag < 2:
        raise ValueError('max_lag value cannot be less than 2')

    if alpha < 0 or alpha > 1:
        raise ValueError('significance level has to be in interval [0,1]')

    if min_occurrences < 1:
        raise ValueError('minimal number of occurrences for an assembly '
                         'must be at least 1')

    if size_chunks < 2:
        raise ValueError('length of the chunks cannot be 1 or less')

    if max_spikes < 2:
        raise ValueError('maximal assembly order must be at least 2')

    if binned_spiketrain.shape[1] - max_lag < 100:
        raise ValueError('The time series is too short, consider '
                         'taking a longer portion of spike train '
                         'or diminish the bin size to be tested')


# alias for the function
cad = cell_assembly_detection
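
# Minimal smoke-test sketch (illustrative only, not part of the published
# method); it mirrors the module docstring example and runs only when the
# file is executed directly.
if __name__ == "__main__":
    import quantities as pq
    import elephant.spike_train_generation

    np.random.seed(30)
    sts = elephant.spike_train_generation.cpp(
        rate=15 * pq.Hz, A=[0] + [0.95] + [0] * 4 + [0.05], t_stop=10 * pq.s)
    spM = conv.BinnedSpikeTrain(sts, bin_size=10 * pq.ms)
    detected = cell_assembly_detection(binned_spiketrain=spM, max_lag=2)
    for pattern in detected:
        print(pattern['neurons'], pattern['lags'], pattern['signature'])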