# lif_surface.py
  1. import os
  2. import numpy as np
  3. import pandas as pd
  4. import itertools as it
  5. from .lif_simulation import whitenoise
  6. from .lif_simulation import generate_responses
  7. from ..util import mutual_info, default_job_count, DelayType
  8. from joblib import Parallel, delayed
  9. def create_population(spike_responses, pop_size, delay_type, additional_delay):
  10. """
  11. Creates a random population of the spike_responses from the set of passed responses. If cell_density is larger than 0 the responses will be delayed according to the conduction delay.
  12. The function assumes a constant distance between "cells".
  13. Parameters
  14. ----------
  15. spike_respones : list of lists
  16. Each entry is a list/array of spike times
  17. pop_size : int
  18. the population size.
  19. cell_density : float, optional
  20. cell density in m^-1. Defaults to 0, which indicates infinitely, i.e. no conduction delays.
  21. conduction_velocity :float, optional
  22. The conduction velocity. Defaults to 50 m/s
  23. Returns
  24. -------
  25. list of np.ndarray
  26. randomly selected and delayed responses.
  27. """
  28. indices = np.arange(len(spike_responses), dtype=int)
  29. rng = np.random.default_rng()
  30. rng.shuffle(indices)
  31. population = []
  32. for i in range(pop_size):
  33. spike_times = spike_responses[indices[i]]
  34. spike_times = np.asarray(spike_times)
  35. delay = 0.0
  36. if additional_delay > 0.0:
  37. if delay_type == DelayType.Equal:
  38. delay = (rng.random() - 0.5) * additional_delay * 2
  39. elif delay_type == DelayType.EqualPositive:
  40. delay = rng.random() * additional_delay * 2
  41. elif delay_type == DelayType.Gaussian:
  42. delay = rng.normal(0.0, additional_delay)
  43. elif delay_type == DelayType.GaussianPositive:
  44. delay = rng.normal(additional_delay * 4, additional_delay)
  45. population.append(spike_times + delay)
  46. return population
def get_population_information(spike_responses, stimulus, population_size, delay_type, delay=0.0,
                               kernel_sigma=0.0005, stepsize=0.0001, trial_duration=10., repeats=1,
                               cutoff=(0., 300.)):
    """Estimate the information carried by randomly assembled populations.

    For each of ``repeats`` iterations a fresh random population is drawn with
    :func:`create_population` and the stimulus--response mutual information is
    estimated; mean and standard deviation over the repetitions are reported.

    Parameters
    ----------
    spike_responses : list of np.ndarray
        List of spike-time arrays, one per trial.
    stimulus : np.ndarray
        The stimulus waveform.
    population_size : int
        Number of responses that make up one population.
    delay_type : DelayType
        Distribution from which the per-response delays are drawn.
    delay : float, optional
        Scale of the random delay in seconds. Defaults to 0.0, i.e. no delays.
    kernel_sigma : float, optional
        Std of the Gaussian kernel used for firing rate estimation. Defaults to 0.0005.
    stepsize : float, optional
        Temporal resolution of the firing rate. Defaults to 0.0001.
    trial_duration : float, optional
        Trial duration in seconds. Defaults to 10.
    repeats : int, optional
        Number of random populations evaluated. Defaults to 1.
    cutoff : tuple of float, optional
        The lower and upper cutoff frequency of the stimulus. Defaults to (0., 300.).

    Returns
    -------
    dict
        The analysis parameters plus the mean ("mi") and standard deviation
        ("mi_err") of the mutual information estimates across the repeats.
    """
    information = np.zeros(repeats)
    for i in range(repeats):
        population_spikes = create_population(spike_responses, population_size, delay_type, delay)
        # mutual_info returns several quantities; only the mi estimate is kept,
        # using its last entry. NOTE(review): presumably mi[-1] is the estimate
        # for the full population — confirm against the mutual_info contract.
        _, _, mi, _, _ = mutual_info(population_spikes, 0.0, [False] * len(population_spikes),
                                     stimulus, cutoff, kernel_sigma, stepsize=stepsize, trial_duration=trial_duration)
        information[i] = mi[-1]
    info = {"delay_type": str(delay_type), "kernel_sigma": kernel_sigma, "pop_size": population_size, "cutoff": cutoff, "delay": delay, "mi": np.mean(information), "mi_err": np.std(information)}
    return info
  86. def process_populations(spike_responses, stimulus, population_size, kernels=[0.0005], delays=[0.0],
  87. delay_type=DelayType.Gaussian, stepsize=0.0001, trial_duration=10., repeats=1, num_cores=1):
  88. """_summary_
  89. Parameters
  90. ----------
  91. spike_responses : list of np.arrays
  92. containing the spike times in response to the stimulus presentations
  93. stimulus : np.array
  94. the stimulus waveform
  95. population_sizes : list
  96. list of population sizes that should be analysed
  97. density : float
  98. spatial density of model neurons in m^-1
  99. cutoff : tuple, optional
  100. lower and upper cutoff frequencies of the analyzed frequency bands by default (0., 500.)
  101. kernel_sigma : float, optional
  102. The sigma of the Gaussian kernel used for firing rate estimation, by default 0.0005
  103. stepsize : float, optional
  104. temporal stepsize of the simulation, by default 0.0001
  105. trial_duration : float, optional
  106. duration of the trials in seconds, by default 10.
  107. repeats : int, optional
  108. number of repetitions, by default 1
  109. conduction_velocity : float, optional
  110. simulated axonal conduction velocity, by default 50.
  111. num_cores : int, optional
  112. number of spawned parallel processes, by default 1
  113. Returns
  114. -------
  115. list
  116. the results
  117. """
  118. combinations = it.product(delays, kernels)
  119. processed_list = Parallel(n_jobs=num_cores)(delayed(get_population_information)(spike_responses, stimulus, population_size, delay_type, d, k, stepsize, trial_duration, repeats) for d, k in combinations)
  120. df = pd.DataFrame(processed_list)
  121. return df
  122. def run_simulations(args=None):
  123. delays = [0.0, 0.000125, 0.00025, 0.0005, 0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.01, 0.0125, 0.015]
  124. kernels=[0.000125, 0.00025, 0.0005, 0.001, 0.002, 0.003, 0.005, 0.01, 0.015, 0.02, 0.025]
  125. delay_types = [DelayType.Equal, DelayType.Gaussian, DelayType.EqualPositive, DelayType.GaussianPositive]
  126. lc = 0 # lower cutoff
  127. uc = 300 # upper cutoff
  128. dt = 0.00001 # s sr = 100 kHz
  129. duration = 2 # s
  130. amplitude = 0.0625
  131. num_responses = 30
  132. noise = 5
  133. tau_m = 0.0025
  134. repeats = 5
  135. num_cores = default_job_count() if args is None else args.jobs
  136. population_size = 20 if args is None else args.pop_size
  137. outfile = "test.csv" if args is None else args.outfile
  138. print("generating stimulus %i -- %i Hz..." % (lc, uc), end="\r")
  139. stimulus = whitenoise(lc, uc, dt, duration) * amplitude
  140. print("generating stimulus %i -- %i Hz... done" % (lc, uc))
  141. print("generating responses... ", end="\r")
  142. spikes = generate_responses(num_responses, stimulus, repeats, dt, noise, tau_m, num_cores)
  143. print("generating responses... done")
  144. results = None
  145. for i, delay_type in enumerate(delay_types):
  146. message = f"analysing populations [{i+1}/{len(delay_types)}], delay type: {str(delay_type).upper()}... "
  147. print(message, end="\r", flush=True)
  148. result = process_populations(spikes, stimulus, population_size, kernels, delays, delay_type, dt, duration, repeats, num_cores)
  149. if results is None:
  150. results = result
  151. else:
  152. results = results.append(result, ignore_index=True)
  153. print(message + "\t done!", end="\n")
  154. results.to_csv(outfile, sep=";")
  155. def command_line_parser(parent_parser):
  156. parser = parent_parser.add_parser("surface", help="Calculate the mutual information for a large surface that spans the synaptic kernel and the delays for different delay_types.")
  157. parser.add_argument("-j", "--jobs", type=int, default=default_job_count(), help=f"The number of parallel processes spawned for the analyses (defaults to {default_job_count()})")
  158. parser.add_argument("-p", "--pop_size", type=int, default=20, help=f"The population size (defaults to 20)")
  159. parser.add_argument("-dt", "--delay_type", type=str, default=str(DelayType.Gaussian), help="The type of distribution from which delays have been drawn. Usually a Gaussian normal distribution.")
  160. parser.add_argument("-o", "--outfile", type=str, default=os.path.join("derived_data", "lif_mi_surface.csv"), help="The destination file.")
  161. parser.set_defaults(func=run_simulations)
# Script entry point: run the surface scan with default settings when the
# module is executed directly.
if __name__ == "__main__":
    run_simulations()