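"""Analysis of baseline and white-noise driven spiking activity stored in NIX files.

Reads the recordings via nixio, estimates firing rates, coherence and transfer functions
for the white-noise stimulation, characterizes the baseline activity (ISI statistics and
phase locking to the EOD), and writes the derived data for figure 1 to compressed npz
files in the "derived_data" folder.
"""
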
import os

import numpy as np
import nixio as nix
import scipy.signal as signal
import matplotlib.mlab as mlab
from scipy.stats import circmean

from ..util import gaussKernel, detect_eod_times, load_white_noise_stim


def get_firing_rate(spikes, duration, dt, sigma):
    """Estimate trial-wise firing rates by convolving binary spike trains with a Gaussian kernel."""
    time = np.arange(0.0, duration, dt)
    rates = np.zeros((len(spikes), len(time)))
    k = gaussKernel(sigma, dt)
    for i, sp in enumerate(spikes):
        binary = np.zeros(len(time))
        binary[np.array(sp / dt, dtype=int)] = 1
        rates[i, :] = np.convolve(binary, k, mode="same")
    return time, rates


def analyse_white_noise_data(stimulus, spikes):
    f, coherence_spectra = get_coherence(stimulus, spikes, 0.001)
    f, transfer_functions = get_transfer_function(stimulus, spikes, 0.001)
    return f, transfer_functions, coherence_spectra


def create_eod_template(eod, eod_times, eod_indices, spike_times, avg_count=None):
    """Average randomly selected EOD cycles onto a common phase axis and get each spike's EOD phase."""
    phases = np.zeros((len(spike_times) - 1, 1))
    relative_times = np.zeros((len(spike_times) - 1, 1))
    for i, sp in enumerate(spike_times[:-1]):
        idx = np.where(eod_times < sp)[0][-1]
        if idx >= len(eod_times) - 1:  # no following EOD time, the cycle cannot be completed
            break
        period = eod_times[idx + 1] - eod_times[idx]
        dt = sp - eod_times[idx]
        phases[i] = dt / period * 2 * np.pi
        relative_times[i] = dt

    indices = np.arange(2, len(eod_times[:-1]))
    np.random.shuffle(indices)
    indices = indices[:avg_count]
    x = np.arange(0.0, 2 * np.pi + 0.01, 0.1)
    eod_snippets = np.zeros((len(indices), len(x)))
    for i, idx in enumerate(indices):
        fp = eod[eod_indices[idx]:eod_indices[idx + 1]]
        xp = np.linspace(0.0, 2 * np.pi, len(fp))
        eod_snippets[i, :] = np.interp(x, xp, fp)

    return np.mean(eod_snippets, axis=0), np.std(eod_snippets, axis=0), phases, relative_times


def get_transfer_function(stimulus, spikes, sigma):
    csds = None
    psds = None
    f = None
    transfer_functions = None
    # 10 s stimulus sampled at 20 kHz
    _, responses = get_firing_rate(spikes, 10., 1. / 20000., sigma)
    for i, r in enumerate(responses):
        # cross-spectral density between stimulus and response, and power spectrum of the stimulus
        f, csd = signal.csd(stimulus[1], responses[i, :], fs=20000, nperseg=2**14, noverlap=2**13,
                            detrend="constant", window="hann")
        f, psd = signal.csd(stimulus[1], stimulus[1], fs=20000, nperseg=2**14, noverlap=2**13,
                            detrend="constant", window="hann")
        if csds is None:
            csds = np.zeros((len(responses), len(f)), dtype=np.complex128)
            psds = np.zeros((len(responses), len(f)), dtype=np.complex128)
            transfer_functions = np.zeros((len(responses), len(f)), dtype=np.complex128)
        csds[i, :] = csd
        psds[i, :] = psd
        transfer_functions[i, :] = csd / psd
    return f, transfer_functions


def get_coherence(stimulus, spikes, sigma=0.001):
    _, responses = get_firing_rate(spikes, 10., 1. / 20000., sigma)
    f = None
    coherence_spectra = None
    for i, r in enumerate(responses):
        c, f = mlab.cohere(r, stimulus[1], NFFT=2**14, noverlap=2**13, Fs=20000,
                           detrend=mlab.detrend_mean, window=mlab.window_hanning)
        if coherence_spectra is None:
            coherence_spectra = np.zeros((len(responses), len(f)))
        coherence_spectra[i, :] = c
    return f, coherence_spectra


def save_white_noise_data(time, stimulus, spikes, responses, frequency, coherence_spectra,
                          gain_functions):
    # make sure the output folder exists before writing the npz file
    if not os.path.exists("derived_data"):
        os.mkdir("derived_data")
    trial_data = np.empty(0)
    spike_data = np.empty(0)
    for i, sp in enumerate(spikes):
        trial = np.ones_like(sp) * i
        trial_data = np.hstack((trial_data, trial))
        spike_data = np.hstack((spike_data, sp))
    np.savez_compressed(os.path.join("derived_data", "figure1_whitenoise_data"),
                        spikes=spike_data, trial_data=trial_data,
                        time=time, rates=responses,
                        stimulus=stimulus, frequency=frequency,
                        coherence_spectra=coherence_spectra, gain_spectra=gain_functions)


def load_white_noise_data_from_nix(block):
    tags = []
    tag_times = []
    mtags = []
    mtag_positions = []
    stims = []
    stim_contrasts = []
    for t in block.tags:
        # only consider file-stimulus (frozen white noise, "gwn") presentations
        if "filestimulus" in t.name.lower():
            secs = t.metadata.find_sections(lambda sec: "file" in sec.props)
            if len(secs) == 0:
                continue
            filename = secs[0].props["file"].values[0]
            if "gwn" not in filename:
                continue
        else:
            continue
        tags.append(t.name)
        tag_times.append((t.position[0], t.position[0] + t.extent[0]))
        for mt in block.multi_tags:
            pos = mt.positions[:]
            ext = mt.extents[:]
            for i, (p, e) in enumerate(zip(pos, ext)):
                if e < 0.01:
                    continue
                if p > t.position[0] and (p + e) < t.position[0] + t.extent[0]:
                    mtags.append(mt.name)
                    mtag_positions.append(i)
                    stims.append(filename)
                    stim_contrasts.append(secs[0].props["contrast"].values[0] * 100)

    max_contr = np.max(stim_contrasts)
    unique_stims = np.unique(stims)
    stim_dict = {}
    stimuli = {}
    for s in unique_stims:
        # collect, for each stimulus file, the trials recorded at the maximum contrast
        stim_dict[s] = []
        for i, c in enumerate(stim_contrasts):
            if stims[i] == s and c == max_contr:
                stim_dict[s].append(i)
        time, stim = load_white_noise_stim(s, stim_duration=10, sampling_rate=20000,
                                           folder="stimuli")
        if time is None:
            return None, None
        stimuli[s] = (time, stim)
    # at this point we have the stimulus used at max contrast, the respective trial indices,
    # and the stimulus itself; let's work only on the first stimulus
    stimulus = list(stimuli.keys())[0]
    spike_times = []
    for idx in stim_dict[stimulus]:
        mt = block.multi_tags[mtags[idx]]
        p = mtag_positions[idx]
        # spike times relative to stimulus onset
        st = mt.retrieve_data(p, "Spikes-1")[:] - mt.positions[p][0]
        if len(st) > 0 and st[0] < 0:
            st = st[1:]
        spike_times.append(st)
    return stimuli[stimulus], spike_times


def analyze_driven_activity(args):
    if not os.path.exists(args.dataset):
        raise ValueError("Dataset %s was not found!" % args.dataset)
    nf = nix.File.open(args.dataset, nix.FileMode.ReadOnly)
    block = nf.blocks[0]
    stim, spikes = load_white_noise_data_from_nix(block)
    nf.close()
    if stim is None:
        raise ValueError("No white noise stimulation found in dataset %s!" % args.dataset)
    stimulus = (stim[0], stim[1])
    f, tfs, cspecs = analyse_white_noise_data(stimulus, spikes)
    time, rates = get_firing_rate(spikes, 10., 1. / 20000., 0.001)
    save_white_noise_data(time, stimulus, spikes, rates, f, cspecs, tfs)


def analyze_baseline_activity(args):
    if not os.path.exists(args.dataset):
        raise ValueError("Dataset %s was not found!" % args.dataset)
    nf = nix.File.open(args.dataset, nix.FileMode.ReadOnly)
    block = nf.blocks[0]
    baseline_tag = None
    for t in block.tags:
        if "baseline" in t.name.lower():
            if baseline_tag:
                # keep the tag covering the longer stretch of baseline activity
                baseline_tag = baseline_tag if t.extent[0] <= baseline_tag.extent[0] else t
            else:
                baseline_tag = t
    if not baseline_tag:
        nf.close()
        raise ValueError("No baseline data found in dataset %s!" % args.dataset)
    baseline_eod = baseline_tag.retrieve_data("EOD")[:]
    baseline_voltage = baseline_tag.retrieve_data("V-1")[:]
    baseline_spikes = baseline_tag.retrieve_data("Spikes-1")[:]
    baseline_time = np.asarray(baseline_tag.references["EOD"].dimensions[0].axis(len(baseline_eod)))
    nf.close()
    baseline_rate = len(baseline_spikes) / (baseline_time[-1] - baseline_time[0])
    # eod times
    eod_times, eod_indices = detect_eod_times(baseline_time, baseline_eod)
    eodf = len(eod_times) / (baseline_time[-1] - baseline_time[0])
    # spike times
    isis = np.diff(baseline_spikes)
    cv = np.std(isis) / np.mean(isis)
    burstiness = len(isis[isis < 1.5 * np.mean(np.diff(eod_times))]) / len(isis)
    # phase locking
    eod_template, template_std, spike_phases, spike_times_rel = create_eod_template(baseline_eod,
                                                                                    eod_times,
                                                                                    eod_indices,
                                                                                    baseline_spikes,
                                                                                    avg_count=1000)
    eod_ampl = np.max(eod_template) - np.min(eod_template)
    # vector strength: magnitude of the mean phase vector of the spike times within the EOD cycle
    phase_locking = np.abs(np.mean(np.exp(1j * 2 * np.pi * np.mean(1 / np.diff(eod_times)) * spike_times_rel)))
    save_baseline_data(baseline_time, baseline_eod, baseline_voltage, baseline_spikes,
                       spike_phases, spike_times_rel, eod_times, eod_template, template_std,
                       isis, cv, burstiness, phase_locking, eodf, baseline_rate)


def save_baseline_data(time, eod, voltage, spikes, phases, spike_cycle_times,
                       eod_times, eod_template, template_std, isis, cv, bi, vs, eodf, baseline_rate):
    # make sure the output folder exists before writing the npz file
    if not os.path.exists("derived_data"):
        os.mkdir("derived_data")
    np.savez_compressed(os.path.join("derived_data", "figure1_baseline_data"),
                        time=time, eod=eod, voltage=voltage, spike_times=spikes[:-1],
                        spike_phases=np.squeeze(phases),
                        spike_cycle_times=np.squeeze(spike_cycle_times),
                        eod_times=eod_times, eod_template=eod_template,
                        template_error=template_std, isis=isis, cv=cv, burstiness=bi,
                        vector_strength=vs, eodf=eodf, baseline_rate=baseline_rate,
                        preferred_phase=circmean(np.squeeze(phases)))
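

if __name__ == "__main__":
    # Minimal command-line sketch, not part of the original analysis pipeline: it only
    # illustrates how analyze_baseline_activity/analyze_driven_activity could be invoked.
    # The argument names below ("dataset", "--mode") are assumptions; only args.dataset is
    # actually required by the two functions. Because of the relative import of ..util,
    # the module would normally be run through the package's own entry point instead.
    import argparse

    parser = argparse.ArgumentParser(description="Derive figure 1 data from a NIX dataset.")
    parser.add_argument("dataset", help="path to the NIX file")
    parser.add_argument("--mode", choices=["baseline", "driven", "both"], default="both",
                        help="which analysis to run")
    cli_args = parser.parse_args()
    if cli_args.mode in ("baseline", "both"):
        analyze_baseline_activity(cli_args)
    if cli_args.mode in ("driven", "both"):
        analyze_driven_activity(cli_args)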