'''
Script to run the SPADE analysis for the Polychronous model
validation (Gutzen et al., 2018).

Authors: Pietro Quaglio, Robin Gutzen, Michael von Papen, Guido Trensch,
Sonja Grün, Michael Denker
'''
import os
import argparse
import neo
import quantities as pq
import numpy as np
import yaml
import elephant.spade as spade
# Function to load the C and SpiNNaker simulation data
def load(file_path, t_start=0, t_stop=60000, filter_inh=False,
         **kwargs):
    with open(file_path, 'r') as f:
        lines = f.readlines()
    N = 1000  # total number of neurons in the simulation
    # Read the spike times, one list per neuron
    spike_times = [[] for _ in range(N)]
    for line in lines:
        # Each line holds the spike time (seconds, milliseconds) and the
        # id of the spiking neuron
        t_s, t_ms, n = line.split()
        t = int(t_s) * 1000 + int(t_ms)
        n = int(n)
        # The file is assumed to be sorted by spike time
        if t > t_stop:
            break
        spike_times[n].append(t)
    # Fill the spike trains
    nbr_neurons = N
    if filter_inh:
        # Keep only the 800 excitatory neurons
        nbr_neurons = 800
    spiketrains = [[] for _ in range(nbr_neurons)]
    for n, st in enumerate(spike_times):
        # Neurons 0-799 are excitatory, neurons 800-999 inhibitory
        n_type = 'exc' if n < 800 else 'inh'
        if not filter_inh or n_type == 'exc':
            spiketrains[n] = neo.SpikeTrain(np.sort(st), units='ms',
                                            t_start=t_start, t_stop=t_stop,
                                            n_type=n_type)
    return spiketrains
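# A minimal sketch of the expected input format, inferred from the parsing
# above: each line of the .dat file holds three whitespace-separated integers
# "<seconds> <milliseconds> <neuron_id>", sorted by spike time, e.g.:
#   0 13 305
#   0 27 12
# Example call (the file name follows the naming scheme used further below):
#   spiketrains = load('out_firings_after1h.dat', filter_inh=True)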
# Helper functions to create the nested result folders
def mkdirp(directory):
    # Create the directory if it does not exist yet (like `mkdir -p`)
    if not os.path.isdir(directory):
        print(directory)
        os.mkdir(directory)


def split_path(path):
    # Split a path into the list of its folders
    folders = []
    while True:
        path, folder = os.path.split(path)
        if folder != "":
            folders.append(folder)
        else:
            if path != "":
                folders.append(path)
            break
    folders.reverse()
    return folders
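# For example, split_path('./a/b/c') returns ['.', 'a', 'b', 'c'], so that
# mkdirp can be applied to each successively longer prefix of a path.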
# Parse the command-line arguments
parser = argparse.ArgumentParser(
    description='Running the SPADE analysis for the Polychronous model '
                'validation')
# The simulator used to generate the data
parser.add_argument('simulator', metavar='simulator', type=str,
                    help='The simulator used to generate the data to analyze')
# The number of simulation hours after which the data were recorded
parser.add_argument('hour', metavar='hour', type=int,
                    help='The number of simulation hours after which the data '
                         'were recorded')
# Get the arguments
args = parser.parse_args()
simulator = args.simulator
hour = args.hour
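# Example invocation (the simulator label is an assumption; it must match a
# folder name under the simulation_data directory used below):
#   python <this_script>.py spinnaker 1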
# Load the general SPADE parameters
with open("configfile_spade.yaml", 'r') as stream:
    param = yaml.safe_load(stream)
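# A minimal sketch of what configfile_spade.yaml is assumed to contain (the
# keys are those read below; the values shown are placeholders, not the ones
# used in the study):
#   binsize: 3            # bin size in ms
#   winlen: 10            # window length in bins
#   min_spikes: 2         # minimum number of spikes per pattern
#   min_occ: 2            # minimum number of pattern occurrences
#   min_neu: 2            # minimum number of neurons per pattern
#   n_surr: 1000          # number of surrogates for significance testing
#   alpha: 0.05           # significance level
#   psr_param: [0, 0, 0]  # pattern spectrum filtering parameters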
####### Loading the data #######
# Path to the simulation data of the given simulator
experiment = '../simulation_data/iteration_III/60s_simulation_runs/{}'.format(
    simulator)
# Path to the recording made after the given number of simulation hours
simulation = '/out_firings_after{}h'.format(hour)
data_path = './{}'.format(experiment + simulation)
# Load the .dat file containing the spike data (excitatory neurons only)
spikedata = load(data_path + '.dat', filter_inh=True)
####### Running the SPADE analysis #######
results_spade = spade.spade(
    spikedata,
    binsize=param['binsize'] * pq.ms,
    winlen=param['winlen'],
    min_spikes=param['min_spikes'],
    min_neu=param['min_neu'],
    min_occ=param['min_occ'],
    n_surr=param['n_surr'],
    alpha=param['alpha'],
    psr_param=param['psr_param'],
    output_format='patterns')
####### Saving the results #######
# Strip the quantities from lags and times for Python 2.7 compatibility
patterns = results_spade['patterns']
for patt in patterns:
    patt['lags'] = list(patt['lags'].magnitude)
    patt['times'] = list(patt['times'].magnitude)
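# Each entry of `patterns` is a dictionary describing one significant
# pattern; besides 'lags' and 'times' it also carries, e.g., the ids of the
# participating neurons (the exact set of keys depends on the elephant
# version in use).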
# Relative path where to store the results
res_dir = './patterns_results/iteration_III/60s_simulation_runs/{}{}'.format(
    simulator, simulation)
# Create the path to the results folder if it does not exist yet
path_temp = './'
for folder in split_path(res_dir):
    path_temp = path_temp + '/' + folder
    mkdirp(path_temp)
# Save the results together with the parameters used
np.save(res_dir + '/patterns.npy', (results_spade, param))
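# A sketch of how the saved results can be restored later (allow_pickle is
# required on recent NumPy versions, where it defaults to False):
#   results_spade, param = np.load(res_dir + '/patterns.npy',
#                                  allow_pickle=True)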