# -*- coding: utf-8 -*-
from .sim_data import SimInfo
import numpy as np
from scipy.fftpack import fftn
from scipy.signal import convolve2d
import os
import matplotlib.pyplot as plt
import random
from .connection import Connection
import pickle
from .idl import dist
from objsimpy.distance import cyclic_distance
from scipy.ndimage import measurements
from matplotlib.colors import Normalize
from .colormaps import cmap_color_circle
from objsimpy.stim.stim_movie import StimulusSet
from objsimpy.stim.stim_sequence import InputTrace
from itertools import product
import scipy.ndimage
import scipy.spatial  # needed by calc_curl (KDTree)
import logging

logger = logging.getLogger(__name__)

def pickled_results(func):
    """Cache the result of func in a pickle file (first positional argument).

    Pass reevaluate=True to force recomputation even if the file exists.
    """
    def new_func(filename, *args, **kwargs):
        reevaluate = kwargs.pop('reevaluate', False)
        if not os.path.isfile(filename):
            reevaluate = True
        if reevaluate:
            logger.info("calculating ...")
            result = func(*args, **kwargs)
            logger.info("pickling result to: {}".format(filename))
            with open(filename, "wb") as f:
                pickle.dump(result, f)
        else:
            logger.info("loading pickled result from: {}".format(filename))
            with open(filename, "rb") as f:
                result = pickle.load(f)
        return result
    return new_func
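
# Hedged usage sketch (not part of the original module): pickled_results wraps
# any function whose return value pickles cleanly. The cache file name and
# slow_sum below are illustrative only.
#
#     def slow_sum(a, b):
#         return a + b
#
#     cached_sum = pickled_results(slow_sum)
#     x = cached_sum("sums.pickle", 1, 2)                   # computes and writes cache
#     x = cached_sum("sums.pickle", 1, 2)                   # loads from cache
#     x = cached_sum("sums.pickle", 1, 2, reevaluate=True)  # forces recomputation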

class Psth():
    """Peri-stimulus time histogram with shape (n_neurons, n_stimuli, time_steps)."""
    def __init__(self, psth, time_start=0):
        self.psth = psth
        self.time_start = time_start
        self.time_stop = psth.shape[2] + time_start - 1
        self.time_steps = psth.shape[2]
        self.n_neurons = psth.shape[0]
        self.n_stimuli = psth.shape[1]

def generate_spikes_and_input(psth, n_stimulus_repetitions=None):
    """Sample spike times and a matching input trace from a given Psth."""
    psth_remaining = psth.psth.copy()
    time = 0
    neuron_numbers = []
    spike_times = []
    inputs = np.array([])
    max_count = int(psth_remaining.max())
    if n_stimulus_repetitions is None or n_stimulus_repetitions < max_count:
        n_stimulus_repetitions = max_count
        logger.debug("corrected n_stimulus_repetitions to {}".format(n_stimulus_repetitions))
    no_stim_time = 10
    stimuli = list(range(psth.n_stimuli)) * n_stimulus_repetitions
    while len(stimuli) > 0:
        inputs = np.hstack((inputs, np.array([-1] * no_stim_time)))
        time += no_stim_time

        # pick a stimulus and count how many presentations of it remain
        stimulus = random.choice(stimuli)
        stimuli.remove(stimulus)
        repetitions_left = sum(np.array(stimuli) == stimulus)
        if psth.time_start < 0:
            inputs = np.hstack((inputs, np.array([-1] * (-1 * psth.time_start))))
            inputs = np.hstack((inputs, np.array([stimulus] * (1 + psth.time_stop))))
        else:
            inputs = np.hstack((inputs, [stimulus] * (1 + psth.time_stop - psth.time_start)))

        # generate spikes
        for t in range(psth.time_steps):
            for neuron in range(psth.n_neurons):
                if repetitions_left > 0:
                    spike_probability = psth_remaining[neuron, stimulus, t] / repetitions_left
                else:
                    # last repetition: emit any spikes that are still owed
                    spike_probability = float(psth_remaining[neuron, stimulus, t] > 0)
                if random.random() < spike_probability:
                    neuron_numbers.append(neuron)
                    spike_times.append(time)
                    psth_remaining[neuron, stimulus, t] -= 1
            time += 1
    return ((spike_times, neuron_numbers), InputTrace(nparray=inputs), n_stimulus_repetitions)

def generate_dummy_psth(psth_start=-40, psth_stop=100, n_stimuli=2, n_neurons=2):
    n_time_steps = psth_stop - psth_start + 1
    psth = np.zeros(n_time_steps * n_neurons * n_stimuli).reshape(n_neurons, n_stimuli, n_time_steps)
    for neuron in range(n_neurons):
        for stimulus in range(n_stimuli):
            initial = 4.9 * (stimulus + 1.) + 6.4 * np.sin(0.9 * np.pi * (neuron + 1) / n_neurons)
            psth[neuron, stimulus, :] = exp_decrease(psth_start, psth_stop, initial, 5 + neuron * stimulus)
    psth = psth.round()
    return Psth(psth, psth_start)

def exp_decrease(start, stop, initial=1.0, time_factor=1.0):
    if start < 0:
        values = np.hstack((np.zeros(-start + 1), initial * np.exp(-time_factor * np.linspace(0, 1, stop))))
    else:
        values = np.hstack((np.zeros(1), initial * np.exp(-time_factor * np.linspace(0, 1, stop))))
    return values

def calc_psth(spikes, input, psth_start=0, psth_stop=10):
    stim_onsets = input.get_stim_onset()
    n_neurons = max(spikes[1]) + 1
    n_stimuli = int(max(input.nparray)) + 1
    n_time_steps = psth_stop - psth_start + 1
    psth_array_size = n_neurons * n_stimuli * n_time_steps
    psth = np.zeros(psth_array_size).reshape(n_neurons, n_stimuli, n_time_steps)
    spike_times = np.asarray(spikes[0])
    neuron_numbers = spikes[1]
    spike_indices = np.arange(len(spike_times))
    stim_frequencies = {}
    for onset in stim_onsets:
        onset_time = onset[0]
        stimulus_number = onset[1]

        # count stimulus repetitions
        if stimulus_number in stim_frequencies:
            stim_frequencies[stimulus_number] += 1
        else:
            stim_frequencies[stimulus_number] = 1

        # find all spikes between (onset_time + psth_start) and (onset_time + psth_stop)
        relevant_spikes_mask = ((onset_time + psth_start) <= spike_times) * (spike_times <= (onset_time + psth_stop))
        relevant_spike_indices = spike_indices[relevant_spikes_mask]
        for index in relevant_spike_indices:
            psth[neuron_numbers[index], stimulus_number, spike_times[index] - onset_time - psth_start] += 1

    # divide by number of stimulus repetitions
    for stim in stim_frequencies:
        psth[:, stim, :] /= stim_frequencies[stim]
    return Psth(psth=psth, time_start=psth_start)

def calc_total_psth(spikes, input, psth_start=0, psth_stop=10, n_neurons=None):
    n_time_steps = psth_stop - psth_start + 1
    total_psth = np.zeros(n_time_steps)
    stim_onsets = input.get_stim_onset()
    n_stimulus_events = len(stim_onsets)
    stim_times_array = np.arange(n_time_steps) + psth_start

    # map absolute spike times to the psth bins they fall into
    time_psth_time_map = {}
    for onset in stim_onsets:
        onset_time = onset[0]
        for t in stim_times_array:
            absolute_time = t + onset_time
            time_psth_time_map.setdefault(absolute_time, []).append(t - psth_start)

    spike_times = spikes[0]
    neuron_numbers = spikes[1]
    for spike_time in spike_times:
        if spike_time in time_psth_time_map:
            total_psth[time_psth_time_map[spike_time]] += 1

    # divide by number of stimulus repetitions and number of neurons
    total_psth /= n_stimulus_events
    if n_neurons is None:
        n_neurons = max(neuron_numbers) + 1
    total_psth /= n_neurons
    return total_psth

def calc_response_strength(spikes, input, psth_start=0, psth_stop=10):
    stim_onsets = input.get_stim_onset()
    n_neurons = max(spikes[1]) + 1
    n_stimuli = int(max(input.nparray)) + 1
    n_time_steps = psth_stop - psth_start + 1
    response_array_size = n_neurons * n_stimuli
    logger.info("response_array_size = {}".format(response_array_size))
    logger.info("n_neurons = {}".format(n_neurons))
    logger.info("n_stimuli = {}".format(n_stimuli))
    logger.info("n_time_steps = {}".format(n_time_steps))
    stim_responses = np.zeros(response_array_size).reshape(n_neurons, n_stimuli)
    spike_times = np.asarray(spikes[0])
    neuron_numbers = spikes[1]
    spike_indices = np.arange(len(spike_times))
    stim_frequencies = {}
    for onset in stim_onsets:
        onset_time = onset[0]
        stimulus_number = onset[1]

        # count stimulus repetitions
        stim_frequencies[stimulus_number] = 1 + stim_frequencies.get(stimulus_number, 0)

        # find all spikes between (onset_time + psth_start) and (onset_time + psth_stop)
        relevant_spikes_mask = ((onset_time + psth_start) <= spike_times) * (spike_times <= (onset_time + psth_stop))
        relevant_spike_indices = spike_indices[relevant_spikes_mask]
        for index in relevant_spike_indices:
            stim_responses[neuron_numbers[index], stimulus_number] += 1

    # divide by number of stimulus repetitions
    for stim in stim_frequencies:
        stim_responses[:, stim] /= stim_frequencies[stim]
    return stim_responses

def calc_response_strength2(spikes, input, psth_start=0, psth_stop=10, n_neurons=None):
    stim_onsets = input.get_stim_onset()
    if n_neurons is None:
        n_neurons = max(spikes[1]) + 1
    n_stimuli = int(max(input.nparray)) + 1
    n_time_steps = psth_stop - psth_start + 1
    response_array_size = n_neurons * n_stimuli
    stim_responses = np.zeros(response_array_size).reshape(n_neurons, n_stimuli)
    spike_times = spikes[0]
    neuron_numbers = spikes[1]
    spike_indices = np.arange(len(spike_times))
    stim_frequencies = {}
    time_stimulus_map = {}
    stim_times_array = np.arange(n_time_steps) + psth_start
    logger.debug("stim_onsets={}".format(stim_onsets))
    for onset in stim_onsets:
        onset_time = onset[0]
        stimulus_number = onset[1]

        # count stimulus repetitions
        stim_frequencies[stimulus_number] = 1 + stim_frequencies.get(stimulus_number, 0)

        stim_times = onset_time + stim_times_array
        for t in stim_times:
            time_stimulus_map.setdefault(t, []).append(stimulus_number)

    for spike_index in spike_indices:
        if spike_times[spike_index] in time_stimulus_map:
            for stim in time_stimulus_map[spike_times[spike_index]]:
                stim_responses[neuron_numbers[spike_index], stim] += 1

    # divide by number of stimulus repetitions
    for stim in stim_frequencies:
        logger.debug("stim = {}, freq = {}".format(stim, stim_frequencies[stim]))
        stim_responses[:, stim] /= stim_frequencies[stim]
    return stim_responses

calc_response_strength2_PICKLED = pickled_results(calc_response_strength2)

def calc_preferred_stimuli_from_response(response_strength, target_nx=100, target_ny=100, mark_low_responses=None):
    """Calculate preferred stimuli from spike responses.

    @param response_strength: 2d array with dimensions (number_of_neurons, number_of_stimuli)
    @param target_nx: x dimension of target layer
    @param target_ny: y dimension of target layer
    @param mark_low_responses: if not None, neurons with low response are marked with the given value
    @return: preferred_stimuli, selectivity
    """

    # add jitter, otherwise at equal response_strength the lower stimulus number
    # would always be counted as preferred (see doc of argmax)
    jitter_amplitude = 0.0001
    jitter = jitter_amplitude * np.random.random(response_strength.shape)
    preferred_stimuli = (response_strength + jitter).argmax(axis=1).reshape(target_ny, target_nx)

    # calculate selectivity
    max_responses = response_strength.max(axis=1)
    min_responses = response_strength.min(axis=1)
    selectivity = (max_responses - min_responses) / (max_responses + min_responses)
    selectivity = selectivity.reshape(target_ny, target_nx)

    # mark low responses
    if mark_low_responses is not None:
        total_mean_response = response_strength.mean()
        low_response = (response_strength.max(axis=1) < total_mean_response).reshape(target_ny, target_nx)
        preferred_stimuli[low_response] = mark_low_responses
        selectivity[low_response] = 0
    return preferred_stimuli, selectivity

calc_preferred_stimuli_from_response_PICKLED = pickled_results(calc_preferred_stimuli_from_response)
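
# Hedged usage sketch (not part of the original module), shapes only: for a
# 10x10 target layer responding to 5 stimuli, the function returns a (10, 10)
# preferred-stimulus map and a (10, 10) selectivity map; with non-negative
# responses the selectivity lies in [0, 1].
def _demo_calc_preferred_stimuli():
    response = np.random.random((100, 5))  # (n_neurons, n_stimuli)
    pref, sel = calc_preferred_stimuli_from_response(response, target_nx=10, target_ny=10)
    logger.info("pref shape {}, selectivity range [{:.3f}, {:.3f}]".format(pref.shape, sel.min(), sel.max()))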

def calc_pref_stim_maps_from_response(response_strength, stim_nx, stim_ny, target_nx=100, target_ny=100):
    """Calculate preferred x and y stimulus maps from response strength.

    @param response_strength: 2d array with dimensions (number_of_neurons, number_of_stimuli)
    @param stim_nx: x dimension of 2d stimulus space
    @param stim_ny: y dimension of 2d stimulus space
    @param target_nx: x dimension of target layer
    @param target_ny: y dimension of target layer
    @return: preferred_stimuli_x, preferred_stimuli_y, selectivity
    """
    low_res_mark = -2
    preferred_stimuli, selectivity = calc_preferred_stimuli_from_response(response_strength, target_nx, target_ny, low_res_mark)
    low_response = np.where(preferred_stimuli == low_res_mark)

    # stimulus index -> (x, y) position in the 2d stimulus grid
    preferred_stimuli_x = preferred_stimuli % stim_nx
    preferred_stimuli_y = preferred_stimuli // stim_nx

    # mark map neurons with too low response with low_res_mark
    preferred_stimuli_x[low_response] = low_res_mark
    preferred_stimuli_y[low_response] = low_res_mark

    return preferred_stimuli_x, preferred_stimuli_y, selectivity

calc_pref_stim_maps_from_response_PICKLED = pickled_results(calc_pref_stim_maps_from_response)

def calc_preferred_stim_from_weights(weight_file, stim_file, fname_stimcorr_pickle=None, stimcorr_reeval=True):
    con = Connection()
    con.load_weight_file(weight_file)
    incomming_weight_sum = con.get_incomming_weight_sum()

    stimset = StimulusSet(stim_file)

    if fname_stimcorr_pickle is not None:
        response_strength = calc_weight_stim_correlation_PICKLED(fname_stimcorr_pickle, con, stimset.pics, reevaluate=stimcorr_reeval)
    else:
        response_strength = calc_weight_stim_correlation(con, stimset.pics)

    max_response = response_strength.max(axis=1)
    min_response = response_strength.min(axis=1)
    selectivity = (max_response - min_response) / (max_response + min_response)

    # add jitter, otherwise at equal response_strength the lower stimulus number
    # would always be counted as preferred (see doc of argmax)
    jitter_amplitude = 0.0001
    jitter = jitter_amplitude * np.random.random(response_strength.shape)
    target_nx = con.get_target_nx()
    target_ny = con.get_target_ny()
    preferred_stimuli = (response_strength + jitter).argmax(axis=1).reshape(target_ny, target_nx)
    selectivity = selectivity.reshape(target_ny, target_nx)
    return preferred_stimuli, selectivity, incomming_weight_sum

calc_preferred_stim_from_weights_PICKLED = pickled_results(calc_preferred_stim_from_weights)

def calc_pref_stim_maps_from_weights(weight_file, stim_file, stim_nx=20, stim_ny=20, fname_stimcorr_pickle=None, stimcorr_reeval=True):
    preferred_stimuli, selectivity, inc_sum = calc_preferred_stim_from_weights(weight_file, stim_file, fname_stimcorr_pickle, stimcorr_reeval)
    pref_x = preferred_stimuli % stim_nx
    pref_y = preferred_stimuli // stim_nx
    return pref_x, pref_y, selectivity, inc_sum

calc_pref_stim_maps_from_weights_PICKLED = pickled_results(calc_pref_stim_maps_from_weights)

def calc_weight_stim_correlation(connection, stim_pics):
    n_target = connection.get_n_target()
    n_stimuli = len(stim_pics)
    response_strength = np.zeros((n_target, n_stimuli))
    for target in range(n_target):
        weights = connection.get_incomming_matrix(target)
        for stim in range(n_stimuli):
            response = weights * stim_pics[stim]
            response_strength[target, stim] += response.sum()

    return response_strength

calc_weight_stim_correlation_PICKLED = pickled_results(calc_weight_stim_correlation)
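
# In other words (sketch, not from the original source): for the incoming weight
# matrix W_t of target neuron t and stimulus picture S_k of the same shape,
#
#     response_strength[t, k] = sum_ij W_t[i, j] * S_k[i, j]
#
# i.e. the unnormalized correlation between the weight pattern and the stimulus.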

def plot_psth(psth, ax=None):
    logger.info("plotting psth")
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
    n_neurons = psth.shape[0]
    n_stim = psth.shape[1]
    for neuron in range(n_neurons):
        for stim in range(n_stim):
            ax.plot(psth[neuron, stim, :], 'x-')

def calc_patch_sizes(weight_file, stim_file, stim_nx, stim_ny, neurons_nx, neurons_ny):
    logger.info("calculating patch sizes")
    con = Connection()
    con.load_weight_file(weight_file)
    stimset = StimulusSet(stim_file)

    pickle_file = weight_file + "_stimcorr.pickle"
    response_strength = calc_weight_stim_correlation_PICKLED(pickle_file, con, stimset.pics)
    logger.info("response_strength.shape = {}".format(response_strength.shape))
    n_neurons, n_stim = response_strength.shape
    if stim_nx * stim_ny != n_stim:
        raise ValueError("stimulus dimensions don't match the response strength array!")
    if neurons_nx * neurons_ny != n_neurons:
        raise ValueError("neuron map dimensions don't match the number of neurons!")

    # collapse the 2d stimulus space into single-parameter (x-only and y-only) response maps
    response_strength_xy = response_strength.reshape(n_neurons, stim_ny, stim_nx)
    x_responses = response_strength_xy.sum(1)
    y_responses = response_strength_xy.sum(2)
    x_patch_sizes = [calc_patch_size(response) for response in x_responses]
    y_patch_sizes = [calc_patch_size(response) for response in y_responses]

    return x_patch_sizes, y_patch_sizes

def calc_patch_size(image):
    """Estimate the patch size from the dominant spatial frequency of the
    Fourier transform. The implementation is only meaningful for square images.
    """
    nx, ny = image.shape
    if nx != ny:
        raise ValueError("image must be square! (image.shape[0] == image.shape[1])")
    mwfree_image = image - np.mean(image)
    fft_image = np.abs(fftn(mwfree_image))
    max_ind = fft_image.argmax()
    d = dist(ny, nx)
    max_x = max_ind % nx
    max_y = max_ind // nx
    max_dist = d[max_y, max_x]

    # frequency in cycles per pixel
    freq = max_dist / nx

    # wave length in pixels
    wave_length = 1. / freq

    return wave_length
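
# Hedged sanity check (not part of the original module; assumes dist() from
# .idl behaves like IDL's dist, i.e. wrap-around frequency distances): for a
# plane wave with a known period the dominant FFT frequency should recover that
# period. A 64x64 image with 8 full cycles along x has wave length 64/8 = 8.
def _demo_calc_patch_size():
    n = 64
    x = np.arange(n)
    image = np.sin(2 * np.pi * 8 * x / n)[np.newaxis, :].repeat(n, axis=0)
    wave_length = calc_patch_size(image)
    logger.info("recovered wave length: {} (expected 8 pixels)".format(wave_length))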

def evaluate_patch_dynamics(spike_data, patch_border_threshold=0.3, patch_noise_ratio_threshold=10., dt=100., t_start=None):
    """Calculate activity patch measures: size, speed, amplitude."""
    spike_movie, time_scale = spike_data.calc_spike_movie(dt=dt, t_start=t_start, time_scale=True)
    n_frames = spike_movie.shape[2]
    patch_positions = []
    patch_sizes = []
    patch_amplitudes = []
    background_noise = []
    total_activity = []
    n_neurons = spike_movie.shape[0] * spike_movie.shape[1]
    for i in range(n_frames):
        frame = spike_movie[:, :, i]
        peak_pos, patch_pos, patch_size, patch_indices = find_biggest_patch(frame, threshold=patch_border_threshold)
        patch_spikes = [frame[x, y] for x, y in patch_indices]
        non_patch_spikes = np.sum(frame) - np.sum(patch_spikes)

        total_frame_activity = np.sum(frame)
        total_activity.append(total_frame_activity)

        mean_non_patch_spikes = non_patch_spikes / (n_neurons - len(patch_indices))
        background_noise_hz = 1000. * mean_non_patch_spikes / dt  # in Hz
        amplitude = np.mean(patch_spikes)
        patch_amplitude_hz = 1000. * amplitude / dt  # in Hz
        patch_amplitudes.append(patch_amplitude_hz)

        # discard patches that do not stand out clearly against the background noise
        if patch_amplitude_hz < patch_noise_ratio_threshold * background_noise_hz:
            patch_size = 0
        patch_sizes.append(patch_size)
        background_noise.append(background_noise_hz)
        if patch_size > 0:
            patch_positions.append(np.array(patch_pos))
        else:
            patch_positions.append(np.array([-1, -1]))

    sqrt_patch_sizes = np.sqrt(patch_sizes)

    valid_patches = np.where(sqrt_patch_sizes > 0)
    n_valid_patches = len(valid_patches[0])
    median_sqrt_patch_size = np.median(sqrt_patch_sizes[valid_patches])
    speed_sec = calc_patch_speed(patch_positions,
                                 sqrt_patch_sizes,
                                 dt,
                                 0.5 * median_sqrt_patch_size,
                                 spike_movie.shape)

    valid_speed_patches = np.where(sqrt_patch_sizes[:-1] > 0)
    median_speed = np.median(speed_sec[valid_speed_patches])

    return {'patch_size': sqrt_patch_sizes,
            'patch_size_median': median_sqrt_patch_size,
            'n_valid_patches': n_valid_patches,
            'n_frames': n_frames,
            'patch_speed': speed_sec,
            'patch_speed_median': median_speed,
            'patch_amplitude': patch_amplitudes,
            'patch_positions': patch_positions,
            'background_noise': background_noise,
            'time_scale': time_scale,
            }

def find_biggest_patch(frame, threshold=0.5, sigma=1.5):
    """Find the biggest activity patch in an image (e.g. a single spike movie frame).

    @param frame: two dimensional ndarray
    @param threshold: relative patch threshold between mean and max activity (must be between 0 and 1);
                      all contiguous neurons around the maximum activity neuron
                      that spike above the threshold belong to the patch
    @return: peak position, patch position, patch size, and coordinates of all neurons that belong to the patch
    """
    if threshold <= 0 or threshold >= 1:
        raise ValueError("Threshold must be in open interval (0,1)")
    rows, cols = frame.shape

    frame = scipy.ndimage.gaussian_filter(frame, sigma, order=0, output=None, mode='wrap', truncate=4.0)
    peak_pos = np.unravel_index(np.argmax(frame), frame.shape)
    peak_value = frame[peak_pos]
    mean_value = np.mean(frame)

    # shift peak to center
    shift_rows = int(0.5 * rows - peak_pos[0])
    shift_cols = int(0.5 * cols - peak_pos[1])
    frame = np.roll(frame, shift_rows, axis=0)
    frame = np.roll(frame, shift_cols, axis=1)

    abs_threshold = mean_value + threshold * (peak_value - mean_value)
    # create mask of neurons above threshold
    above_threshold_pixels = frame > abs_threshold
    if sum(above_threshold_pixels.flat) == 0:
        return (0, 0), (0, 0), 0, []

    # find indices of masked array entries, flood fill to detect contiguous regions
    # http://stackoverflow.com/questions/9440921/identify-contiguous-regions-in-2d-numpy-array
    labels, numL = scipy.ndimage.label(above_threshold_pixels)
    label_indices = [(labels == i).nonzero() for i in range(1, numL + 1)]

    # find biggest patch
    patch_sizes = [len(li[0]) for li in label_indices]
    ind_biggest_patch = np.argmax(patch_sizes)
    patch_size = patch_sizes[ind_biggest_patch]
    patch_indices = label_indices[ind_biggest_patch]

    # calculate patch position based on centered patch,
    # then shift back coordinates to original position
    patch_pos = [(np.mean(patch_indices[0]) - shift_rows) % rows,
                 (np.mean(patch_indices[1]) - shift_cols) % cols
                 ]

    # patch was shifted to image center for flood fill to work correctly;
    # now correct indices for original (unshifted) patch position
    patch_indices = list(zip((patch_indices[0] - shift_rows) % rows,
                             (patch_indices[1] - shift_cols) % cols,
                             ))

    return peak_pos, patch_pos, patch_size, patch_indices
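
# Hedged usage sketch (not part of the original module): build a frame with a
# single Gaussian bump plus weak noise and check that find_biggest_patch
# localizes it near its true center (30, 10).
def _demo_find_biggest_patch():
    rows, cols = 50, 50
    yy, xx = np.mgrid[0:rows, 0:cols]
    frame = np.exp(-((yy - 30) ** 2 + (xx - 10) ** 2) / (2 * 3.0 ** 2))
    frame += 0.01 * np.random.random((rows, cols))
    peak_pos, patch_pos, patch_size, patch_indices = find_biggest_patch(frame, threshold=0.5)
    logger.info("peak at {}, patch center near {}, size {}".format(peak_pos, patch_pos, patch_size))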

def calc_patch_speed(patch_positions, patch_sizes, delta_t, patch_threshold, layer_dim):
    """Calculate patch speed from x and y positions."""
    pos_x, pos_y = zip(*patch_positions)
    pos_x = np.array(pos_x)
    pos_y = np.array(pos_y)
    pos_diff_x = cyclic_distance(pos_x[1:], pos_x[0:-1], layer_dim[0])
    pos_diff_y = cyclic_distance(pos_y[1:], pos_y[0:-1], layer_dim[1])
    pos_diff = np.sqrt(pos_diff_x ** 2 + pos_diff_y ** 2)
    speed_sec = 1000. * pos_diff / delta_t  # in neurons per second

    # mark invalid speed values
    invalid_speed = -1
    speed_sec[pos_x[1:] < 0] = invalid_speed    # no valid patch position
    speed_sec[pos_x[0:-1] < 0] = invalid_speed  # no valid patch position
    speed_sec[patch_sizes[0:-1] < patch_threshold] = invalid_speed  # patch too small
    return speed_sec

def find_pinwheels_pref_x_prototype_matching(movie_file, weight_file, num_stimuli=[20, 20], plot_path=None, preferred_stim_pickle_file=None):
    if preferred_stim_pickle_file is None:
        preferences = calc_pref_stim_maps_from_weights(weight_file, movie_file, stim_nx=num_stimuli[0], stim_ny=num_stimuli[1])
    else:
        preferences = calc_pref_stim_maps_from_weights_PICKLED(preferred_stim_pickle_file, weight_file, movie_file, stim_nx=num_stimuli[0], stim_ny=num_stimuli[1])
    prototypes = set_up_prototypes(num_stimuli[0])
    bank = set_up_pw_prototype_bank(prototypes)
    matched_img = do_pattern_matching_pref_stim_prototype_bank(preferences[0], bank, num_stimuli=num_stimuli[0])
    pinwheels = eval_pattern_matching(matched_img)
    if plot_path is not None:
        preferences[0][pinwheels] = -5
        preferences[1][pinwheels] = -5
        norm = Normalize(vmin=0.)
        plt.imshow(preferences[0], origin='lower', interpolation='nearest', cmap=cmap_color_circle(), norm=norm)
        plt.colorbar()
        plt.savefig(os.path.join(plot_path, 'pref_x_with_pws.png'))  # save before show(), which discards the figure
        plt.show()
        plt.figure()
        norm = Normalize(vmin=0.)
        plt.imshow(preferences[1], origin='lower', interpolation='nearest', cmap=cmap_color_circle(), norm=norm)
        plt.colorbar()
        plt.savefig(os.path.join(plot_path, 'pref_y_with_pws.png'))
        plt.show()
    return pinwheels

def set_up_prototypes(nx):
    """Create two 7x7 pinwheel prototypes: the preference value increases
    around the center; the second prototype is rotated by one eighth of a cycle."""
    r = [-3., -2., -1., 0, 1., 2., 3.]
    proto = np.array([[(np.arctan2(i, j) / np.pi - 1.) / 2. * -nx for i in r] for j in r])
    proto2 = np.zeros((len(r), len(r)))
    for c0, i in enumerate(r):
        for c1, j in enumerate(r):
            phi = (np.arctan2(i, j) / np.pi - 1.) / (-2.)
            if phi >= 0.125:
                phi = phi - 0.125
            else:
                phi = 0.875 + phi
            proto2[c0, c1] = phi * nx
    return [proto, proto2]

def set_up_pw_prototype_bank(protos):
    """Extend each prototype with its transposed and mirrored variants (8 symmetries)."""
    num_protos = len(protos)
    bank = np.zeros((8 * num_protos, protos[0].shape[0], protos[0].shape[1]))
    for i in range(num_protos):
        bank[0 + i * 8] = protos[i]
        bank[1 + i * 8] = np.transpose(bank[0 + i * 8])
        bank[2 + i * 8] = np.flipud(bank[0 + i * 8])
        bank[3 + i * 8] = np.fliplr(bank[0 + i * 8])
        bank[4 + i * 8] = np.transpose(bank[2 + i * 8])
        bank[5 + i * 8] = np.transpose(bank[3 + i * 8])
        bank[6 + i * 8] = np.fliplr(bank[2 + i * 8])
        bank[7 + i * 8] = np.fliplr(bank[5 + i * 8])
    return bank

def do_pattern_matching_pref_stim_prototype_bank(pref_stim_img, bank, num_stimuli, measure="float"):
    # tile the preference map 3x3 so that windows centered near the border wrap around
    more_img = np.zeros((3 * pref_stim_img.shape[0], 3 * pref_stim_img.shape[1]))
    for i in range(3):
        for j in range(3):
            more_img[i * pref_stim_img.shape[0]:(i + 1) * pref_stim_img.shape[0], j * pref_stim_img.shape[1]:(j + 1) * pref_stim_img.shape[1]] = pref_stim_img
    con_img = np.zeros((pref_stim_img.shape[0], pref_stim_img.shape[1]))
    dhalf = bank.shape[1] // 2
    for i in range(pref_stim_img.shape[0], 2 * pref_stim_img.shape[0]):
        for j in range(pref_stim_img.shape[1], 2 * pref_stim_img.shape[1]):
            for b in range(bank.shape[0]):
                curr_img = more_img[i - dhalf:i + dhalf + bank.shape[1] % 2, j - dhalf:j + dhalf + bank.shape[2] % 2]
                num_count = 0
                for num in range(num_stimuli):
                    if num in curr_img:
                        num_count += 1
                # cyclic difference between the local window and the prototype
                diff = abs(curr_img - bank[b])
                diff[np.where(diff > num_stimuli / 2)] = num_stimuli - diff[np.where(diff > num_stimuli / 2)]
                diff_sum = np.sum(diff)
                # match strength; kept distinct from the 'measure' mode parameter
                match_strength = ((diff_sum / (bank[0].size * num_stimuli / 2.) - 0.5) * 4) ** 2
                if (num_count >= 0.8 * num_stimuli) and match_strength >= 2.:
                    if measure == 'binary':
                        con_img[i - pref_stim_img.shape[0], j - pref_stim_img.shape[1]] = 1
                    else:
                        con_img[i - pref_stim_img.shape[0], j - pref_stim_img.shape[1]] += match_strength
    return con_img

def eval_pattern_matching(matched_img):
    # tile the matched image 3x3 so that patches crossing the border stay connected
    more_img = np.zeros((3 * matched_img.shape[0], 3 * matched_img.shape[1]))
    for i in range(3):
        for j in range(3):
            more_img[i * matched_img.shape[0]:(i + 1) * matched_img.shape[0], j * matched_img.shape[1]:(j + 1) * matched_img.shape[1]] = matched_img
    s = [[1, 1, 1],
         [1, 1, 1],
         [1, 1, 1]]
    labeled, num = measurements.label(more_img, structure=s)
    # drop regions that contain no strong match
    for i in range(1, num + 1):
        if not (more_img[labeled == i] > 0.8 * np.amax(more_img)).any():
            more_img[labeled == i] = 0
    labeled, num = measurements.label(more_img, structure=s)
    centers = measurements.center_of_mass(more_img, labeled, range(1, num + 1))
    if len(centers) > 0:
        more_pw = np.zeros((2, len(centers)))
        more_pw[0] = np.round(centers)[:, 0]
        more_pw[1] = np.round(centers)[:, 1]
        more_pw = tuple(more_pw.astype(int))
        more_img[:, :] = 0
        more_img[more_pw] = 1
        pw_matrix = more_img[matched_img.shape[0]:2 * matched_img.shape[0], matched_img.shape[1]:2 * matched_img.shape[1]]
        pinwheels = np.where(pw_matrix == 1)
    else:
        pinwheels = []
    return pinwheels

def calc_curl(pref_stim, pref_x, n, m):
    # build 2x2 tiled copies of pref_x and pref_stim, shifted by half the map,
    # so that patches crossing the border end up contiguous near the center
    more_x = np.zeros((2 * pref_x.shape[0], 2 * pref_x.shape[1]))
    for i in range(2):
        for j in range(2):
            more_x[i * pref_stim.shape[0]:int((i + 0.5) * pref_stim.shape[0]), j * pref_stim.shape[1]:(j + 1) * pref_stim.shape[1]] = pref_x[pref_stim.shape[0] // 2:]
            more_x[int((i + 0.5) * pref_stim.shape[0]):(i + 1) * pref_stim.shape[0], j * pref_stim.shape[1]:(j + 1) * pref_stim.shape[1]] = pref_x[:pref_stim.shape[0] // 2]
    more_x = np.roll(more_x, pref_stim.shape[1] // 2)
    more_img = np.zeros((2 * pref_stim.shape[0], 2 * pref_stim.shape[1]))
    for i in range(2):
        for j in range(2):
            more_img[i * pref_stim.shape[0]:int((i + 0.5) * pref_stim.shape[0]), j * pref_stim.shape[1]:(j + 1) * pref_stim.shape[1]] = pref_stim[pref_stim.shape[0] // 2:]
            more_img[int((i + 0.5) * pref_stim.shape[0]):(i + 1) * pref_stim.shape[0], j * pref_stim.shape[1]:(j + 1) * pref_stim.shape[1]] = pref_stim[:pref_stim.shape[0] // 2]
    more_img = np.roll(more_img, pref_stim.shape[1] // 2)
    viel_img = more_img.copy()
    more_img = np.logical_or(more_img == n, more_img == m)
    labeled, num = measurements.label(more_img)
    label_indices = [(labeled == i).nonzero() for i in range(1, num + 1)]
    centers = measurements.center_of_mass(more_img, labeled, range(1, num + 1))
    patch_sizes = np.array([len(label_indices[i][0]) for i in range(num)])
    biggest_patches = np.where(abs(patch_sizes - np.amax(patch_sizes)) < 2)
    ordered, integral = None, 0
    for patch in biggest_patches[0]:
        i = np.array(centers)[patch].astype(int)
        if (i[0] >= (pref_stim.shape[0] // 2)) and (i[0] < (pref_stim.shape[0] // 2 + pref_stim.shape[0])) and (i[1] >= (pref_stim.shape[1] // 2)) and (i[1] < (pref_stim.shape[1] // 2 + pref_stim.shape[0])):
            # smooth the patch mask: fill single holes, then strip the outermost layer
            a = np.zeros((viel_img.shape[0], viel_img.shape[1]))
            a[label_indices[patch]] = 1
            g = np.ones((3, 3))
            f = convolve2d(a, g, mode='same', boundary='wrap')
            a = np.zeros((viel_img.shape[0], viel_img.shape[1]))
            a[np.where(f >= 3)] = 1
            f = convolve2d(a, g, mode='same', boundary='wrap')
            a[np.where(f <= 8)] = 0
            # border points of the patch: positions where the mask changes
            b = np.diff(a).astype(int)
            c = np.diff(a, axis=0).astype(int)
            d = np.where(c[:, 1:] + b[1:] != 0)
            d = np.array(d) + 1
            e = (d[0], d[1])
            # order the border points into a closed path via nearest neighbors
            tree = scipy.spatial.KDTree(np.array(e).T)
            mat = tree.sparse_distance_matrix(tree, 70.).todense()
            start = 0
            minimum = 0.
            ordered = np.zeros((d[0].size, 2))
            order = np.zeros(d[0].size, dtype=int)
            order[0] = start
            ordered[0] = np.array(d[:, start])
            for j in range(1, d[0].size):
                start, minimum = find_next_nn(mat, minimum, start, order)
                order[j] = start
                ordered[j] = np.array(d[:, start])
            # line integral of the cyclic preference differences along the border
            integral = 0
            for j in range(order.size):
                integral += cycl_dist(more_x[int(ordered[j, 0]), int(ordered[j, 1])], more_x[int(ordered[j - 1, 0]), int(ordered[j - 1, 1])])
            integral /= float(pref_x.max() + 1)
            # transform border coordinates back to the original map frame
            ordered = ordered.T
            ordered = (ordered[0] + pref_stim.shape[0] // 2, ordered[1] + pref_stim.shape[1] // 2)
            ordered = (ordered[0].astype(int) % pref_stim.shape[0], ordered[1].astype(int) % pref_stim.shape[1])
    return ordered, integral

def find_next_nn(mat, minimum, start, order):
    """Find the nearest not-yet-visited neighbor of point 'start' in the
    distance matrix 'mat'; 'order' holds the indices visited so far."""
    indices = np.where(mat[start] > minimum)
    minimum = mat[start][indices][0].min()
    ind = np.where(mat[start][0] == minimum)[1].copy()
    if ind[0, 0] in order:
        if ind.size > 1:
            if ind[0, 1] in order:
                if ind.size > 2:
                    if ind[0, 2] in order:
                        start, minimum = find_next_nn(mat, minimum, start, order)
                    else:
                        start = ind[0, 2]
                        minimum = 0.
                else:
                    start, minimum = find_next_nn(mat, minimum, start, order)
            else:
                start = ind[0, 1]
                minimum = 0.
        else:
            start, minimum = find_next_nn(mat, minimum, start, order)
    else:
        start = ind[0, 0]
        minimum = 0.
    return start, minimum

def cycl_dist(a, b, n_stim=20):
    """Signed difference between a and b on a cyclic scale of n_stim values."""
    d = a - b
    if d > n_stim / 2:
        d = n_stim - d
    if d < -n_stim / 2:
        d = -n_stim - d
    return d
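
# Hedged example (not part of the original module): on the default cyclic scale
# of 20 stimuli, values 19 and 1 are only two steps apart, not 18:
#
#     cycl_dist(19, 1)  # ->  2
#     cycl_dist(1, 19)  # -> -2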

def calc_pinwheels_pwcharge(pref_map_in, num_stimuli):
    """The radius of the circle for the charge calculation is chosen to be 6. This is
    big enough to tolerate small discontinuities, but small enough to avoid overlap
    between two neighboring pinwheels."""
    pref_map = np.array(pref_map_in).copy()
    r = 6
    a = np.amax(pref_map.shape)
    circle1 = np.array([[i, j] for i, j in product(range(a // 2, -a // 2, -1), range(-a // 2, 0)) if abs(i * i + j * j - r * r) < r])
    circle2 = np.array([[i, j] for i, j in product(range(-a // 2, a // 2), range(0, a // 2)) if abs(i * i + j * j - r * r) < r])
    circle = np.concatenate((circle1, circle2))

    # create a new extended map with double the size of the original map and do the
    # calculations on it, in order to avoid boundary effects
    more_map = np.zeros((2 * pref_map.shape[0], 2 * pref_map.shape[1]))
    for i in range(2):
        for j in range(2):
            more_map[i * pref_map.shape[0]:int((i + 0.5) * pref_map.shape[0]), j * pref_map.shape[1]:(j + 1) * pref_map.shape[1]] = pref_map[pref_map.shape[0] // 2:].copy()
            more_map[int((i + 0.5) * pref_map.shape[0]):(i + 1) * pref_map.shape[0], j * pref_map.shape[1]:(j + 1) * pref_map.shape[1]] = pref_map[:pref_map.shape[0] // 2].copy()
    more_map = np.roll(more_map, pref_map.shape[1] // 2)

    # calculate the charge landscape by moving the circle over all possible
    # positions on the extended map
    charge = np.zeros((len(range(r, more_map.shape[0] - r)), len(range(r, more_map.shape[1] - r))))
    for i, j in product(range(r, more_map.shape[0] - r), range(r, more_map.shape[1] - r)):
        circle_coords = circle.copy()
        circle_coords[:, 0] += i
        circle_coords[:, 1] += j
        charge[(i - r), (j - r)] = calc_charge(more_map, circle_coords, num_stimuli)

    # disregard areas of low charge values due to random fluctuations
    charge[np.where(np.abs(charge) < 0.8)] = 0.0

    # convolve the charge landscape with np.ones((3,3)) to create connected areas of
    # high charge and to merge different areas close to each other
    kernel = np.ones((3, 3))
    charge_conv = convolve2d(charge, kernel, mode='same', boundary='wrap')

    # disregard areas that only had one single point of high charge or two with medium charge
    charge_conv[np.where(np.abs(charge_conv) < 2)] = 0.0

    # use label to identify connected areas belonging to one pinwheel
    labeled, num = measurements.label(charge_conv)
    label_indices = [(labeled == i).nonzero() for i in range(1, num + 1)]
    patch_sizes = [len(label_indices[i][0]) for i in range(num)]

    # neighborhood condition: there must be at least 5 points in a labeled region.
    # These 5 points had at least two high-charge points in their vicinity. This tolerates
    # single missing points in a region but ensures that the original points of high charge
    # had a short distance to each other and thereby belong to the same pinwheel.
    # (This corresponds to some fancy geometrical forms and prevents the algorithm from
    # finding pinwheels in salt-and-pepper patterns.)
    labels = []
    for i in range(num):
        if patch_sizes[i] > 4:
            labels.append(i + 1)
    if len(labels) > 0:
        # calculate pinwheel coordinates as centers of mass of the regions, in the
        # coordinate system of the extended map
        centers = measurements.center_of_mass(charge_conv, labeled, labels)
        more_pw = np.zeros((2, len(centers)))
        more_pw[0] = np.round(centers)[:, 0].astype(int)
        more_pw[1] = np.round(centers)[:, 1].astype(int)
        more_pw = tuple(more_pw.astype(int))
        more_pw_matrix = np.zeros(more_map.shape)
        more_pw_matrix[more_pw] = 1

        # transform back to the original coordinate frame
        d = [pref_map.shape[0] // 2 - r, pref_map.shape[1] // 2 - r]
        pw_matrix = more_pw_matrix[d[0]:(pref_map.shape[0] + d[0]), d[1]:(pref_map.shape[1] + d[1])].copy()
        pinwheels = np.where(pw_matrix == 1)
    else:
        pinwheels = ()
    return pinwheels
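
# Hedged usage sketch (not part of the original module): a synthetic preference
# map with a single pinwheel at the center. The map is not truly periodic, so
# the toroidal wrap inside calc_pinwheels_pwcharge may add spurious detections
# near the seams, but a pinwheel near (32, 32) should be among the results.
def _demo_calc_pinwheels_pwcharge():
    n, num_stimuli = 64, 20
    yy, xx = np.mgrid[0:n, 0:n]
    angle = np.arctan2(yy - n // 2, xx - n // 2)  # -pi .. pi around the center
    pref_map = np.floor((angle + np.pi) / (2 * np.pi) * num_stimuli) % num_stimuli
    pinwheels = calc_pinwheels_pwcharge(pref_map, num_stimuli)
    logger.info("pinwheel coordinates: {}".format(pinwheels))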

def calc_charge(map_matrix, ordered_surface_coords, num_stimuli):
    values = np.zeros(ordered_surface_coords.shape[0])
    charge = 0.
    for k in range(ordered_surface_coords.shape[0]):
        delta = cycl_dist(map_matrix[ordered_surface_coords[k, 0], ordered_surface_coords[k, 1]],
                          map_matrix[ordered_surface_coords[k - 1, 0], ordered_surface_coords[k - 1, 1]],
                          num_stimuli)
        values[k] = map_matrix[ordered_surface_coords[k, 0], ordered_surface_coords[k, 1]]
        # continuity condition: only accept values for the integral if there are
        # no major jumps in values (e.g. in a random map)
        if abs(delta) < 4:
            charge += delta
    # wheel condition: only accept a high pinwheel value if almost all stimulus
    # values occurred on the circle
    if np.unique(values).size < num_stimuli - num_stimuli / 10.:
        charge = 0.0
    # normalization
    charge /= float(num_stimuli)
    return charge

if __name__ == "__main__":
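    # Hedged smoke test (not part of the original module): generate a dummy PSTH,
    # sample spikes and an input trace from it, and reconstruct the PSTH from the
    # spikes; the reconstruction should match the dummy PSTH up to sampling noise.
    logging.basicConfig(level=logging.INFO)
    dummy = generate_dummy_psth(psth_start=-10, psth_stop=30, n_stimuli=2, n_neurons=2)
    spikes, input_trace, n_reps = generate_spikes_and_input(dummy, n_stimulus_repetitions=20)
    reconstructed = calc_psth(spikes, input_trace, psth_start=-10, psth_stop=30)
    logger.info("reconstructed psth shape: {}".format(reconstructed.psth.shape))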