123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121 |
- import numpy as np
- from scripts.spatial_network.perlin_map.run_simulation_perlin_map import DATA_FOLDER
- from scipy.optimize import curve_fit
- import pingouin as pg
def filter_run_names_by_par_dict(traj, par_dict):
    """Return the names of all runs in *traj* whose parameters match *par_dict*.

    Parameters
    ----------
    traj
        Loaded trajectory (presumably a pypet ``Trajectory`` — it must expose
        ``f_get_run_names``, ``f_set_crun``, ``par`` and ``f_restore_default``).
    par_dict : dict
        Mapping of parameter name -> required value.

    Returns
    -------
    list of str
        Names of the runs for which every parameter listed in ``par_dict``
        equals the requested value.
    """
    run_name_list = []
    for run_name in traj.f_get_run_names():
        traj.f_set_crun(run_name)
        # all() short-circuits on the first mismatch; the original kept a
        # 'paramters_equal' flag and compared every remaining key anyway.
        if all(traj.par[key] == val for key, val in par_dict.items()):
            run_name_list.append(run_name)
    traj.f_restore_default()
    return run_name_list
def _exp_decay(x, decay_rate):
    """Exponential correlation model: pi/2 at zero distance, decaying with *decay_rate*."""
    return np.pi / 2. * np.exp(-decay_rate * x)


def _shifted_circular_correlations(ex_tun_array, dim, size):
    """Circular correlation of a tuning map with all its 2-D shifted copies.

    For every displacement (dx, dy) in (-dim, dim) the overlapping region of
    the map and its shifted copy is correlated with ``pingouin.circ_corrcc``.

    Returns
    -------
    (distances, correlations) : two 1-D arrays of equal length
        Shift distances (scaled by ``size / dim`` to spatial units) and the
        corresponding circular correlation coefficients, NaN entries removed.
    """
    displacements = range(-dim + 1, dim)
    n = len(displacements)
    r_c_array = np.ndarray((n, n))
    dist_array = np.ndarray((n, n))
    for dx in displacements:
        for dy in displacements:
            # Overlapping sub-arrays of the map and its (dx, dy)-shifted copy.
            a_1 = ex_tun_array[max(0, dx): min(dim, dim + dx),
                               max(0, dy): min(dim, dim - -dy) if False else min(dim, dim + dy)]
            a_2 = ex_tun_array[max(0, -dx): min(dim, dim - dx),
                               max(0, -dy): min(dim, dim - dy)]
            if a_1.shape == (1, 1):
                # Single-element overlap: a correlation is undefined here.
                # TODO(review): still not sure which value belongs here.
                r_c = np.nan
            else:
                # TODO(review): some values come out as NaN or +/-inf (see
                # original note); NaNs are filtered below.
                r_c, _ = pg.circ_corrcc(a_1.flatten(), a_2.flatten(),
                                        correction_uniform=True)
            r_c_array[dx + dim - 1, dy + dim - 1] = r_c
            dist_array[dx + dim - 1, dy + dim - 1] = np.hypot(dx, dy)
    dist_array *= size / dim  # grid units -> spatial units
    distances = dist_array.flatten()
    correlations = r_c_array.flatten()
    valid = ~np.isnan(correlations)
    return distances[valid], correlations[valid]


def _fit_correlation_length(distances, correlations, size):
    """Bin correlations by distance, fit an exponential decay, return 1 / rate."""
    edges = np.linspace(0.0, size, 301)
    centers = 0.5 * (edges[:-1] + edges[1:])
    # Only distances within [0, size] fall into the bins; larger shifts
    # (corners of the displacement grid) are discarded.  The original lumped
    # them all into one overflow bin at x == size, biasing the fit.
    inside = distances <= size
    bin_ids = np.digitize(distances[inside], edges)  # 1 .. len(edges)
    bin_ids = np.minimum(bin_ids, len(centers))      # x == size -> last bin
    binned = [[] for _ in centers]
    for corr, bin_id in zip(correlations[inside], bin_ids):
        binned[bin_id - 1].append(corr)
    binned_means = np.array([np.mean(b) if b else np.nan for b in binned])
    finite = np.isfinite(binned_means)
    # Fit against bin centers — the original fitted against the left bin
    # edges (its own TODO acknowledged this was wrong).
    params, _ = curve_fit(_exp_decay, centers[finite], binned_means[finite],
                          p0=[0.5])
    return 1 / params[0]


def correlation_length_fit_dict(traj, map_type='perlin_map', load=True):
    """Compute (or load from cache) the fitted correlation length per Perlin scale.

    For every ``scale`` explored in *traj*, the circular autocorrelation of the
    excitatory tuning map is computed for all spatial shifts, averaged in
    distance bins, and fitted with an exponential decay.  The fitted
    correlation lengths are averaged over map seeds.

    Parameters
    ----------
    traj
        Loaded pypet trajectory with explored 'scale' and 'seed' parameters.
    map_type : str
        Prefix used in the cache file name.
    load : bool
        If True, try to return a previously saved dictionary first.

    Returns
    -------
    dict
        Mapping scale -> mean fitted correlation length (spatial units).
    """
    scale_list = np.unique(traj.f_get('scale').f_get_range())
    seed_list = np.unique(traj.f_get('seed').f_get_range())

    # NOTE(review): grid dimension and sheet size are hard coded; presumably
    # they match the simulation setup — confirm before reusing elsewhere.
    dim = 60
    size = 900

    cache_file = DATA_FOLDER + map_type + traj.name + '_fit_correlation_lengths_dict.npy'

    if load:
        try:
            # allow_pickle is required to round-trip a dict through
            # np.save/np.load; without it the load always raises ValueError.
            cached = np.load(cache_file, allow_pickle=True).item()
            if all(scale in cached for scale in scale_list):
                return cached
            print('Some scale values missing in corr. len. fit dictionary, new one will be generated')
        except (OSError, ValueError):
            # OSError: cache file missing/unreadable; ValueError: bad contents.
            print('No correlation length fit dictionary found, will be generated now')

    fit_correlation_lengths_dict = {}
    for scale in scale_list:
        fit_corr_lens_different_seeds = []
        for map_seed in seed_list:
            par_dict = {'seed': map_seed, 'scale': scale, 'long_axis': 100.0}
            # The filter returns a list; this parameter combination identifies
            # exactly one run (the original used the list itself as a key).
            run_name = filter_run_names_by_par_dict(traj, par_dict)[0]
            ex_tunings = traj.results.runs[run_name].ex_tunings
            ex_tun_array = np.array(ex_tunings).reshape((dim, dim))
            distances, correlations = _shifted_circular_correlations(ex_tun_array, dim, size)
            fit_corr_lens_different_seeds.append(
                _fit_correlation_length(distances, correlations, size))
        fit_correlation_lengths_dict[scale] = np.mean(fit_corr_lens_different_seeds)

    np.save(cache_file, fit_correlation_lengths_dict)
    return fit_correlation_lengths_dict
|