# correlation_length_distance_perlin.py
  1. import numpy as np
  2. from scripts.spatial_network.perlin.run_entropy_maximisation_perlin_map import DATA_FOLDER
  3. from scipy.optimize import curve_fit
  4. import pingouin as pg
  5. def filter_run_names_by_par_dict(traj, par_dict):
  6. run_name_list = []
  7. for run_idx, run_name in enumerate(traj.f_get_run_names()):
  8. traj.f_set_crun(run_name)
  9. paramters_equal = True
  10. for key, val in par_dict.items():
  11. if (traj.par[key] != val):
  12. paramters_equal = False
  13. if paramters_equal:
  14. run_name_list.append(run_name)
  15. traj.f_restore_default()
  16. return run_name_list
  17. def correlation_length_fit_dict(traj, map_type='perlin', load=True):
  18. def phi_diff(d_phi):
  19. return (d_phi + np.pi) % (2 * np.pi) - np.pi
  20. def exp_fit_func(x, corr_len):
  21. return np.pi / 2. * np.exp(-corr_len * x)
  22. corr_len_expl = traj.f_get('correlation_length').f_get_range()
  23. corr_len_list = np.unique(corr_len_expl)
  24. seed_expl = traj.f_get('seed').f_get_range()
  25. seed_list = np.unique(seed_expl)
  26. dim = 60
  27. size = 900
  28. if not load:
  29. fit_correlation_lengths_dict = {}
  30. for corr_len in corr_len_list:
  31. fit_corr_lens_different_seeds = []
  32. for map_seed in seed_list:
  33. par_dict = {'seed': map_seed, 'correlation_length': corr_len, 'long_axis': 100.0}
  34. run_name = filter_run_names_by_par_dict(traj, par_dict)
  35. ex_tunings = traj.results.runs[run_name].ex_tunings
  36. ex_tun_array = np.array(ex_tunings).reshape((dim, dim))
  37. x_displacements = range(-dim + 1, dim)
  38. y_displacements = range(-dim + 1, dim)
  39. r_c_array = np.ndarray((len(x_displacements), len(y_displacements)))
  40. dist_array = np.ndarray((len(x_displacements), len(y_displacements)))
  41. for x_displacement in x_displacements:
  42. for y_displacement in y_displacements:
  43. a_1 = ex_tun_array[max(0, x_displacement): min(dim, dim + x_displacement),
  44. max(0, y_displacement): min(dim, dim + y_displacement)]
  45. a_2 = ex_tun_array[max(0, -x_displacement): min(dim, dim - x_displacement),
  46. max(0, -y_displacement): min(dim, dim - y_displacement)]
  47. if a_1.shape == (1, 1):
  48. # TODO: Still not sure which value to put here.
  49. r_c = np.nan
  50. else:
  51. # TODO: For some reason, some values come out as NaN or +/-inf
  52. r_c, p_c = pg.circ_corrcc(a_1.flatten(), a_2.flatten(), correction_uniform=True)
  53. r_c_array[x_displacement + dim - 1, y_displacement + dim - 1] = r_c
  54. dist_array[x_displacement + dim - 1, y_displacement + dim - 1] = np.sqrt(
  55. x_displacement ** 2 + y_displacement ** 2)
  56. # correlations over distances
  57. dist_array *= size / dim
  58. distances = dist_array.flatten()
  59. correlations = r_c_array.flatten()
  60. distances = distances[~np.isnan(correlations)]
  61. correlations = correlations[~np.isnan(correlations)]
  62. # Binned mean and exponential fit
  63. bins = np.linspace(0.0, size, 301)
  64. binned_ids = np.digitize(distances, bins)
  65. binned_correlations = [[] for _ in range(len(bins))]
  66. for id, bin_id in enumerate(binned_ids):
  67. bin_corr = correlations[id]
  68. binned_correlations[bin_id - 1].append(bin_corr)
  69. binned_mean_correlations = np.array([np.mean(bin) for bin in binned_correlations])
  70. bins = bins[np.isfinite(binned_mean_correlations)]
  71. binned_mean_correlations = binned_mean_correlations[np.isfinite(binned_mean_correlations)]
  72. # TODO: bins is wrong here, as it is the bin borders
  73. params, _ = curve_fit(exp_fit_func, bins, binned_mean_correlations, [0.5])
  74. fit_corr_len = 1 / params[0]
  75. fit_corr_lens_different_seeds.append(fit_corr_len)
  76. fit_correlation_lengths_dict[corr_len] = np.mean(fit_corr_lens_different_seeds)
  77. np.save(DATA_FOLDER + map_type + '_fit_correlation_lengths_dict.npy', fit_correlation_lengths_dict)
  78. return fit_correlation_lengths_dict
  79. else:
  80. fit_correlation_lengths_dict = np.load(DATA_FOLDER + map_type + '_fit_correlation_lengths_dict.npy')
  81. return fit_correlation_lengths_dict.item()