# Validation_FOV_alignment.py
# FOV alignment validation for anesthesia CA1 calcium-imaging recordings
# (exported from a Jupyter notebook).
  1. #!/usr/bin/env python
  2. # coding: utf-8
  3. # ### Define paths and animals for the analysis
  4. # In[1]:
  5. path = '/media/andrey/My Passport/GIN/backup_Anesthesia_CA1/meta_data/meta_recordings - anesthesia.xlsx'
  6. path4results = '/media/andrey/My Passport/GIN/Anesthesia_CA1/validation/calcium_imaging/' #To store transformation matrisies
  7. save_plots_path = '/media/andrey/My Passport/GIN/Anesthesia_CA1/validation/calcium_imaging/'
  8. log_file_path = save_plots_path + 'registration_logs.txt'
  9. animals_for_analysis = [48,51,53,37527,37528,37529,37530]
  10. # ### Align FOV's for all recordings
  11. # In[2]:
  12. repeat_calc = 1
  13. silent_mode = False
  14. #######################
  15. import pandas as pd
  16. import numpy as np
  17. import os
  18. import matplotlib.pyplot as plt
  19. from matplotlib import colors
  20. import sys
  21. np.set_printoptions(threshold=sys.maxsize)
  22. from pystackreg import StackReg
  23. # Sobel filter (not used)
  24. #from scipy import ndimage #https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.sobel.html
  25. meta_data = pd.read_excel(path)
  26. #%% compute transformations matrices between recordings
  27. recordings = meta_data['Number']
  28. animals = animals_for_analysis
  29. #print("Recordings: ", recordings)
  30. #ALIGNMENT ALGORITHM
  31. # log file
  32. f = open(log_file_path, "a")
  33. print("n, i, j, rigid_mean_enh, rigid_mean, affine_mean_enh, affine_mean, best_method ", file=f)
  34. if (silent_mode!=True):
  35. print(" RMSD's: (rigid, mean_enh) | (rigid, mean) | (affine, mean_enh) | (affine, mean) | best method ")
  36. '''
  37. for animal in animals:
  38. if (silent_mode!=True):
  39. print("Animal #", str(animal))
  40. if not os.path.exists(path4results + 'StackReg/' +
  41. str(animal) + '.npy') or repeat_calc == 1:
  42. # if not os.path.exists('Q:/Personal/Mattia/Calcium Imaging/results/StackRegEnhImage/' +
  43. # str(animal) + '.npy') or repeat_calc == 1:
  44. # if not os.path.exists('Q:/Personal/Mattia/Calcium Imaging/results/StackReg/' +
  45. # str(animal) + '.npy') or repeat_calc == 1:
  46. meta_animal = meta_data[meta_data['Mouse'] == animal]
  47. recordings = meta_animal['Number']
  48. images_mean = np.zeros((512, 512, np.shape(recordings)[0]))
  49. images_mean_enh = np.zeros((512, 512, np.shape(recordings)[0]))
  50. images_quality_check = np.zeros((512, 512, np.shape(recordings)[0]))
  51. best_tmats = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0], 3, 3))
  52. best_score = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0]))
  53. best_methods = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0]))
  54. tmats_affine = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0], 3, 3))
  55. tmats_rigid = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0], 3, 3))
  56. tmats_affine_enh = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0], 3, 3))
  57. tmats_rigid_enh = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0], 3, 3))
  58. # load all (enhanced) images
  59. for idx, recording in enumerate(recordings):
  60. print(recording)
  61. options = np.load(meta_data['Folder'][recording] +
  62. meta_data['Subfolder'][recording] +
  63. str(int(meta_data['Recording idx'][recording])) +
  64. '/suite2p/plane0/ops.npy',
  65. allow_pickle=True)
  66. # mean image or mean enhanced image
  67. images_mean[:, :, idx] = options.item(0)['meanImg']
  68. images_mean_enh[:, :, idx] = options.item(0)['meanImgE']
  69. #cut_boarders=50
  70. #quality check
  71. images_quality_check[:, :, idx] = options.item(0)['meanImg']
  72. # loop through every pair and compute the transformation matrix
  73. conditions = [meta_data['Condition'][recording] for recording in recordings]
  74. for idx0 in range(np.shape(images_mean)[2]):
  75. #if (idx0!=14):
  76. #continue
  77. for idx1 in range(idx0, np.shape(images_mean)[2]):
  78. #if (idx1!=16):
  79. #continue
  80. fraction_of_non_zero_pixels = [0.0,0.0,0.0,0.0]
  81. ### MEAN RIGID and AFFINE
  82. reference_image = images_mean[:, :, idx0]
  83. initial_image = images_mean[:, :, idx1]
  84. #sx = ndimage.sobel(reference_image, axis=0, mode='constant')
  85. #sy = ndimage.sobel(reference_image, axis=1, mode='constant')
  86. #reference_image = np.hypot(sx, sy)
  87. #sx = ndimage.sobel(initial_image, axis=0, mode='constant')
  88. #sy = ndimage.sobel(initial_image, axis=1, mode='constant')
  89. #initial_image = np.hypot(sx, sy)
  90. boarder_cut = 100
  91. sr = StackReg(StackReg.AFFINE)
  92. tmats_affine[idx0, idx1, :, :] = sr.register(reference_image, initial_image)
  93. image_transformed = sr.transform(images_quality_check[:, :, idx1], tmats_affine[idx0, idx1, :, :])
  94. image_difference = images_quality_check[:, :, idx0] - image_transformed
  95. fraction_of_non_zero_pixels[3] = np.count_nonzero(image_transformed[:,:]<0.001)/262144
  96. #plt.imshow(image_transformed)
  97. #plt.show()
  98. image_difference = image_difference[boarder_cut:-boarder_cut, boarder_cut:-boarder_cut]
  99. image_difference = np.square(image_difference)
  100. rmsd_affine = np.sqrt(image_difference.sum()/(512 - 2 * boarder_cut)**2)
  101. if (silent_mode!=True):
  102. print("Fraction of non-zero pixels in 3 (mean affine): ", fraction_of_non_zero_pixels[3]," Score:",rmsd_affine)
  103. sr = StackReg(StackReg.RIGID_BODY)
  104. tmats_rigid[idx0, idx1, :, :] = sr.register(reference_image, initial_image)
  105. image_transformed = sr.transform(images_quality_check[:, :, idx1], tmats_rigid[idx0, idx1, :, :])
  106. image_difference = images_quality_check[:, :, idx0] - image_transformed
  107. fraction_of_non_zero_pixels[1] = np.count_nonzero(image_transformed[:,:]<0.001)/262144
  108. #plt.imshow(image_transformed)
  109. #plt.show()
  110. image_difference = image_difference[boarder_cut:-boarder_cut, boarder_cut:-boarder_cut]
  111. image_difference = np.square(image_difference)
  112. rmsd_rigid = np.sqrt(image_difference.sum()/(512 - 2 * boarder_cut)**2)
  113. if (silent_mode!=True):
  114. print("Fraction of non-zero pixels in 1 (mean rigid): ", fraction_of_non_zero_pixels[1], "Score", rmsd_rigid)
  115. #plt.imshow(image_difference)
  116. ### MEAN_ENH RIGID and AFFINE
  117. reference_image = images_mean_enh[:, :, idx0]
  118. initial_image = images_mean_enh[:, :, idx1]
  119. # sx = ndimage.sobel(reference_image, axis=0, mode='constant')
  120. # sy = ndimage.sobel(reference_image, axis=1, mode='constant')
  121. # reference_image = np.hypot(sx, sy)
  122. # sx = ndimage.sobel(initial_image, axis=0, mode='constant')
  123. # sy = ndimage.sobel(initial_image, axis=1, mode='constant')
  124. # initial_image = np.hypot(sx, sy)
  125. boarder_cut = 100
  126. sr = StackReg(StackReg.AFFINE)
  127. tmats_affine_enh[idx0, idx1, :, :] = sr.register(reference_image, initial_image)
  128. image_transformed = sr.transform(images_quality_check[:, :, idx1], tmats_affine_enh[idx0, idx1, :, :])
  129. image_difference = images_quality_check[:, :, idx0] - image_transformed #TODO: delete image quality check! replace it with meanimage
  130. fraction_of_non_zero_pixels[2] = np.count_nonzero(image_transformed[:,:]<0.001)/262144
  131. #plt.imshow(image_transformed)
  132. #plt.show()
  133. image_difference = image_difference[boarder_cut:-boarder_cut, boarder_cut:-boarder_cut]
  134. image_difference = np.square(image_difference)
  135. rmsd_affine_enh = np.sqrt(image_difference.sum()/(512 - 2 * boarder_cut)**2)
  136. if (silent_mode!=True):
  137. print("Fraction of non-zero pixels in 2 (mean enh affine): ", fraction_of_non_zero_pixels[2],"Score:", rmsd_affine_enh)
  138. sr = StackReg(StackReg.RIGID_BODY)
  139. tmats_rigid_enh[idx0, idx1, :, :] = sr.register(reference_image, initial_image)
  140. image_transformed = sr.transform(images_quality_check[:, :, idx1], tmats_rigid_enh[idx0, idx1, :, :])
  141. image_difference = images_quality_check[:, :, idx0] - image_transformed
  142. fraction_of_non_zero_pixels[0] = np.count_nonzero(image_transformed[:,:]<0.001)/262144
  143. #plt.imshow(image_transformed)
  144. #plt.show()
  145. image_difference = image_difference[boarder_cut:-boarder_cut, boarder_cut:-boarder_cut]
  146. image_difference = np.square(image_difference)
  147. rmsd_rigid_enh = np.sqrt(image_difference.sum()/(512 - 2 * boarder_cut)**2)
  148. if (silent_mode!=True):
  149. print("Fraction of non-zero pixels in 0 (mean enh rigid): ", fraction_of_non_zero_pixels[0],"Score", rmsd_rigid_enh)
  150. rmsds=[rmsd_rigid_enh,rmsd_rigid,rmsd_affine_enh,rmsd_affine]
  151. tmatss=[tmats_rigid_enh[idx0, idx1, :, :],tmats_rigid[idx0, idx1, :, :],tmats_affine_enh[idx0, idx1, :, :],tmats_affine[idx0, idx1, :, :]]
  152. methods=["rigid_mean_enh", "rigid_mean" ,"affine_mean_enh","affine_mean"]
  153. #print(tmats_rigid_enh,tmats_rigid,tmats_affine_enh,tmats_affine)
  154. #print(" ")
  155. #best_method_idx = rmsds.index(min(rmsds))
  156. #smaller_fraction_idx = fraction_of_non_zero_pixels.index(min(fraction_of_non_zero_pixels))
  157. #smaller_fraction_idx = 1
  158. #print(best_method_idx)
  159. #print(smaller_fraction_idx)
  160. list_of_methods=np.argsort(rmsds)
  161. best_score[idx1, idx0] = np.sort(rmsds)[0]
  162. best_score[idx0, idx1] = np.sort(rmsds)[0]
  163. the_best_idx = list_of_methods[0]
  164. if (fraction_of_non_zero_pixels[list_of_methods[0]] > 0.1):
  165. print("Warning: alignment with the best method failed. The second best method is applied")
  166. the_best_idx = list_of_methods[1]
  167. if (fraction_of_non_zero_pixels[list_of_methods[1]] > 0.1):
  168. print("Warning: alignment with the second best method failed. The 3rd best method is applied")
  169. the_best_idx = list_of_methods[2]
  170. best_method = methods[the_best_idx]
  171. best_tmats[idx0, idx1, :, :]=tmatss[the_best_idx]
  172. best_methods[idx1, idx0]=the_best_idx
  173. best_methods[idx0, idx1]=the_best_idx
  174. best_tmats[idx1, idx0, :, :]=np.linalg.inv(best_tmats[idx0, idx1, :, :])
  175. if(idx0==idx1):
  176. best_method="-,-"
  177. if (silent_mode!=True):
  178. print("{0:d}, {1:d}, {2:4.4f}, {3:4.4f}, {4:4.4f}, {5:4.4f}, {6:s}".format(idx0, idx1, rmsd_rigid_enh, rmsd_rigid, rmsd_affine_enh, rmsd_affine, best_method))
  179. print("{0:d}, {1:d}, {2:4.4f}, {3:4.4f}, {4:4.4f}, {5:4.4f}, {6:s}".format(idx0, idx1, rmsd_rigid_enh, rmsd_rigid,
  180. rmsd_affine_enh, rmsd_affine, best_method), file=f)
  181. #print(" " + str(idx0) + "-" + str(idx1) + " " + str(rmsd_rigid_enh) + " " + str(rmsd_rigid) + " " + str(rmsd_affine_enh) + " " + str(rmsd_affine))
  182. # plt.imshow(image_difference)
  183. #plt.savefig(save_plots_path + "StackRegVisualInspection/" + file_title + "_d_reference_m_corrected.png")
  184. #print(str(idx0) + '-' + str(idx1))
  185. # save all the transformation matrices
  186. if not os.path.exists(path4results+'StackReg'):
  187. os.makedirs(path4results+'StackReg')
  188. #print(best_tmats)
  189. np.save(path4results+'StackReg/' + str(animal) + "_best_tmats", best_tmats)
  190. np.save(path4results+'StackReg/' + str(animal) + "_best_methods", best_methods)
  191. np.save(path4results+'StackReg/' + str(animal) + "_best_score", best_score)
  192. # if not os.path.exists('Q:/Personal/Mattia/Calcium Imaging/results/StackRegEnhImage'):
  193. # os.makedirs('Q:/Personal/Mattia/Calcium Imaging/results/StackRegEnhImage')
  194. # np.save('Q:/Personal/Mattia/Calcium Imaging/results/StackRegEnhImage/' + str(animal), tmats)
  195. # if not os.path.exists(save_plots_path+ 'StackRegAffine'):
  196. # os.makedirs(save_plots_path + 'StackRegAffine')
  197. # np.save(save_plots_path+ 'StackRegAffine/' + str(animal), tmats)
  198. f.close()
  199. '''
  200. # ### Install package for image comparison (similarity index)
  201. # In[79]:
  202. #get_ipython().system('conda install -c conda-forge imagehash --yes')
  203. #!pip install ImageHash # as alternative if anaconda is not installed
  204. # In[33]:
  205. from PIL import Image
  206. import imagehash
  207. for animal in animals:
  208. print("Animal #", str(animal))
  209. meta_animal = meta_data[meta_data['Mouse'] == animal]
  210. recordings = meta_animal['Number']
  211. index_similarity = np.zeros((np.shape(recordings)[0], np.shape(recordings)[0]))
  212. cut = 100
  213. images_mean = np.zeros((512, 512, np.shape(recordings)[0]))
  214. images_mean_enh = np.zeros((512, 512, np.shape(recordings)[0]))
  215. # load all (enhanced) images
  216. for idx, recording in enumerate(recordings):
  217. options = np.load(meta_data['Folder'][recording] +
  218. meta_data['Subfolder'][recording] +
  219. str(int(meta_data['Recording idx'][recording])) +
  220. '/suite2p/plane0/ops.npy',
  221. allow_pickle=True)
  222. # mean image or mean enhanced image
  223. images_mean[:, :, idx] = options.item(0)['meanImg']
  224. images_mean_enh[:, :, idx] = options.item(0)['meanImgE']
  225. for idx0 in range(np.shape(images_mean)[2]):
  226. for idx1 in range(idx0, np.shape(images_mean)[2]):
  227. hash1=imagehash.average_hash(Image.fromarray(images_mean[cut:-cut, cut:-cut, idx0]))
  228. otherhash=imagehash.average_hash(Image.fromarray(images_mean[cut:-cut, cut:-cut, idx1]))
  229. index_similarity[idx0,idx1] = (hash1 - otherhash)
  230. index_similarity[idx1,idx0] = (hash1 - otherhash)
  231. #index_similarity = (np.max(index_similarity)-index_similarity)/np.max(index_similarity)
  232. #print(index_similarity)
  233. best_tmats = np.load(path4results+'StackReg/' + str(animal) + "_best_tmats.npy")
  234. best_score = np.load(path4results+'StackReg/' + str(animal) + "_best_score.npy")
  235. metric_best_tmats = np.abs(best_tmats)
  236. metric_best_tmats = metric_best_tmats.sum(axis=(2,3))
  237. metric_best_tmats = np.max(metric_best_tmats) - metric_best_tmats
  238. metric_best_score = (np.max(best_score)-best_score)/np.max(best_score)
  239. #plt.xticks(np.arange(0, np.shape(images_mean)[2], 1));
  240. #plt.yticks(np.arange(0, np.shape(images_mean)[2], 1));
  241. fig, ax = plt.subplots(1,3, figsize=(21, 7))
  242. #cmap = colors.ListedColormap(['#FDE725FF', '#556667FF','#238A8DFF', '#404788FF', '#00000000'])
  243. #boundaries = [0, 5, 10, 20, 30,100]
  244. #norm = colors.BoundaryNorm(boundaries, cmap.N, clip=True)
  245. index_similarity[index_similarity > 30] = 30
  246. index_similarity = 30 -index_similarity
  247. image = ax[0].imshow(index_similarity,cmap='viridis') #,cmap=cmap, norm=norm
  248. #cbar = fig.colorbar(image, ax=ax[0], orientation='horizontal', fraction=.1, shrink = 0.5)
  249. #cbar.ax.set_xticklabels(['High','Medium','Low','Very Low'], fontsize=15)
  250. #minorticks = image.norm(np.arange(0, 30, 5))
  251. #cbar.ax.yaxis.set_ticks(minorticks, minor=True)
  252. ax[0].set_title("Similarity index", fontsize = 15)
  253. ax[1].imshow(metric_best_tmats)
  254. ax[1].set_title("Displacement index", fontsize = 15)
  255. ax[2].imshow(metric_best_score)
  256. ax[2].set_title("Distance function", fontsize = 15)
  257. fig.suptitle('Animal #' + str(animal), fontsize = 20)
  258. plt.savefig("./FOV_Validation_" + str(animal) + ".png")
  259. # ### Plot all corrected FOV's for comparison (optional)
  260. #
  261. # **The running takes considerable amount of time!**
  262. # In[23]:
  263. '''
  264. for animal in animals:
  265. tmats_loaded = np.load(path4results + 'StackReg/' + str(animal) + "_best_tmats" + '.npy')
  266. meta_animal = meta_data[meta_data['Mouse'] == animal]
  267. recordings = meta_animal['Number']
  268. images = np.zeros((512, 512, np.shape(meta_animal)[0]))
  269. images_mean = np.zeros((512, 512, np.shape(recordings)[0]))
  270. images_mean_enh = np.zeros((512, 512, np.shape(recordings)[0]))
  271. # load all (enhanced) images
  272. for idx, recording in enumerate(recordings):
  273. options = np.load(meta_data['Folder'][recording] +
  274. meta_data['Subfolder'][recording] +
  275. str(int(meta_data['Recording idx'][recording])) +
  276. '/suite2p/plane0/ops.npy',
  277. allow_pickle=True)
  278. # mean image or mean enhanced image
  279. images_mean[:, :, idx] = options.item(0)['meanImg']
  280. images_mean_enh[:, :, idx] = options.item(0)['meanImgE']
  281. #cut_boarders=50
  282. #quality check
  283. #images_quality_check[:, :, idx] = options.item(0)['meanImg']
  284. # loop through every pair and compute the transformation matrix
  285. conditions = [meta_data['Condition'][recording] for recording in recordings]
  286. recording_idx = [meta_data['Recording idx'][recording] for recording in recordings]
  287. for idx0 in range(np.shape(images_mean)[2]):
  288. #if (idx0!=14):
  289. #continue
  290. for idx1 in range(idx0, np.shape(images_mean)[2]):
  291. #if (idx1!=16):
  292. #continue
  293. reference_image = images_mean_enh[:, :, idx0]
  294. initial_image = images_mean_enh[:, :, idx1]
  295. if not os.path.exists(save_plots_path + 'StackRegVisualInspection/'):
  296. os.makedirs(save_plots_path + 'StackRegVisualInspection/')
  297. if not os.path.exists(save_plots_path + 'StackRegVisualInspection/' + str(animal) + '/'):
  298. os.makedirs(save_plots_path + 'StackRegVisualInspection/' + str(animal) + '/')
  299. plt.imshow(reference_image)
  300. # image_title = meta_data['Subfolder'][recording][:-1] + str(meta_data['Recording idx'][recording]) + "\n" + "_condition_" + \
  301. # meta_data['Condition'][recording]
  302. # plt.title(image_title)
  303. #file_title = meta_data['Subfolder'][recording][:-1] + str(
  304. # meta_data['Recording idx'][recording]) + "_condition_" + \
  305. # meta_data['Condition'][recording] + "_" + str(idx0) + "_" + str(idx1)
  306. file_title = str(str(idx0) + '_' + str(idx1) + '_' + conditions[idx0]) + '_' + str(recording_idx[idx0]) + '_' + str(conditions[idx1]) + "_" + str(recording_idx[idx1])
  307. print(file_title)
  308. plt.savefig(save_plots_path + "StackRegVisualInspection/" + str(animal) + '/' + file_title + "_b_reference.png")
  309. #sx = ndimage.sobel(images[:, :, idx1], axis=0, mode='constant')
  310. #sy = ndimage.sobel(images[:, :, idx1], axis=1, mode='constant')
  311. #sob = np.hypot(sx, sy)
  312. #plt.imshow(sob)
  313. plt.imshow(initial_image)
  314. plt.savefig(save_plots_path + "StackRegVisualInspection/" + str(animal) + '/' + file_title + "_a_initial.png")
  315. #grad = np.gradient(images[:, :, idx1])
  316. #print(images[50:55, 50:55, idx1].shape)
  317. #grad = np.gradient(images[50:55, 50:55, idx1])
  318. #print(images[50:55, 50:55, idx1])
  319. #print(" ")
  320. #print(grad)
  321. #image_inversed = sr.transform(reference_image, best_tmats[idx1, idx0, :, :])
  322. #plt.imshow(image_inversed)
  323. #plt.savefig(save_plots_path + "StackRegVisualInspection/" + str(animal) + '/' + file_title + "_1_inversed.png")
  324. #sx = ndimage.sobel(images[:, :, idx1], axis=0, mode='constant')
  325. #sy = ndimage.sobel(images[:, :, idx1], axis=1, mode='constant')
  326. #sob = np.hypot(sx, sy)
  327. #plt.imshow(images[:, :, idx1])
  328. #plt.savefig(save_plots_path + "StackRegVisualInspection/" + file_title + "_sobel.png")
  329. image_corrected = sr.transform(initial_image, tmats_loaded[idx0, idx1, :, :])
  330. plt.imshow(image_corrected)
  331. plt.savefig(save_plots_path + "StackRegVisualInspection/" + str(animal) + '/' + file_title + "_c_corrected.png")
  332. #image_difference = images_quality_check[:, :, idx0] - sr.transform(images_quality_check[:, :, idx1], best_tmats[idx0, idx1, :, :])
  333. #image_difference = reference_image - image_corrected
  334. #plt.imshow(image_difference)
  335. #plt.savefig(save_plots_path + "StackRegVisualInspection/" + str(animal) + '/' + file_title + "_d_reference_m_corrected.png")
  336. # In[9]:
  337. ###TODO Seaborn Scatterplot heatmap could be a nice option for plotting
  338. # In[ ]:
  339. '''