make_movie_BO.py

import argparse
import ipyparallel as ipp
import numpy as np
import os
import cv2
import time
from retinatools.library import image_to_cone


# Helper function
def RF_FilterFrame(idx, luminance_frame, r_RF):
    """
    Helper function to do the parallelized filtering with RFs.
    """
    from retinatools.library import get_RF
    X = np.linspace(-128, 128, 256)
    Y = np.linspace(-128, 128, 256)
    idx_x = idx % 256                   # column within the 256 x 256 frame
    idx_y = int(np.floor(idx / 256))    # row within the 256 x 256 frame
    x, y = np.meshgrid(X, Y)
    xoff = X[idx_x]
    yoff = Y[idx_y]
    RF = get_RF(xoff, yoff, r_RF)
    product = np.sum(luminance_frame * RF)
    return product
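
# Example (hypothetical, serial) use of RF_FilterFrame without the ipyparallel
# engines, e.g. for a quick sanity check. `frame` and the radius value are
# placeholders, not part of the pipeline below:
#
#   frame = np.random.rand(256, 256)                        # one luminance frame
#   center = RF_FilterFrame(128 * 256 + 128, frame, 10.0)   # RF centered near the middle pixel
#   full = np.reshape([RF_FilterFrame(i, frame, 10.0) for i in range(256 * 256)], (256, 256))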

def parallel_prediction(idx_x, RFstrip, fitparameters):
    '''
    Helper function to do the parallelized processing of the filtered signal;
    this transforms the center/surround signals into activity with standard parameters.
    '''
    from retinatools.library import get_BO_response
    lum_signalS = RFstrip[0, idx_x, :]
    lum_signalM = RFstrip[1, idx_x, :]
    lum_signalL = RFstrip[2, idx_x, :]
    R = get_BO_response(lum_signalS, lum_signalM, lum_signalL, fitparameters)
    return R
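
# Example (hypothetical) call on one strip of the filtered data, outside the
# parallel map. The `data` array built in main() below has shape
# (3, 256, 256, N_MAXFRAME), so one strip holds the S/M/L signals of a single
# image row over time:
#
#   strip = data[:, 0, :, :]                              # shape (3, 256, N_MAXFRAME)
#   trace = parallel_prediction(0, strip, fitparameters)  # response time series for one pixel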


# Model parameters and output configuration
fitparameters = {'rcenterB': 10.0, 'rcenterY': 14.0,
                 'tau1': 10.2, 'tau2': 86, 'delay': 29, 'beta': 0.0, 'alpha': 4.9,
                 'tau3': 280, 'kL': 0.01, 'kM': 0.051, 'kB': 0.12, 'offset': 0.025, 'dt': 1000/150.}

cell = "178Bon"
savepath = "/jukebox/tank/schottdorf/tmp/"
N_MAXFRAME = 4500   # process only the first 2.5 min * 60 s * 30 fps = 4500 frames of the mpg -> 30 s of retina video


def main(profile):
    rc = ipp.Client(profile=profile)   # Connect to the running ipyparallel cluster
    print(rc.ids)
    dview = rc[:]                      # Set up the engines
    dview.use_cloudpickle()            # Make sure to fix the closure over luminance_frame
    with dview.sync_imports():
        import numpy as np
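
    # Sketch of the ipyparallel pattern used below (illustrative only; the real
    # calls follow): push the needed objects into every engine's namespace, then
    # map a function over an index range across the engines.
    #
    #   dview.push(dict(frame=frame))                            # `frame` is a placeholder name
    #   vals = dview.map_sync(lambda i: float(frame.flat[i]), range(10))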

    ################################
    #####  Process the movie  #####
    ################################
    vidcap = cv2.VideoCapture('/usr/people/ms81/cells/1x10_256.mpg')
    data = np.ones((3, 256, 256, N_MAXFRAME))   # S, M, L cone channels x 256 px x 256 px x N_frames
    counter = 0
    success = True
    t0 = time.time()
    while success and (counter < N_MAXFRAME):
        success, image = vidcap.read()
        if not success:                 # stop cleanly if the video ends early
            break
        scone, mcone, lcone = image_to_cone(image)
        for tt in ["S", "M", "L"]:
            if tt == "S":
                r_RF = fitparameters['rcenterB']
                luminance_frame = scone
                idx = 0
            elif tt == "M":
                r_RF = fitparameters['rcenterY']
                luminance_frame = mcone
                idx = 1
            elif tt == "L":
                r_RF = fitparameters['rcenterY']
                luminance_frame = lcone
                idx = 2
            rc[:].push(dict(luminance_frame=luminance_frame, RF_FilterFrame=RF_FilterFrame, r_RF=r_RF))      # Seed the namespace of the IPython engines
            tmp = rc[:].map_sync(lambda ix: RF_FilterFrame(ix, luminance_frame, r_RF), range(0, 256 * 256))  # Execute the spatial filtering on all engines
            data[idx, :, :, counter] = np.reshape(tmp, (256, 256))                                           # Reorganize the results back into a frame
        counter += 1
        print(counter / N_MAXFRAME)
    t1 = time.time()
    # filename = "rgc-{0}-raw.npy".format(os.environ["SLURM_JOB_ID"])
    # np.save(filename, data)
    print("Part 1 DONE; Time needed [in h]:")
    print((t1 - t0) / 3600)

    ################################
    ####  Apply temp. process  ####
    ################################
    t1 = time.time()
    output_model = np.zeros((256, 256, N_MAXFRAME))
    rc[:].push(dict(parallel_prediction=parallel_prediction))
    for idx_x in range(0, 256):
        RFstrip = data[:, idx_x, :, :]
        rc[:].push(dict(RFstrip=RFstrip, fitparameters=fitparameters, parallel_prediction=parallel_prediction))
        tmp = rc[:].map_sync(lambda idx_y: parallel_prediction(idx_y, RFstrip, fitparameters), range(0, 256))
        output_model[idx_x, :, :] = np.reshape(tmp, (256, N_MAXFRAME))
        print(idx_x / 256)
    t2 = time.time()

    filename = "-{0}-frames.npy".format(os.environ["SLURM_JOB_ID"])
    pp = savepath + cell + filename
    np.save(pp, output_model)
    print("Part 2 DONE; Time needed [in h]:")
    print("Saved: " + pp)
    print((t2 - t1) / 3600)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--profile", required=True, help="Name of IPython profile to use")
    args = parser.parse_args()
    main(args.profile)
    print("Done and done.")
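
# Typical invocation (assumed): start an ipyparallel cluster with the desired
# profile, then point this script at it. Engine count and profile name are
# placeholders.
#
#   ipcluster start --profile=my_profile -n 32 &
#   python make_movie_BO.py --profile my_profile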