# master_funcs.py
  1. import pandas as pd
  2. import matplotlib.pyplot as plt
  3. from scipy import signal
  4. import numpy as np
  5. import math
  6. from mpl_toolkits import mplot3d
  7. import warnings
  8. def y_invert(data, height):
  9. coln=data.columns
  10. for i in range(1, len(coln), 3):
  11. data[coln[i][0], 'y']=-(data[coln[i][0], 'y']-height)
  12. return data
  13. def likelihood(data, threshold):
  14. coln=data.columns
  15. for i in range(1, len(coln), 3):
  16. data.loc[data[coln[i][0], 'likelihood']<threshold, (coln[i][0], 'x')]=math.nan
  17. data.loc[data[coln[i][0], 'likelihood']<threshold, (coln[i][0], 'y')]=math.nan
  18. return data
  19. def prep_dlc(pathway, threshold, height):
  20. data=pd.read_csv(pathway, index_col=0, delimiter=',', skiprows=0, header=[1,2])
  21. data=likelihood(data, threshold)
  22. data=y_invert(data, height)
  23. return data
  24. def normalized(v):
  25. return v/np.sqrt(np.dot(v,v))
  26. def ortho_proj_vec(v, u_null):
  27. return v-np.dot(v,u_null)*u_null
  28. def ortho_proj_array(Varray, u_null):
  29. return Varray-np.outer(np.dot(Varray, u_null), u_null)
  30. def filter_butter(data):
  31. coln=data.columns
  32. dims=['x', 'y', 'z']
  33. fs = 2000
  34. fc = 20 # Cut-off frequency of the filter
  35. w = fc / (fs / 2) # Normalize the frequency
  36. b, a = signal.butter(4, w, 'low')
  37. for i in range(1, len(coln), 3):
  38. for j in dims:
  39. idx=list(np.argwhere(np.isnan(data[coln[i][0], j].tolist())))
  40. for k in idx:
  41. data[coln[i][0], j][int(k)]=np.nanmean(data[coln[i][0], j])
  42. data[coln[i][0], j]=signal.filtfilt(b, a, data[coln[i][0], j])
  43. return data
def transform(data, zero, x, y):
    # Re-express all bodypart coordinates in a frame defined by three
    # reference markers: `zero` (origin), `x` (x-axis marker) and `y`
    # (y-axis marker); then rescale x/y and low-pass filter the result.
    # Mutates `data` in place and returns the filtered DataFrame.
    coln = data.columns
    dims = ['x', 'y', 'z']
    # Translate: subtract the median position of the `zero` marker from
    # every bodypart coordinate (columns come in triples starting at 1).
    for i in range(1, len(coln), 3):
        for j in dims:
            data[coln[i][0], j] = data[coln[i][0], j] - np.nanmedian(data[zero, j])
    # Median positions of the axis markers in the translated frame.
    v1 = (np.nanmedian(data[x, 'x']), np.nanmedian(data[x, 'y']), np.nanmedian(data[x, 'z']))
    v2 = (np.nanmedian(data[y, 'x']), np.nanmedian(data[y, 'y']), np.nanmedian(data[y, 'z']))
    v1_null = normalized(v1)
    # Gram-Schmidt: component of v2 orthogonal to v1, normalized.
    v2_null = normalized(ortho_proj_vec(v2, v1_null))
    # Cross product v1 x v2 (written out component-wise) gives the third axis;
    # the sign is flipped after normalization.
    v3 = (((v1[1]*v2[2])-(v1[2]*v2[1])), (v1[2]*v2[0])-(v1[0]*v2[2]), (v1[0]*v2[1])-(v1[1]*v2[0]))
    v3_null = -normalized(v3)
    # Rotation matrix whose rows are the new (orthonormal) basis vectors.
    M = np.array([[v1_null[0], v1_null[1], v1_null[2]], [v2_null[0], v2_null[1], v2_null[2]], [v3_null[0], v3_null[1], v3_null[2]]])
    # Rotate every bodypart, then scale x and y by the median marker
    # coordinates (x10); z is left unscaled.
    for i in range(1, len(coln), 3):
        vec = [data[coln[i][0], 'x'], data[coln[i][0], 'y'], data[coln[i][0], 'z']]
        result = M.dot(vec)
        data[coln[i][0], 'x'], data[coln[i][0], 'y'], data[coln[i][0], 'z'] = (result[0]/np.nanmedian(data[x, 'x']))*10, (result[1]/np.nanmedian(data[y, 'y']))*10, result[2]
    # Smooth everything with the shared Butterworth filter.
    return filter_butter(data)
  62. def distance_single(data, bp1, bp2, index):
  63. if(math.isnan(data[bp1, 'x'][index])==False) and (math.isnan(data[bp1, 'y'][index])==False) and (math.isnan(data[bp1, 'z'][index])==False) and (math.isnan(data[bp2, 'x'][index])==False) and (math.isnan(data[bp2, 'y'][index])==False) and (math.isnan(data[bp2, 'z'][index])==False):
  64. return math.sqrt((data[bp2, 'x'][index]-data[bp1, 'x'][index])**2+(data[bp2, 'y'][index]-data[bp1, 'y'][index])**2+(data[bp2, 'z'][index]-data[bp1, 'z'][index])**2)
  65. else:
  66. return np.nan
  67. def distance_all(data, bp1, bp2):
  68. res=[]
  69. for i in range(0, len(data)):
  70. if(math.isnan(data[bp1, 'x'][i])==False) and (math.isnan(data[bp1, 'y'][i])==False) and (math.isnan(data[bp1, 'z'][i])==False) and (math.isnan(data[bp2, 'x'][i])==False) and (math.isnan(data[bp2, 'y'][i])==False) and (math.isnan(data[bp2, 'z'][i])==False):
  71. res.append(math.sqrt((data[bp2, 'x'][i]-data[bp1, 'x'][i])**2+(data[bp2, 'y'][i]-data[bp1, 'y'][i])**2+(data[bp2, 'z'][i]-data[bp1, 'z'][i])**2))
  72. else:
  73. res.append(np.nan)
  74. return res
  75. def distance_all_2D(data, bp1, bp2):
  76. res=[]
  77. for i in range(0, len(data)):
  78. if(math.isnan(data[bp1, 'x'][i])==False) and (math.isnan(data[bp1, 'y'][i])==False) and (math.isnan(data[bp2, 'x'][i])==False) and (math.isnan(data[bp2, 'y'][i])==False):
  79. res.append(math.sqrt((data[bp2, 'x'][i]-data[bp1, 'x'][i])**2+(data[bp2, 'y'][i]-data[bp1, 'y'][i])**2))
  80. else:
  81. res.append(np.nan)
  82. return res
  83. def angle_all(data, ray1, vertex, ray2):
  84. res=[]
  85. for i in range(0, len(data)):
  86. if(math.isnan(data[ray1, 'x'][i])==False) and (math.isnan(data[ray1, 'y'][i])==False) and (math.isnan(data[ray1, 'z'][i])==False) and (math.isnan(data[ray2, 'x'][i])==False) and (math.isnan(data[ray2, 'y'][i])==False) and (math.isnan(data[ray2, 'z'][i])==False) and (math.isnan(data[vertex, 'x'][i])==False) and (math.isnan(data[vertex, 'y'][i])==False) and (math.isnan(data[vertex, 'z'][i])==False):
  87. res.append((180*(np.arccos(((data[ray1, 'x'][i]-data[vertex, 'x'][i])*(data[ray2, 'x'][i]-data[vertex, 'x'][i])+(data[ray1, 'y'][i]-data[vertex, 'y'][i])*(data[ray2, 'y'][i]-data[vertex, 'y'][i])+(data[ray1, 'z'][i]-data[vertex, 'z'][i])*(data[ray2, 'z'][i]-data[vertex, 'z'][i]))/(math.sqrt((data[ray1, 'x'][i]-data[vertex, 'x'][i])**2+(data[ray1, 'y'][i]-data[vertex, 'y'][i])**2+(data[ray1, 'z'][i]-data[vertex, 'z'][i])**2)*math.sqrt((data[ray2, 'x'][i]-data[vertex, 'x'][i])**2+(data[ray2, 'y'][i]-data[vertex, 'y'][i])**2+(data[ray2, 'z'][i]-data[vertex, 'z'][i])**2)))))/math.pi)
  88. else:
  89. res.append(np.nan)
  90. return res
  91. def angle_all_2D(data, ray1, vertex, ray2):
  92. res=[]
  93. for i in range(0, len(data)):
  94. if(math.isnan(data[ray1, 'x'][i])==False) and (math.isnan(data[ray1, 'y'][i])==False) and (math.isnan(data[ray2, 'x'][i])==False) and (math.isnan(data[ray2, 'y'][i])==False) and (math.isnan(data[vertex, 'x'][i])==False) and (math.isnan(data[vertex, 'y'][i])==False):
  95. res.append((180*(np.arccos(((data[ray1, 'x'][i]-data[vertex, 'x'][i])*(data[ray2, 'x'][i]-data[vertex, 'x'][i])+(data[ray1, 'y'][i]-data[vertex, 'y'][i])*(data[ray2, 'y'][i]-data[vertex, 'y'][i]))/(math.sqrt((data[ray1, 'x'][i]-data[vertex, 'x'][i])**2+(data[ray1, 'y'][i]-data[vertex, 'y'][i])**2)*math.sqrt((data[ray2, 'x'][i]-data[vertex, 'x'][i])**2+(data[ray2, 'y'][i]-data[vertex, 'y'][i])**2)))))/math.pi)
  96. else:
  97. res.append(np.nan)
  98. return res
  99. def angle(data, ray1, vertex, ray2, index):
  100. if(math.isnan(data[ray1, 'x'][index])==False) and (math.isnan(data[ray1, 'y'][index])==False) and (math.isnan(data[ray1, 'z'][index])==False) and (math.isnan(data[ray2, 'x'][index])==False) and (math.isnan(data[ray2, 'y'][index])==False) and (math.isnan(data[ray2, 'z'][index])==False) and (math.isnan(data[vertex, 'x'][index])==False) and (math.isnan(data[vertex, 'y'][index])==False) and (math.isnan(data[vertex, 'z'][index])==False):
  101. return (180*(np.arccos(((data[ray1, 'x'][index]-data[vertex, 'x'][index])*(data[ray2, 'x'][index]-data[vertex, 'x'][index])+(data[ray1, 'y'][index]-data[vertex, 'y'][index])*(data[ray2, 'y'][index]-data[vertex, 'y'][index])+(data[ray1, 'z'][index]-data[vertex, 'z'][index])*(data[ray2, 'z'][index]-data[vertex, 'z'][index]))/(math.sqrt((data[ray1, 'x'][index]-data[vertex, 'x'][index])**2+(data[ray1, 'y'][index]-data[vertex, 'y'][index])**2+(data[ray1, 'z'][index]-data[vertex, 'z'][index])**2)*math.sqrt((data[ray2, 'x'][index]-data[vertex, 'x'][index])**2+(data[ray2, 'y'][index]-data[vertex, 'y'][index])**2+(data[ray2, 'z'][index]-data[vertex, 'z'][index])**2)))))/math.pi
  102. else:
  103. return np.nan
  104. def circle_dis(data, bp, index):
  105. x1=np.nanmedian(data['cylinder_left', 'x'])
  106. y1=np.nanmedian(data['cylinder_left', 'y'])
  107. x2=np.nanmedian(data['cylinder_right', 'x'])
  108. y2=np.nanmedian(data['cylinder_right', 'y'])
  109. x3=np.nanmedian(data['cylinder_top', 'x'])
  110. y3=np.nanmedian(data['cylinder_top', 'y'])
  111. x4=np.nanmedian(data['cylinder_bottom', 'x'])
  112. y4=np.nanmedian(data['cylinder_bottom', 'y'])
  113. x=(((y4-((y4-y3)/(x4-x3))*x4)-(y2-((y2-y1)/(x2-x1))*x2))/(((y2-y1)/(x2-x1))-((y4-y3)/(x4-x3))))
  114. y=((y2-y1)/(x2-x1))*x+(y2-((y2-y1)/(x2-x1))*x2)
  115. rad=(math.sqrt((x1-x)**2+(y1-y)**2)+math.sqrt((x2-x)**2+(y2-y)**2)+math.sqrt((x3-x)**2+(y3-y)**2)+math.sqrt((x4-x)**2+(y4-y)**2))/4
  116. return math.sqrt((data[bp, 'x'][index]-x)**2+(data[bp, 'y'][index]-y)**2)/rad
  117. def vel_acc(data, dim):
  118. coln=data.columns
  119. if dim=='3D':
  120. vel_arr=np.sqrt(np.diff(data[coln[1][0], 'x'])**2+np.diff(data[coln[1][0], 'y'])**2+np.diff(data[coln[1][0], 'z'])**2)
  121. for i in range(4, len(coln), 3):
  122. vel_arr=np.c_[vel_arr, np.sqrt(np.diff(data[coln[i][0], 'x'])**2+np.diff(data[coln[i][0], 'y'])**2+np.diff(data[coln[i][0], 'z'])**2)]
  123. acc_arr=np.diff(vel_arr[:,0])
  124. for i in range(1, vel_arr.shape[1]):
  125. acc_arr=np.c_[acc_arr, np.diff(vel_arr[:, i])]
  126. elif dim=='2D':
  127. vel_arr=np.sqrt(np.diff(data[coln[1][0], 'x'])**2+np.diff(data[coln[1][0], 'y'])**2)
  128. for i in range(4, len(coln), 3):
  129. vel_arr=np.c_[vel_arr, np.sqrt(np.diff(data[coln[i][0], 'x'])**2+np.diff(data[coln[i][0], 'y'])**2)]
  130. acc_arr=np.diff(vel_arr[:,0])
  131. for i in range(1, vel_arr.shape[1]):
  132. acc_arr=np.c_[acc_arr, np.diff(vel_arr[:, i])]
  133. return np.r_[vel_arr, [[0]*vel_arr.shape[1]]], np.r_[acc_arr, [[0]*acc_arr.shape[1], [0]*acc_arr.shape[1]]]
  134. def max_step(data):
  135. step_dis=distance_all(data, 'forepaw_right', 'hindpaw_right')
  136. fs = 2000
  137. fc = 20 # Cut-off frequency of the filter
  138. w = fc / (fs / 2) # Normalize the frequency
  139. b, a = signal.butter(4, w, 'low')
  140. step_dis=signal.filtfilt(b, a, step_dis)
  141. step_number=0
  142. step_reset=0
  143. for i in range(0, len(step_dis)-2):
  144. if (step_number<=120) and (step_reset==0) and (step_dis[i]>step_dis[i+1]) and (step_dis[i]>step_dis[i+2]):
  145. step_number+=1
  146. step_reset=1
  147. index_last_step=i
  148. if (step_reset==1) and (step_dis[i]<step_dis[i+1]) and (step_dis[i]<step_dis[i+2]):
  149. step_reset=0
  150. return step_number, index_last_step
  151. def _validate_vector(u, dtype=None):
  152. u = np.asarray(u, dtype=dtype, order='c')
  153. if u.ndim == 1:
  154. return u
  155. u = np.atleast_1d(u.squeeze())
  156. return
  157. def _validate_weights(w, dtype=np.double):
  158. w = _validate_vector(w, dtype=dtype)
  159. return w
  160. def _nbool_correspond_all(u, v, w=None):
  161. if u.dtype == v.dtype == bool and w is None:
  162. not_u = ~u
  163. not_v = ~v
  164. nff = (not_u & not_v).sum()
  165. nft = (not_u & v).sum()
  166. ntf = (u & not_v).sum()
  167. ntt = (u & v).sum()
  168. else:
  169. dtype = np.find_common_type([int], [u.dtype, v.dtype])
  170. u = u.astype(dtype)
  171. v = v.astype(dtype)
  172. not_u = 1.0 - u
  173. not_v = 1.0 - v
  174. if w is not None:
  175. not_u = w * not_u
  176. u = w * u
  177. nff = (not_u * not_v).sum()
  178. nft = (not_u * v).sum()
  179. ntf = (u * not_v).sum()
  180. ntt = (u * v).sum()
  181. return (nff, nft, ntf, ntt)
  182. def jaccard(u, v, w=None):
  183. u = _validate_vector(u)
  184. v = _validate_vector(v)
  185. if w is not None:
  186. w = _validate_weights(w)
  187. (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
  188. return float(ntt) / float(ntt + ntf + nft)
  189. def mask(array1, array2):
  190. comb=array1+array2
  191. comb[comb==2]=1
  192. comb[comb==0]=0.5
  193. return comb