import math
import warnings

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from scipy import signal


def y_invert(data, height):
    """Flip the y axis so the origin sits at the bottom of a frame of the given pixel height."""
    coln = data.columns
    for i in range(1, len(coln), 3):
        data[coln[i][0], 'y'] = -(data[coln[i][0], 'y'] - height)
    return data


def likelihood(data, threshold):
    """Set x/y to NaN wherever the DeepLabCut likelihood falls below `threshold`."""
    coln = data.columns
    for i in range(1, len(coln), 3):
        bp = coln[i][0]
        data.loc[data[bp, 'likelihood'] < threshold, (bp, 'x')] = math.nan
        data.loc[data[bp, 'likelihood'] < threshold, (bp, 'y')] = math.nan
    return data


def prep_dlc(pathway, threshold, height):
    """Load a DeepLabCut CSV, drop low-likelihood points and flip the y axis."""
    data = pd.read_csv(pathway, index_col=0, delimiter=',', skiprows=0, header=[1, 2])
    data = likelihood(data, threshold)
    data = y_invert(data, height)
    return data
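
# Example usage (hypothetical file name and values; threshold is the DLC
# likelihood cut-off, height the frame height in pixels):
# df = prep_dlc('session1_DLC.csv', threshold=0.9, height=1080)

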
def normalized(v):
    """Return v scaled to unit length."""
    return v / np.sqrt(np.dot(v, v))

def ortho_proj_vec(v, u_null):
    """Remove from v its component along the unit vector u_null."""
    return v - np.dot(v, u_null) * u_null

def ortho_proj_array(Varray, u_null):
    """Row-wise version of ortho_proj_vec for an (n, 3) array of vectors."""
    return Varray - np.outer(np.dot(Varray, u_null), u_null)
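
# Quick illustration (made-up vectors): projecting (1, 1, 0) off the unit x
# axis leaves only the y component.
# e_x = normalized(np.array([2.0, 0.0, 0.0]))        # -> [1., 0., 0.]
# ortho_proj_vec(np.array([1.0, 1.0, 0.0]), e_x)     # -> [0., 1., 0.]

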
def filter_butter(data):
    """Low-pass filter every coordinate trace with a 4th-order Butterworth filter.

    NaNs are filled with the column mean first, because filtfilt cannot handle
    missing samples.
    """
    coln = data.columns
    dims = ['x', 'y', 'z']
    fs = 2000              # sampling frequency (Hz)
    fc = 20                # cut-off frequency of the filter (Hz)
    w = fc / (fs / 2)      # normalize the frequency
    b, a = signal.butter(4, w, 'low')
    for i in range(1, len(coln), 3):
        bp = coln[i][0]
        for j in dims:
            data[bp, j] = data[bp, j].fillna(np.nanmean(data[bp, j]))
            data[bp, j] = signal.filtfilt(b, a, data[bp, j])
    return data
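
# Sanity check of the filter settings on a synthetic trace (same 2 kHz / 20 Hz
# assumption as above): a slow 5 Hz oscillation passes, 300 Hz noise is removed.
# t = np.arange(0, 1, 1 / 2000)
# noisy = np.sin(2 * np.pi * 5 * t) + 0.2 * np.sin(2 * np.pi * 300 * t)
# b, a = signal.butter(4, 20 / (2000 / 2), 'low')
# smooth = signal.filtfilt(b, a, noisy)

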
def transform(data, zero, x, y):
    """Move the 3D data into a body-centred frame and low-pass filter it.

    The median position of body part `zero` becomes the origin, `x` defines the
    x axis and `y` (after orthogonalisation) the y axis; the new x and y
    coordinates are expressed relative to the reference body parts' median
    coordinates and scaled by 10.
    """
    coln = data.columns
    dims = ['x', 'y', 'z']
    # Centre every coordinate on the median position of the `zero` body part.
    for i in range(1, len(coln), 3):
        bp = coln[i][0]
        for j in dims:
            data[bp, j] = data[bp, j] - np.nanmedian(data[zero, j])
    # Build an orthonormal basis from the two reference body parts.
    v1 = (np.nanmedian(data[x, 'x']), np.nanmedian(data[x, 'y']), np.nanmedian(data[x, 'z']))
    v2 = (np.nanmedian(data[y, 'x']), np.nanmedian(data[y, 'y']), np.nanmedian(data[y, 'z']))
    v1_null = normalized(v1)
    v2_null = normalized(ortho_proj_vec(v2, v1_null))
    v3_null = -normalized(np.cross(v1, v2))
    M = np.array([v1_null, v2_null, v3_null])
    # Scale factors are taken once here, before any reference column is overwritten.
    x_scale = np.nanmedian(data[x, 'x'])
    y_scale = np.nanmedian(data[y, 'y'])
    for i in range(1, len(coln), 3):
        bp = coln[i][0]
        result = M.dot([data[bp, 'x'], data[bp, 'y'], data[bp, 'z']])
        data[bp, 'x'] = (result[0] / x_scale) * 10
        data[bp, 'y'] = (result[1] / y_scale) * 10
        data[bp, 'z'] = result[2]
    return filter_butter(data)
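
# Example usage (hypothetical body-part labels; the reference labels depend on
# how the tracking project names its markers):
# df3d = transform(df3d, zero='tail_base', x='nose', y='shoulder_left')

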
def distance_single(data, bp1, bp2, index):
    """3D distance between bp1 and bp2 at one frame (NaN if any coordinate is missing)."""
    p1 = np.array([data[bp1, d][index] for d in ('x', 'y', 'z')])
    p2 = np.array([data[bp2, d][index] for d in ('x', 'y', 'z')])
    return float(np.linalg.norm(p2 - p1))


def distance_all(data, bp1, bp2):
    """3D distance between bp1 and bp2 for every frame (NaN where a coordinate is missing)."""
    return [distance_single(data, bp1, bp2, i) for i in range(len(data))]


def distance_all_2D(data, bp1, bp2):
    """2D (x/y) distance between bp1 and bp2 for every frame (NaN where a coordinate is missing)."""
    res = []
    for i in range(len(data)):
        dx = data[bp2, 'x'][i] - data[bp1, 'x'][i]
        dy = data[bp2, 'y'][i] - data[bp1, 'y'][i]
        res.append(math.sqrt(dx ** 2 + dy ** 2))
    return res
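
# Example usage (hypothetical marker names):
# paw_spread = distance_all(df3d, 'forepaw_left', 'forepaw_right')
# nose_tail_2d = distance_all_2D(df, 'nose', 'tail_base')

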
def angle_all(data, ray1, vertex, ray2):
    """3D angle (degrees) at `vertex` between the rays towards ray1 and ray2, for every frame."""
    res = []
    for i in range(len(data)):
        a = np.array([data[ray1, d][i] - data[vertex, d][i] for d in ('x', 'y', 'z')])
        b = np.array([data[ray2, d][i] - data[vertex, d][i] for d in ('x', 'y', 'z')])
        if np.isnan(a).any() or np.isnan(b).any():
            res.append(np.nan)
        else:
            cos_ang = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
            res.append(180 * np.arccos(cos_ang) / math.pi)
    return res


def angle_all_2D(data, ray1, vertex, ray2):
    """2D (x/y) angle (degrees) at `vertex` between the rays towards ray1 and ray2, for every frame."""
    res = []
    for i in range(len(data)):
        a = np.array([data[ray1, d][i] - data[vertex, d][i] for d in ('x', 'y')])
        b = np.array([data[ray2, d][i] - data[vertex, d][i] for d in ('x', 'y')])
        if np.isnan(a).any() or np.isnan(b).any():
            res.append(np.nan)
        else:
            cos_ang = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
            res.append(180 * np.arccos(cos_ang) / math.pi)
    return res


def angle(data, ray1, vertex, ray2, index):
    """3D angle (degrees) at `vertex` between the rays towards ray1 and ray2 at one frame."""
    a = np.array([data[ray1, d][index] - data[vertex, d][index] for d in ('x', 'y', 'z')])
    b = np.array([data[ray2, d][index] - data[vertex, d][index] for d in ('x', 'y', 'z')])
    if np.isnan(a).any() or np.isnan(b).any():
        return np.nan
    cos_ang = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return 180 * np.arccos(cos_ang) / math.pi
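
# Example usage (hypothetical joint markers): elbow joint angle per frame.
# elbow_angle = angle_all(df3d, 'wrist_left', 'elbow_left', 'shoulder_left')

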
def circle_dis(data, bp, index):
    """Distance of `bp` from the cylinder centre at one frame, in units of the cylinder radius.

    The centre is the intersection of the left-right and top-bottom marker
    lines; the radius is the mean distance of the four rim markers from it.
    """
    x1, y1 = np.nanmedian(data['cylinder_left', 'x']), np.nanmedian(data['cylinder_left', 'y'])
    x2, y2 = np.nanmedian(data['cylinder_right', 'x']), np.nanmedian(data['cylinder_right', 'y'])
    x3, y3 = np.nanmedian(data['cylinder_top', 'x']), np.nanmedian(data['cylinder_top', 'y'])
    x4, y4 = np.nanmedian(data['cylinder_bottom', 'x']), np.nanmedian(data['cylinder_bottom', 'y'])
    # Slopes and intercepts of the lines through opposite rim markers.
    m12 = (y2 - y1) / (x2 - x1)
    m34 = (y4 - y3) / (x4 - x3)
    b12 = y2 - m12 * x2
    b34 = y4 - m34 * x4
    # Their intersection is the centre of the cylinder.
    x = (b34 - b12) / (m12 - m34)
    y = m12 * x + b12
    rad = (math.sqrt((x1 - x) ** 2 + (y1 - y) ** 2)
           + math.sqrt((x2 - x) ** 2 + (y2 - y) ** 2)
           + math.sqrt((x3 - x) ** 2 + (y3 - y) ** 2)
           + math.sqrt((x4 - x) ** 2 + (y4 - y) ** 2)) / 4
    return math.sqrt((data[bp, 'x'][index] - x) ** 2 + (data[bp, 'y'][index] - y) ** 2) / rad
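
# Example usage (hypothetical body part; the four 'cylinder_*' rim markers must
# be present in the tracked data). Values above 1 mean the body part is farther
# from the centre than the rim markers are on average.
# rel_dist = circle_dis(df, 'nose', index=100)

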
def vel_acc(data, dim):
    """Per-frame speed and its first difference (acceleration) for every body part.

    Returns two arrays with one column per body part, zero-padded at the end so
    they keep the same number of rows as `data`. `dim` selects '2D' or '3D'.
    """
    coln = data.columns
    if dim == '3D':
        dims = ('x', 'y', 'z')
    elif dim == '2D':
        dims = ('x', 'y')
    else:
        raise ValueError("dim must be '2D' or '3D'")
    cols = []
    for i in range(1, len(coln), 3):
        bp = coln[i][0]
        cols.append(np.sqrt(sum(np.diff(data[bp, d]) ** 2 for d in dims)))
    vel_arr = np.column_stack(cols)
    acc_arr = np.diff(vel_arr, axis=0)
    # Pad with zeros so velocity and acceleration align with the original frames.
    vel_arr = np.vstack([vel_arr, np.zeros((1, vel_arr.shape[1]))])
    acc_arr = np.vstack([acc_arr, np.zeros((2, acc_arr.shape[1]))])
    return vel_arr, acc_arr
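
# Example usage: per-frame speed and acceleration for every body part of the
# (hypothetical) transformed data frame df3d.
# vel, acc = vel_acc(df3d, '3D')   # each has shape (n_frames, n_bodyparts)

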
def max_step(data):
    """Count steps on the right side and return (step count, index of the last step).

    A step is counted at each local maximum of the low-pass-filtered
    forepaw-hindpaw distance; no further steps are counted once the counter
    exceeds 120.
    """
    step_dis = distance_all(data, 'forepaw_right', 'hindpaw_right')
    fs = 2000              # sampling frequency (Hz)
    fc = 20                # cut-off frequency of the filter (Hz)
    w = fc / (fs / 2)      # normalize the frequency
    b, a = signal.butter(4, w, 'low')
    step_dis = signal.filtfilt(b, a, step_dis)
    step_number = 0
    step_reset = 0
    index_last_step = 0    # defined up front so the function also returns if no step is found
    for i in range(len(step_dis) - 2):
        # Start of a descent after a peak: count a step and wait for the next trough.
        if (step_number <= 120) and (step_reset == 0) and (step_dis[i] > step_dis[i + 1]) and (step_dis[i] > step_dis[i + 2]):
            step_number += 1
            step_reset = 1
            index_last_step = i
        # Trough reached: re-arm the detector for the next step.
        if (step_reset == 1) and (step_dis[i] < step_dis[i + 1]) and (step_dis[i] < step_dis[i + 2]):
            step_reset = 0
    return step_number, index_last_step
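
# Example usage (the labels 'forepaw_right' and 'hindpaw_right' must exist in
# the tracked data):
# n_steps, last_step_idx = max_step(df3d)

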
def _validate_vector(u, dtype=None):
    """Return `u` as a 1-D array (helper modelled on scipy.spatial.distance)."""
    u = np.asarray(u, dtype=dtype, order='c')
    if u.ndim == 1:
        return u
    return np.atleast_1d(u.squeeze())


def _validate_weights(w, dtype=np.double):
    w = _validate_vector(w, dtype=dtype)
    return w


def _nbool_correspond_all(u, v, w=None):
    """Counts (nff, nft, ntf, ntt) of the four boolean correspondences between u and v."""
    if u.dtype == v.dtype == bool and w is None:
        not_u = ~u
        not_v = ~v
        nff = (not_u & not_v).sum()
        nft = (not_u & v).sum()
        ntf = (u & not_v).sum()
        ntt = (u & v).sum()
    else:
        dtype = np.result_type(int, u.dtype, v.dtype)
        u = u.astype(dtype)
        v = v.astype(dtype)
        not_u = 1.0 - u
        not_v = 1.0 - v
        if w is not None:
            not_u = w * not_u
            u = w * u
        nff = (not_u * not_v).sum()
        nft = (not_u * v).sum()
        ntf = (u * not_v).sum()
        ntt = (u * v).sum()
    return (nff, nft, ntf, ntt)


def jaccard(u, v, w=None):
    """Jaccard similarity (intersection over union) of two boolean vectors."""
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v, w=w)
    return float(ntt) / float(ntt + ntf + nft)
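
# Example: Jaccard similarity of two boolean vectors.
# a = np.array([True, True, False, False])
# b = np.array([True, False, True, False])
# jaccard(a, b)   # -> 0.333..., one shared True out of three positions set in either

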
def mask(array1, array2):
    """Combine two binary masks: 1 where at least one mask is set, 0.5 where neither is.

    Assumes float arrays, so that the 0.5 value is representable.
    """
    comb = array1 + array2
    comb[comb == 2] = 1
    comb[comb == 0] = 0.5
    return comb
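
# Example: combine two float masks.
# mask(np.array([1.0, 0.0, 1.0, 0.0]), np.array([1.0, 1.0, 0.0, 0.0]))
# -> array([1. , 1. , 1. , 0.5])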