|
@@ -0,0 +1,605 @@
|
|
|
+# -*- coding: utf-8 -*-
|
|
|
+"""
|
|
|
+Created on Tue Dec 10 15:02:37 2019
|
|
|
+
|
|
|
+@author: Cecilia and Jan
|
|
|
+
|
|
|
+
|
|
|
+The overall workflow is as follows:
|
|
|
+ 1) ITD TWF data files are LFP filtered and written back to disk
|
|
|
+ 2) In Matlab, LFP filtered data files are cleaned using Alain's DSS algorithm
|
|
|
+ 3) Cleaned datafiles are read back in and LFP response amplitudes are computed
|
|
|
+ 4) LFP response amplitudes are read back in and analysed with linear regression.
|
|
|
+
|
|
|
+"""
|
|
|
+
|
|
|
+#%% Step 0: import libraries and set paths
|
|
|
+import os
|
|
|
+from glob import glob
|
|
|
+import csv
|
|
|
+
|
|
|
def zscore(X):
    """Standardize an array: subtract its mean, divide by its (population) std."""
    centered = X - X.mean()
    return centered / X.std()
|
|
|
+
|
|
|
+
|
|
|
blankChans=[8,57,64] # these channels aren't connected on the Viventi electrodes
## create a datalog of all recorded filenames
# Probe candidate data roots in order (Windows share first, then several
# possible Linux mount points) until one exists on this machine.
#PATH = r'Z:\ephys\2\Cecilia_TWF'
PATH = r'Z:\home\colliculus\ephys\2\Cecilia_TWF'
if not os.path.exists(PATH):
    PATH='/ephys/2/Cecilia_TWF'
if not os.path.exists(PATH):
    PATH='/twinkle/ephys/2/Cecilia_TWF'
if not os.path.exists(PATH):
    PATH='/home/colliculus/ephys/2/Cecilia_TWF'
# Extension pattern for the stimulus-parameter tables.
EXT = "*.csv"
|
|
|
+
|
|
|
+
|
|
|
import statsmodels.api as sm
import os   # NOTE(review): duplicate of the earlier `import os` — harmless
import sys

# Probe candidate locations of the shared analysis-code directory (which
# provides RZ2ephys) on the various machines this script runs on.
# codeDir='//twinkle.bms.cityu.edu.hk/colliculus/behaviourBoxes/software/ratCageProgramsV2' #directory at ephys station
codeDir='/home/colliculus/behaviourBoxes/software/ratCageProgramsV2' #directory at ephys station

if not os.path.exists(codeDir):
    #codeDir='d:/jan/behavbox/ratCageProgramsV2' #directory at ephys station
    codeDir = 'Z:/behaviourBoxes/software/ratCageProgramsV2' ## directory at CC's desktop

if not os.path.exists(codeDir):
    codeDir = 'Z:/home/colliculus/behaviourBoxes/software/ratCageProgramsV2' ## directory at CC's laptop

if not os.path.exists(codeDir):
    # NOTE(review): a more specific exception (e.g. FileNotFoundError)
    # would be preferable to BaseException here.
    raise BaseException('No code directory')

# Make RZ2ephys importable from the shared code directory.
sys.path.append(codeDir)

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import RZ2ephys as ep   # project module: sweep/stimulus file I/O and filtering
import ntpath
import copy
|
|
|
#% Pre-step 1: read in tables listing relevant datafiles provided by Cecilia. Put into variable "filepaths"
filepaths=[];
#analysisPath=PATH+'/Analysis/Analysis_AC_ECoG_TWF_ITD_SparseData'
# `cond` selects which experimental condition / dataset to process; exactly
# one of the four options should be uncommented. The last three characters
# ('ITD' or 'ILD') are later used as the stimulus column-name prefix.
# cond='ITD'
#cond='ILD'
cond='trained_ITD'
# cond='trained_ILD'
|
|
|
# Analysis output directories for the selected condition.
analysisPath=PATH+'/Analysis/Analysis_AC_ECoG_TWF_'+cond+'_SparseData'
# FIX: the second literal below was not a raw string, so '\s' in
# '_SparseData\swp_stim_allData' was an invalid escape sequence
# (DeprecationWarning today, an error in future Python). Making it raw
# leaves the resulting path string byte-identical.
savePath = r'Z:\ephys\2\Cecilia_TWF\Analysis\Analysis_AC_ECoG_TWF_'+cond+r'_SparseData\swp_stim_allData'
# Fall back to the Linux mount points if the Windows share is absent.
if not os.path.exists(savePath):
    savePath = '/ephys/2/Cecilia_TWF/Analysis/Analysis_AC_ECoG_TWF_'+cond+'_SparseData/swp_stim_allData'
if not os.path.exists(savePath):
    savePath = '/home/colliculus/ephys/2/Cecilia_TWF/Analysis/Analysis_AC_ECoG_TWF_'+cond+'_SparseData/swp_stim_allData'
|
|
|
# Read the list of data files to analyse (one path per row, first column)
# into `filepaths`.
#with open(analysisPath+'/AC_ECoG_TWF_ITD_SparseData_filesToAnalyse.csv', newline='') as myFile:
with open(analysisPath+'/AC_ECoG_TWF_'+cond+'_SparseData_filesToAnalyse.csv', newline='') as myFile:
#with open('Z:/ephys/2/Cecilia_TWF/Analysis/Analysis_AC_ECoG_TWF_trained_ITD_SparseData/AC_ECoG_TWF_trained_ITD_SparseData_datalog.csv', newline='') as myFile:
    reader = csv.reader(myFile)
    for row in reader:
        filepaths.append(row[0])

#filePath=filepaths[2]
|
|
|
+#%% Step 1: Loop through data files, calculate LFP and write back to disk
|
|
|
+
|
|
|
# Step 1: for every raw data file, read sweeps + stimulus table, align them,
# low-pass filter to LFP (with 4x downsampling) and write the result back to
# disk with an 'LFP_' filename prefix for the subsequent Matlab DSS cleaning.
for filePath in filepaths:
#%
    filename = os.path.splitext(filePath)[0]
    fname = ntpath.basename(filename)
    LFPfilePath, LFPfileName = os.path.split(filePath)
    # strip the '.csv' extension and prepend the LFP marker
    LFPfileName='LFP_'+LFPfileName[:-4]

    print("=== === ===")
    print('Processing data from ' + fname)
#    swp, stim = ep.readEphysFile((PATH+'/'+filePath)[:-4])
    swp, stim = ep.readEphysFile(filePath[:-4])
    print('Reading in data done!')

    print('Start to match swp and stim.')
    #stim=stim.loc[:, ~stim.columns.str.contains('timeStamp')]
    # get rid of timeStamps. We won't need them and they are all unique

    #% there is likely to be a pre-sweep. For analysis purposes we throw that out
    if len(swp)-stim.shape[0] == 1:
        # one extra sweep at the start: drop it so sweeps align with stim rows
        swp=swp[1:]
        print('Matching done!')
    else:
        print('Warning: unexpected that number of sweeps does not equal 1-row of stim')

    #% due to a bug in early versions of the recording program
    # the last sweep in some files may be too short.
    # If so, drop it. We use as criterion that the last sweep should at least have
    # of 0.8 s

    print('Taking care of the last swp.')
    if swp[-1].signal.shape[0]/swp[-1].sampleRate < 0.8:
        swp=swp[:-1]
        stim=stim[:-1]
        print('Trimming done!')

#    print('Applying AMUA filter.')
#    AMUAFilters=swp[0].AMUAFilterCoeffs()
#    for ii in range(len(swp)):
#        swp[ii]=swp[ii].calcAMUA(AMUAFilters)

    print('Applying LFP filter.')
    # NOTE(review): the filter loop starts at index 1, so swp[0] is written
    # out UNfiltered (and its coefficients are taken from swp[1]). Confirm
    # whether skipping the first sweep is intentional — the pre-sweep has
    # already been removed above.
    lpfCoefs=swp[1].LFPfilterCoeffs()
    for ss in range(1,len(swp)):
        swp[ss]=swp[ss].calcLFP(lpfCoefs,downsample=4)

#% save LFP data

    #ep.writeEphysFile(PATH+'/'+LFPfilePath+'/'+LFPfileName,swp)
    ep.writeEphysFile(LFPfilePath+'/'+LFPfileName,swp)

print('===================================')
print('======Exporting LFPs complete =====')
print('===================================')
|
|
|
#%% Step 2: Now carry out step 2 by running EphysSparseTWFbatchAnalysis.m in Matlab
  # once that's done:

#%% Step 3: Read in cleaned data files and compute TWF with regression:
#ff=0
##%%
#
#filePath=filepaths[ff]
#ff+=1
#%

from pathlib import PurePath
# Set True to save per-channel mean-LFP overview figures for each file.
plotLFPs=False
# create column names and row names

# sc is the stimulus-cue prefix ('ITD' or 'ILD' — the last 3 chars of cond);
# rowName lists the four per-click regressor column names, e.g. 'ITD 0'..'ITD 3'.
sc=cond[-3:]
rowName = [sc+' 0',sc+' 1',sc+' 2',sc+' 3']

# processing
|
|
|
+
|
|
|
# Step 3: for each file, read the DSS-cleaned sweeps ('CLEAN_' prefix) and the
# original stimulus table, compute per-sweep/per-channel LFP response
# amplitudes, and save them to CSV.
for filePath in filepaths:
#%
    #
    filename = os.path.splitext(filePath)[0]
    fname = ntpath.basename(filename)
    LFPfilePath, LFPfileName = os.path.split(filePath)
    LFPfileName='CLEAN_'+LFPfileName[:-4]

    #% step 3.1, read in cleaned data
    print("=== === ===")
    print('Processing data from ' + LFPfileName)

    # Build platform-independent paths to the cleaned .ephys file and the
    # matching stimulus .csv.
    addPath = PurePath(LFPfilePath)
    readInPath = PurePath(PATH).joinpath(addPath)

    swpPath = PurePath(readInPath).joinpath(LFPfileName)
    swpFilename = swpPath.with_suffix('.ephys').as_posix()

    stimPath = PurePath(readInPath).joinpath(fname)
    stimFilename = stimPath.with_suffix('.csv').as_posix()

## read in .ephys file
#    swp, ignore = ep.readEphysFile(PATH+'/'+LFPfilePath+ '/'+LFPfileName)
#    stim = ep.readEphysStimTable((PATH+'/'+filePath)[:-4])

#%
#    swp, ignore = ep.readEphysFile(swpFilename) ## this causes error, no stim file found. The CLEAN dataset does not have a corresponding .csv file.

    # Read sweeps one MCsweep at a time until a sweep with no signal marks EOF.
    swp =np.array([])
    with open(swpFilename,'rb') as fileObject:
        swpN=ep.MCsweep(fileObject)
        while not swpN.signal is None:
            swp=np.append(swp,swpN)
            swpN=ep.MCsweep(fileObject)
    # NOTE(review): redundant — the `with` block has already closed the file.
    fileObject.close()

    # read in corresponding stim param table
    stim = ep.readEphysStimTable(stimFilename)
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+ if stim.shape[0] > len(swp):
|
|
|
+
|
|
|
+ # last sweep may have been dropped. If so, drop last stim too
|
|
|
+ stim.drop(stim.index[-1])
|
|
|
    print('Reading in data done!')

#%
    # Average the LFP across all sweeps (copy so swp[0].signal is not mutated
    # by the in-place accumulation below).
    meanLFP=copy.copy(swp[0].signal)
    nswp=1
    for ss in range(1,len(swp)):
        meanLFP+=swp[ss].signal
        nswp+=1
    meanLFP=meanLFP/nswp

    if plotLFPs:
        ## plot and save meanLFP as an 8x8 grid of channel traces
        plt.figure(1)
        plt.clf()
        dchan=1
        taxis=np.array(range(meanLFP.shape[0]))/swp[0].sampleRate
        for chan in range(swp[0].signal.shape[1]): # loop through channels
            plt.subplot(8,8,dchan)
            if dchan==4:
                plt.title(filename)
            dchan+=1
            # skip subplot positions of unconnected channels
            if dchan in blankChans:
                dchan += 1
            plt.plot(taxis,meanLFP[:,chan])
            plt.yticks([])

        plt.pause(0.05)

        if not os.path.exists(analysisPath+'/meanLFPfigures/'):
            os.makedirs(analysisPath+'/meanLFPfigures/')
        plt.savefig(analysisPath+'/meanLFPfigures/'+fname+'.svg',format='svg')

    #% step 3.2, compute responses
    print('Generate base line and response channel data.')
    # Stimulus onset: the pre-stimulus silence if the table provides one.
    if 'preSilence (s)' in stim.columns :
        stimStart=stim['preSilence (s)'][0]
    else:
        stimStart=0

#%
    # Response window: 1-100 ms after stimulus onset.
    respWin=stimStart+np.array([0.001,0.1]) # resp window
#    baselineWin=stimStart+np.array([0.65,-1])

    # create np array header (column names 'RespChan1'..'RespChanN')
    resp=[]
#    baseline=[]
    for cc in range(swp[0].signal.shape[1]):
        resp.append('RespChan{}'.format(cc+1))
#        baseline.append('BaselineChan{}'.format(cc+1))
    respM=np.array(resp)
#    baselineM=np.array(baseline)

    # Per-sweep response amplitude = RMS of the LFP in the response window.
    for ss in range(swp.shape[0]):
#        resp= swp[ss].response(respWin)
#        resp= swp[ss].sigSnip(respWin).std(axis=0)
        resp=np.sqrt(np.mean(swp[ss].sigSnip(respWin)**2,axis=0))
        respM=np.vstack([respM,resp])
#        baseline= swp[ss].response(baselineWin)
#        baselineM=np.vstack([baselineM,baseline])
    respD=pd.DataFrame(respM[1:,:],columns=respM[0,:])
#    baselineD=pd.DataFrame(baselineM[1:,:],columns=baselineM[0,:])
    #%
    # Join stimulus parameters with response amplitudes row-by-row.
    allD=pd.merge(stim,respD,left_index=True,right_index=True)
    # respM mixed strings (header) with floats, so columns come out as object;
    # convert the response columns back to float.
    for cc in range(swp[0].signal.shape[1]):
        allD['RespChan{}'.format(cc+1)]=allD['RespChan{}'.format(cc+1)].astype(float)

    #% save all LFP response data to csv

    saveFile = fname + '_LFPresponses.csv'
    allD.to_csv(os.path.join(savePath, saveFile))
    print('All dataset saved!')
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+print('====================================')
|
|
|
+print('====== Analysis batch complete =====')
|
|
|
+print('====================================')
|
|
|
+
|
|
|
#%% step 3a - Let's look through the data to find an example with a big difference in LFP amplitude for all pos and all neg ITDs.
# for illustrative purposes
from scipy.stats import ranksums
sc=cond[-3:]
rowName = [sc+' 0',sc+' 1',sc+' 2',sc+' 3']
filePath=filepaths[11]
# filePath=filepaths[3]
filename = os.path.splitext(filePath)[0]
fname = ntpath.basename(filename)
# Click rate is encoded in the file name ('900' present -> 900 Hz, else 300 Hz).
if fname.find("900")<0:
    clickRate=300
else:
    clickRate=900
loadFile = fname + '_LFPresponses.csv'
print('loading LFP responses from ',loadFile)
allD=pd.read_csv(os.path.join(savePath, loadFile))
sc=cond[-3:]

# Print an effect size (Cohen's d) per channel for "all four ITDs positive"
# vs "all four ITDs negative" trials.
for chan in [3, 6, 9, 12 ,15, 18, 21, 24, 27, 30]:
    # NOTE(review): posITDs/negITDs do not depend on `chan` and could be
    # hoisted out of the loop.
    posITDs=allD[(allD["ITD 0"]>0) & (allD["ITD 1"]>0) & (allD["ITD 2"]>0) & (allD["ITD 3"]>0)]
    negITDs=allD[(allD["ITD 0"]<0) & (allD["ITD 1"]<0) & (allD["ITD 2"]<0) & (allD["ITD 3"]<0)]
    mu1=posITDs["RespChan{}".format(chan)].mean()
    mu2=negITDs["RespChan{}".format(chan)].mean()
    s1=posITDs["RespChan{}".format(chan)].std()
    s2=negITDs["RespChan{}".format(chan)].std()
    print((mu1-mu2)/np.sqrt(0.5*(s1**2+s2**2)))

chan=29
# chan=3
plt.clf()
# posITDs/negITDs here are the values left over from the loop above (they are
# loop-invariant, so this is well-defined).
plt.boxplot([posITDs["RespChan{}".format(chan)], negITDs["RespChan{}".format(chan)]])
# NOTE(review): the ranksums result is computed but not stored or printed —
# presumably inspected interactively in the console.
ranksums(posITDs["RespChan{}".format(chan)], negITDs["RespChan{}".format(chan)])
|
|
|
#%% so with trial and error we found that filePath 11 channel 29 gives reasonably distinct LFP amplitudes for the all pos vs all neg ITDs on average.
# Let's check whether these would make for a good figure to illustrate the logic of the regression analysis
# NOTE(review): `chan` is used below as a 0-based column index into
# swpN.signal, whereas the CSV columns ('RespChan29') are 1-based —
# confirm signal[:,29] really corresponds to RespChan29 and not RespChan30.
chan=29
filePath=filepaths[11]
LFPfilePath, LFPfileName = os.path.split(filePath)
LFPfileName='CLEAN_'+LFPfileName[:-4]
filename = os.path.splitext(filePath)[0]
fname = ntpath.basename(filename)
if fname.find("900")<0:
    clickRate=300
else:
    clickRate=900
loadFile = fname + '_LFPresponses.csv'
print('loading LFP responses from ',loadFile)
allD=pd.read_csv(os.path.join(savePath, loadFile))
swpFilename = PATH+'/'+LFPfilePath+'/'+LFPfileName+'.ephys'
stimPath = PATH+'/'+LFPfilePath+'/'+fname+'.csv'

## read in .ephys file
#    swp, ignore = ep.readEphysFile(PATH+'/'+LFPfilePath+ '/'+LFPfileName)
#    stim = ep.readEphysStimTable((PATH+'/'+filePath)[:-4])

#%
# swp, ignore = ep.readEphysFile(swpFilename) ## this causes error, no stim file found. The CLEAN dataset does not have a corresponding .csv file.
# Row indices of trials where all four clicks had positive / negative ITDs.
posIdx=np.where((allD["ITD 0"]>0) & (allD["ITD 1"]>0) & (allD["ITD 2"]>0) & (allD["ITD 3"]>0))[0]
negIdx=np.where((allD["ITD 0"]<0) & (allD["ITD 1"]<0) & (allD["ITD 2"]<0) & (allD["ITD 3"]<0))[0]
pSwp = []
nSwp = []
# Stream through the cleaned sweeps, collecting channel `chan` traces of the
# all-positive and all-negative trials (sweep order assumed to match allD rows).
swpIdx=0
with open(swpFilename,'rb') as fileObject:
    swpN=ep.MCsweep(fileObject)
    while not swpN.signal is None:
        if swpIdx in posIdx:
            pSwp.append(swpN.signal[:,chan])
        if swpIdx in negIdx:
            nSwp.append(swpN.signal[:,chan])
        swpIdx+=1
        swpN=ep.MCsweep(fileObject)
# NOTE(review): redundant — the `with` block has already closed the file.
fileObject.close()
pSwp=np.array(pSwp)
nSwp=np.array(nSwp)
|
|
|
#%% now plot
# NOTE(review): sample rate is hard-coded here (24414.0625 Hz / 4 after the
# downsample=4 in Step 1) rather than taken from the sweep objects — confirm
# it matches the recordings being plotted.
sampleRate= 6103.515625
# Mean and standard error of the mean across sweeps, converted to mV (*1000).
posMean=pSwp.mean(axis=0)*1000
negMean=nSwp.mean(axis=0)*1000
posSEM=pSwp.std(axis=0)/np.sqrt(pSwp.shape[0])*1000
negSEM=nSwp.std(axis=0)/np.sqrt(nSwp.shape[0])*1000
# Time axis in ms.
taxis=np.arange(pSwp.shape[1])/sampleRate*1000
|
|
|
# Figure 4: mean +/- SEM LFP traces, positive-ITD trials in red, negative in blue.
fig=plt.figure(4, figsize=(4,4))
plt.clf()
plt.fill_between(taxis, posMean - posSEM, posMean + posSEM, color='red', alpha=0.5)
plt.fill_between(taxis, negMean - negSEM, negMean + negSEM, color='blue', alpha=0.5)
plt.xlim([0,30])
plt.xlabel('Time (ms)')
plt.ylabel('mV')
# FIX: plt.legend(list) assigns labels to artists in plotting order. The red
# (positive-ITD) band is drawn first, so the original label order
# ['all ITDs negative','all ITDs positive'] labelled the traces the wrong way
# round.
plt.legend(['all ITDs positive','all ITDs negative'])
plt.savefig('Figure4.tif',dpi=300)
plt.savefig('Figure4.svg')
|
|
|
#%% step 4 fit regression model and make figures

# allD=pd.merge(allD,baselineD,left_index=True,right_index=True)
# for cc in range(swp[0].signal.shape[1]):
#     allD['BaselineChan{}'.format(cc+1)]=allD['BaselineChan{}'.format(cc+1)].astype(float)

sc=cond[-3:]
rowName = [sc+' 0',sc+' 1',sc+' 2',sc+' 3']

# Accumulates one 4-row block (one row per click) per data file.
anlResults = pd.DataFrame([ ])
chAll = list(range(1,65))
# NOTE(review): duplicates the blankChans list defined at the top of the file.
chBlank = {8,57, 64}
chList = [ele for ele in chAll if ele not in chBlank]
chList.reverse() # reversing the list order means that later chan 1 will be plotted last, which is good for sticking labels on panels.
# Per-channel column names for p-values, betas and significance flags.
colNameP = []
colNameB = []
colNameS = []
for chN in range(1,65): # naming loop through channels
    chP = 'P_chan' + str(chN)
    colNameP.append(chP)

    chB = 'Beta_chan' + str(chN)
    colNameB.append(chB)

    chS = 'Sig_chan' + str(chN)
    colNameS.append(chS)
# we also create a CSV file that keeps all the betas and pvals for summary statistics
# (closed after the main loop below)
betaFile=open(analysisPath +'/betas_and_pvals.csv','w')
betaFile.write("filename,clickrate,channel,beta1,beta2,beta3,beta4,pval1,pval2,pval3,pval4\n")
|
|
|
# For each data file: regress (log, z-scored, outlier-trimmed) per-trial LFP
# amplitudes on the four per-click ITD/ILD values, collecting betas and
# p-values per channel.
for filePath in filepaths:
    filename = os.path.splitext(filePath)[0]
    fname = ntpath.basename(filename)
    # click rate is encoded in the file name
    if fname.find("900")<0:
        clickRate=300
    else:
        clickRate=900
    loadFile = fname + '_LFPresponses.csv'
    print('loading LFP responses from ',loadFile)
    allD=pd.read_csv(os.path.join(savePath, loadFile))
    sc=cond[-3:]
    chan=1

    #% Ordinary least squares regression
    # build a chan x ITD array of pvals, a chan x ITD array of regression coeffs, and a chan x ITD array of significance outcomes
    pvals=np.zeros((4,64))
    betas=np.zeros((4,64))
    sigs = np.zeros((4,64))
    for chan in chList: # loop through channels
        # NOTE(review): data come from column 'RespChan{chan+1}' but are stored
        # in matrix column `chan` and logged with channel number `chan` —
        # confirm this deliberate one-off labelling shift.
        responses=np.array(allD['RespChan'+str(chan+1)])
        responses=np.log(responses)
        responses=zscore(responses)
        # find indeces of non-outliers (keep): discard trials more than
        # 3 SD above the median
        criterion=np.median(responses)+np.std(responses)*3
        keep=np.where(responses<criterion)
        responses=responses[keep]
        # .loc[keep] uses the positional indices as labels; this relies on
        # allD having its default RangeIndex (true for a fresh read_csv).
        model = sm.OLS(responses, sm.add_constant(allD[[sc+' 0',sc+' 1',sc+' 2',sc+' 3']].loc[keep])).fit()
        # model = sm.OLS(responses, sm.add_constant(allD[[sc+' 0',sc+' 1',sc+' 2',sc+' 3']])).fit()
        #model = sm.OLS(allD['RespChan7'], sm.add_constant(allD[['ILD 0','ILD 1','ILD 2','ILD 3']])).fit()
        # [1:] skips the constant term; NOTE(review): positional [1:] slicing
        # on a pandas Series is deprecated — .iloc[1:] would be explicit.
        pvals[:,chan]=model.pvalues[1:]
        #betas[:,chan]= -model.params[[sc+' 0',sc+' 1',sc+' 2',sc+' 3']]
        # sign-flipped so that positive weights mean "louder/earlier side"
        betas[:,chan]= -model.params[1:]
        betaFile.write("{},{},{}".format(fname,clickRate,chan))
        for clck in range(4):
            betaFile.write(",{}".format(betas[clck,chan]))
        for clck in range(4):
            betaFile.write(",{}".format(pvals[clck,chan]))
        betaFile.write("\n")
|
|
|
+
|
|
|
+
|
|
|
+ ## save regression results
|
|
|
+ # turn significant information into dataframe, 0 = no significance, 1 = significance level P < 0.05
|
|
|
+
|
|
|
+ for par in range(pvals.shape[0]):
|
|
|
+ if pvals[par,chan] < 0.05:
|
|
|
+ sigs[par,chan] = 1
|
|
|
+
|
|
|
+ sdf = pd.DataFrame(sigs, columns = colNameS)
|
|
|
+
|
|
|
    # turn betas and pvalues with column names into dataframe
    bvdf = pd.DataFrame(betas, columns = colNameB)
    pvdf = pd.DataFrame(pvals, columns = colNameP)

    # concat betas, pvalues and significance indicators into a big dataframe
    # (one row per click, columns grouped by beta / p-value / significance)
    results = pd.concat([bvdf,pvdf,sdf], axis=1)

    # add rowname and the processed data filename as column
    results.insert(0,'Condition', fname)
    results.insert(1, 'Click_N', rowName)

    # concat results from different datafiles
    anlResults = pd.concat([anlResults, results], ignore_index=True)
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
    ## plot and save TWF weight figures: one 8x8 grid of per-channel weight
    ## curves per data file; significant weights are starred in red.
    plt.figure(2, figsize=(6,6))
    plt.clf();
    plt.rc('font',size=9)
    # symmetric y-limits around zero, 5% above the largest |beta|
    # NOTE(review): `v` is assigned here but never used.
    ylim=v=np.max(np.abs(betas))*1.05
    for dchan in chList: # loop through channels
        plt.subplot(8,8,dchan)
        plt.plot([0,3],[0,0],'k:')
        if dchan==4:
            plt.title(fname)
        #%
        plt.plot(betas[:,dchan])
        plt.ylim([-ylim,ylim])
        #plt.xlabel('Click number')
        #plt.ylabel('Beta value')
        plt.xticks(np.arange(4), ('1', '2', '3', '4'))

        # unshow the x and y tick labels in channels other than chan1
        if not (dchan in [1,9,17,25,33,41,49]):
            plt.yticks([])
        if dchan < 57:
            if not(dchan in [49,56]):
                plt.xticks([])

        # highlight significant values in red
        for par in range(pvals.shape[0]):
            if pvals[par,dchan] < 0.05:
                plt.plot(par,betas[par,dchan],'r*')
        #%plt.title('Channel '+str(chan))
        if dchan==25:
            plt.ylabel('Weight β (1/ms)')
        if dchan==60:
            plt.xlabel('Click #')
        # if dchan==1:
        #     plt.text(-4,2,letter[index],fontsize=16)

    plt.pause(0.05)
    if not os.path.exists(analysisPath+'/TWFweightFigures/'):
        os.makedirs(analysisPath+'/TWFweightFigures/')
    plt.savefig(analysisPath+'/TWFweightFigures/'+fname+'.svg',format='svg')
    # select figures to save as panels for Fig 4
    # NOTE(review): these output paths are hard-coded to one user's home
    # directory and will fail on other machines.
    if fname=='trained_twf_1801_ITD_900Hz_leftCortex':
        plt.text(-4,ylim*2,'A',fontsize=16)
        plt.savefig('/home/jan/jan/document/science/cityu/TWF_ITD_ILD_NormalHearingRats/figures/Fig5A.svg')
    if fname=='trained_twf_1803_ITD_900Hz':
        plt.text(-4,ylim*2,'B',fontsize=16)
        plt.savefig('/home/jan/jan/document/science/cityu/TWF_ITD_ILD_NormalHearingRats/figures/Fig5B.svg')
    if fname=='trained_twf_1804_ITD_900Hz_leftCortex':
        plt.text(-4,ylim*2,'C',fontsize=16)
        plt.savefig('/home/jan/jan/document/science/cityu/TWF_ITD_ILD_NormalHearingRats/figures/Fig5C.svg')
    if fname=='trained_twf_1803_ITD_300Hz_leftCortex':
        plt.text(-4,ylim*2,'D',fontsize=16)
        plt.savefig('/home/jan/jan/document/science/cityu/TWF_ITD_ILD_NormalHearingRats/figures/Fig5D.svg')
|
|
|
+
|
|
|
## save results dataframe as csv file
anlResults.to_csv(analysisPath + "/" + cond + "_cleanedLFP_TWFregressionResults.csv", index = False)
# close the per-channel betas/pvals log opened before the loop
betaFile.close()
print('=======================================')
print('====== Creating OLS TWFs complete =====')
print('=======================================')
|
|
|
+
|
|
|
#%% Let's read in the regression results and make boxplots for Figure 6

# NOTE(review): `alpha` is defined but the 0.05 threshold is hard-coded again
# further below — use one or the other consistently.
alpha=0.05
# Load betas/pvals for the naive-ITD and trained-ITD datasets; row 0 is the
# text header (NaNs under genfromtxt), hence the [1:,...] slices below.
bNp=np.genfromtxt('/twinkle/ephys/2/Cecilia_TWF/Analysis/Analysis_AC_ECoG_TWF_ITD_SparseData/betas_and_pvals.csv', delimiter=',')
bNp2=np.genfromtxt('/twinkle/ephys/2/Cecilia_TWF/Analysis/Analysis_AC_ECoG_TWF_trained_ITD_SparseData/betas_and_pvals.csv', delimiter=',')
# columns: 0 filename, 1 clickrate, 2 channel, 3-6 beta1..4, 7-10 pval1..4
allBeta=np.vstack((bNp[1:,3:7],bNp2[1:,3:7]))
# NOTE(review): the file has 11 columns, so 7:12 silently clips to 7:11 —
# it works, but 7:11 would state the intent (four p-value columns).
allP=np.vstack((bNp[1:,7:12],bNp2[1:,7:12]))
cRate=np.hstack((bNp[1:,1],bNp2[1:,1]))
del bNp, bNp2
|
|
|
+#%%
|
|
|
def myBoxPlot(X):
    """Draw a boxplot of X with grey medians and small grey outlier markers.

    Returns the artist dictionary produced by plt.boxplot.
    """
    box = plt.boxplot(X)
    plt.ylim([-0.01, 1.2])
    for median, flier in zip(box['medians'], box['fliers']):
        median.set_color([0.4, 0.4, 0.4])
        median.set_linewidth(2)
        flier.set_markersize(2)
        flier.set_markeredgecolor([0.6, 0.6, 0.6])
    return box
|
|
|
+
|
|
|
# Split betas/pvals by click rate.
idx300=np.where(cRate==300)
beta300=allBeta[idx300,:].squeeze()
pv300=allP[idx300,:].squeeze()
idx900=np.where(cRate==900)
beta900=allBeta[idx900,:].squeeze()
pv900=allP[idx900,:].squeeze()
# 2x2 panel figure: all betas (top) and significant-only betas (bottom),
# 300 Hz left, 900 Hz right.
plt.figure(5,figsize=(6,6))
plt.clf()
ax=plt.subplot(2,2,1)
myBoxPlot(np.abs(beta300))
# NOTE(review): doubled closing parenthesis in this label string (also below).
plt.ylabel('Weight β (1/ms))')
plt.xticks([])
plt.title('All β, 300 Hz')
ax=plt.subplot(2,2,2)
myBoxPlot(np.abs(beta900))
plt.xticks([])
plt.yticks([])
plt.title('All β, 900 Hz')
# significance masks (NOTE(review): hard-coded 0.05 instead of `alpha`)
sig300=(pv300<0.05)
sig900=(pv900<0.05)
# per-click lists of only the significant weights
sigWeights300=[]
sigWeights900=[]
for clkIdx in range(4):
    sigWeights300.append(beta300[np.where(sig300[:,clkIdx]),clkIdx].squeeze())
    sigWeights900.append(beta900[np.where(sig900[:,clkIdx]),clkIdx].squeeze())
ax=plt.subplot(2,2,3)
myBoxPlot(np.abs(sigWeights300))
plt.xlabel('Click #')
plt.ylabel('Weight β (1/ms))')
plt.title('Significant β, 300 Hz')

ax=plt.subplot(2,2,4)
bp=myBoxPlot(np.abs(sigWeights900))
plt.yticks([])
plt.xlabel('Click #')
plt.title('Significant β, 900 Hz')

#%%
# NOTE(review): hard-coded personal output paths.
plt.savefig('/home/jan/jan/document/science/cityu/TWF_ITD_ILD_NormalHearingRats/figures/BoxplotsFig6.svg')
plt.savefig('/home/jan/jan/document/science/cityu/TWF_ITD_ILD_NormalHearingRats/figures/BoxplotsFig6.png')
|