Forráskód Böngészése

added computing spiking metrics

asobolev 1 éve
szülő
commit
c367c2b532

A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 8 - 8
analysis/behavior.ipynb


A különbségek nem kerülnek megjelenítésre, a fájl túl nagy
+ 172 - 49
postprocessing/execute.ipynb


+ 9 - 2
postprocessing/unit_metrics.ipynb

@@ -2,13 +2,19 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 313,
+   "execution_count": 2,
    "id": "e1e04811",
    "metadata": {},
    "outputs": [],
    "source": [
     "%matplotlib inline\n",
     "\n",
+    "# include modules to the path\n",
+    "import sys, os\n",
+    "parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))\n",
+    "sys.path.append(parent_dir)\n",
+    "sys.path.append(os.path.join(parent_dir, 'session'))\n",
+    "\n",
     "import numpy as np\n",
     "import nbimporter\n",
     "import os, h5py\n",
@@ -16,7 +22,8 @@
     "\n",
     "from scipy import signal\n",
     "from scipy.interpolate import interp1d\n",
-    "from utils import get_sessions_list, load_clu_res, get_sampling_rate"
+    "from session.utils import get_sessions_list, get_sampling_rate\n",
+    "from session.adapters import load_clu_res"
    ]
   },
   {

+ 38 - 5
session/utils.py

@@ -1,10 +1,14 @@
-import os
+import os, json, h5py
 import numpy as np
 import xml.etree.ElementTree as ET
 
 from datetime import datetime
 
 
+def guess_filebase(sessionpath):
+    """Return the session 'filebase': the last path component of *sessionpath*.
+
+    os.path.normpath strips any trailing separator first, so a path like
+    '/data/sess1/' still yields 'sess1' instead of an empty string.
+    """
+    return os.path.basename(os.path.normpath(sessionpath))
+
+
 def get_sessions_list(path_to_sessions_folder, animal):
     def convert(func, *args):
         try:
@@ -21,9 +25,9 @@ def get_sessions_list(path_to_sessions_folder, animal):
             is_dir(x) and has_parts(x) and starts_by_animal(x) and has_timestamp(x)])
 
 
-def get_sampling_rate(where):
-    filebase = os.path.basename(where)
-    xml_path = os.path.join(where, filebase + '.xml')
+def get_sampling_rate(sessionpath):
+    filebase = os.path.basename(sessionpath)
+    xml_path = os.path.join(sessionpath, filebase + '.xml')
     
     if not os.path.exists(xml_path):
         return None
@@ -33,6 +37,19 @@ def get_sampling_rate(where):
     return int(sampling_rate.text)
 
 
+def unit_number_for_electrode(sessionpath, electrode_idx):
+    """Return the number of sorted units for one electrode.
+
+    Loads '<filebase>.clu.<electrode_idx>' from *sessionpath* as a flat
+    array of integer cluster ids and returns the count of distinct ids
+    minus one.
+    """
+    filebase = ''
+    try:
+        filebase = guess_filebase(sessionpath)
+    # NOTE(review): guess_filebase only calls os.path.normpath/basename,
+    # which do not raise ValueError for string input — this handler looks
+    # unreachable; confirm the intended failure mode it was meant to catch.
+    except ValueError:
+        return 0  # no units on this electrode
+
+    # e.g. '<sessionpath>/<filebase>.clu.<electrode_idx>'
+    clu_file = os.path.join(sessionpath, '.'.join([filebase, 'clu', str(electrode_idx)]))
+    cluster_map = np.loadtxt(clu_file, dtype=np.uint16)
+
+    # NOTE(review): in the Neurosuite .clu convention the file's first value
+    # is the total cluster count, not a cluster id — verify whether including
+    # it in np.unique(), combined with the single '-1' correction below,
+    # yields the intended unit count for this dataset's layout.
+    return len(np.unique(cluster_map)) - 1  # 1st cluster is noise
+
+
 def unit_number_for_session(sessionpath):
     electrode_idxs = [x.split('.')[2] for x in os.listdir(sessionpath) if x.find('.clu.') > -1]
 
@@ -46,4 +63,20 @@ def unit_number_for_session(sessionpath):
 
     unit_counts = [unit_number_for_electrode(sessionpath, el_idx) for el_idx in np.unique(idxs)]
 
-    return np.array(unit_counts).sum()
+    return np.array(unit_counts).sum()
+
+
+def get_epochs(sessionpath):
+    """Return session epochs as a (k, 2) array of [start, end] time pairs.
+
+    Reads the experiment configuration stored as a JSON string in the
+    'parameters' attribute of the 'processed' group of '<filebase>.h5'
+    and builds consecutive epochs covering the whole session:
+    [0, tp1], [tp1, tp2], ..., [tpN, session_duration].
+    """
+    filebase = guess_filebase(sessionpath)
+    h5name  = os.path.join(sessionpath, filebase + '.h5')
+
+    # experiment parameters are serialized as a JSON string attribute
+    with h5py.File(h5name, 'r') as f:
+        cfg = json.loads(f['processed'].attrs['parameters'])
+
+    tp = np.array(cfg['experiment']['timepoints'])
+    s_duration = cfg['experiment']['session_duration']
+
+    # duplicate each interior timepoint: it ends one epoch and starts the next
+    tp = np.repeat(tp, 2)
+    epochs = np.concatenate([np.array([0]), tp, np.array([s_duration])])
+
+    # pair up the flat boundary list into [start, end] rows
+    return epochs.reshape(int(len(epochs)/2), 2)