
added new SIT and sound debugging notebooks

dianamaro · 2 years ago · commit 5d9e54f2af
6 changed files with 774 additions and 81 deletions
  1. .gitignore        (+1 -0)
  2. Experiment.ipynb  (+126 -77)
  3. requirements.txt  (+5 -0)
  4. defaults.json     (+8 -4)
  5. sit.ipynb         (+557 -0)
  6. sound.ipynb       (+77 -0)

+ 1 - 0
.gitignore

@@ -5,6 +5,7 @@
 *.xlsx
 *.avi
 sessions*
+settings_*
 
 # ipython temp files
 .ipynb_checkpoints

+ 126 - 77
Experiment.ipynb

@@ -32,7 +32,7 @@
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "4.2.0\n"
+      "4.5.2\n"
      ]
     }
    ],
@@ -90,11 +90,11 @@
       "    \"experiment_type\": \"aSIT\",\n",
       "    \"background_color\": \"T\",\n",
       "    \"init_duration\": 0.2,\n",
-      "    \"arena_x\": 400,\n",
-      "    \"arena_y\": 300,\n",
-      "    \"arena_radius\": 300,\n",
+      "    \"arena_x\": 650,\n",
+      "    \"arena_y\": 500,\n",
+      "    \"arena_radius\": 430,\n",
       "    \"distractor_island\": 0,\n",
-      "    \"experiment_date\": \"2020-06-12_09-25-16\"\n",
+      "    \"experiment_date\": \"2021-06-28_17-21-54\"\n",
       "}\n"
      ]
     }
@@ -124,11 +124,36 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 12,
    "metadata": {
     "scrolled": true
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "EXPERIMENT ID: 003901_aSIT_2021-06-28_17-21-54\n",
+      "Trials per session: 50\n",
+      "Session duration [s]: 3600\n",
+      "Trial duration [s]: 60\n",
+      "Radius of the starting platform [pixels]: 67\n",
+      "X-position of the starting platform [pixels]: 650\n",
+      "Y-position of the starting platform [pixels]: 500\n",
+      "Radius of the target platform [pixels]: 80\n",
+      "Target duration [s]: 5\n",
+      "Subject: 003901\n",
+      "Experiment type: aSIT\n",
+      "Subject is darker than background [T = True; F = False]: T\n",
+      "Initialisation Duration [s]: 0.2\n",
+      "Arena X coordinate [pixels]: 650\n",
+      "Arena Y coordinate [pixels]: 500\n",
+      "Arena radius [pixels]: 430\n",
+      "Enable distractor island [0/1]: 0\n",
+      "Experiment date: 2021-06-28_17-21-54\n"
+     ]
+    }
+   ],
    "source": [
     "def show_entry_fields():\n",
     "    print(\"EXPERIMENT ID: %s\" % experiment_id)\n",
@@ -261,10 +286,10 @@
     "        \n",
     "        \n",
     "# Initialize REAL Arduino feeder if connected\n",
-    "#board = RealFeeder('COM3')    # May be another COM-Port - in Windows, just check the Hardware Manager\n",
+    "board = RealFeeder('COM10')    # May be another COM-Port - in Windows, just check the Hardware Manager\n",
     "\n",
     "# OR initialize FAKE Arduino feeder for testing purposes. It will just print message here when feeding\n",
-    "board = FakeFeeder()"
+    "#board = FakeFeeder()"
    ]
   },
   {
@@ -366,15 +391,15 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 17,
    "metadata": {},
    "outputs": [],
    "source": [
     "# Define video capture device (0 = webcam1) to capture background frame\n",
-    "cap = cv2.VideoCapture(0)\n",
+    "cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n",
     "# Set picture dimensions\n",
-    "cap.set(3,800)      # Width\n",
-    "cap.set(4,600)      # Height\n",
+    "cap.set(3,1200)      # Width\n",
+    "cap.set(4,880)      # Height\n",
     "\n",
     "# Capture Background frame (c = capture)\n",
     "while(True):\n",
@@ -384,16 +409,17 @@
     "\n",
     "    # Display the resulting frame\n",
     "    imgArena = cv2.circle(img, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], (0,0,255), 2)\n",
-    "    imgArenaStart = cv2.circle(imgArena, (cfg['start_x'], cfg['start_x']), cfg['start_radius'], (255,0,255), 2)\n",
+    "    #imgArenaStart = cv2.circle(imgArena, (cfg['start_x'], cfg['start_x']), cfg['start_radius'], (255,0,255), 2)\n",
     "\n",
     "    # Mask the space outside the arena\n",
-    "    mask = np.zeros(shape = img.shape, dtype = \"uint8\")\n",
+    "    mask = np.zeros(shape=img.shape, dtype=\"uint8\")\n",
     "    cv2.circle(mask, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], (255,255,255), -1)\n",
     "\n",
     "    maskedImg2 = cv2.bitwise_and(src1 = img2, src2 = mask)\n",
-    "    imgArenaStart = cv2.bitwise_and(src1 = imgArenaStart, src2 = mask)\n",
+    "    #imgArenaStart = cv2.bitwise_and(src1 = imgArenaStart, src2 = mask)\n",
+    "    imgArena = cv2.bitwise_and(src1=imgArena, src2=mask)\n",
     "\n",
-    "    cv2.imshow('Press (c)-to capture the background image',imgArenaStart)\n",
+    "    cv2.imshow('Press (c)-to capture the background image', imgArena)\n",
     "    if cv2.waitKey(1) & 0xFF == ord('c'):\n",
     "        cv2.imwrite(os.path.join(save_to, 'background.png'), maskedImg2)\n",
     "        break\n",
@@ -403,7 +429,7 @@
     "cv2.destroyAllWindows()\n",
     "\n",
     "# Loads current background as object img for later use\n",
-    "img = cv2.imread(os.path.join(save_to, 'background.png'), 1)"
+    "background = cv2.imread(os.path.join(save_to, 'background.png'), 1)"
    ]
   },
   {
@@ -419,22 +445,22 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": 31,
    "metadata": {},
    "outputs": [],
    "source": [
     "# Define video capture device for live-stream (0 = webcam1)\n",
-    "cap2 = cv2.VideoCapture(0)\n",
+    "cap2 = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n",
     "# Set picture dimensions\n",
-    "cap2.set(3,800)\n",
-    "cap2.set(4,600)\n",
+    "cap2.set(3,1200)\n",
+    "cap2.set(4,880)\n",
     "\n",
     "# Show video to see animal leaving the box\n",
     "while(True):\n",
     "    # Capture frame-by-frame\n",
     "    ret, img3 = cap2.read()\n",
     "    \n",
-    "    cv2.imshow('Press (c)-to continue',img3)\n",
+    "    cv2.imshow('Press (c)-to continue', img3)\n",
     "    if cv2.waitKey(1) & 0xFF == ord('c'):\n",
     "        break\n",
     "\n",
@@ -442,44 +468,6 @@
     "cv2.destroyAllWindows()"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Initialize the camera\n",
-    "\n",
-    "This cell initializes the camera for the actual tracking and defines some counters and initial variables needed during the experiment."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Define video capture device for live-stream (0 = webcam1) and tracking\n",
-    "cap = cv2.VideoCapture(0)\n",
-    "# Set picture dimensions\n",
-    "cap.set(3,800)\n",
-    "cap.set(4,600)\n",
-    "\n",
-    "# Mask the space outside the arena\n",
-    "mask = np.zeros(shape = img.shape, dtype = \"uint8\")\n",
-    "cv2.circle(mask, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], (255,255,255), -1)\n",
-    "\n",
-    "# Experiment starts in phase 0 with 0 trials\n",
-    "expPhase = 0   \n",
-    "trialCounter = 0\n",
-    "rewardCounter = 0\n",
-    "frameCounter = 0\n",
-    "trialCountdown = 0\n",
-    "targetCountdown = 0\n",
-    "\n",
-    "# Initial values for target area\n",
-    "targetX, targetY = 0, 0\n",
-    "distractorX, distractorY = 0, 0"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -491,7 +479,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -530,7 +518,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 34,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -545,6 +533,44 @@
     "root.mainloop()"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Initialize the camera\n",
+    "\n",
+    "This cell initializes the camera for the actual tracking and defines some counters and initial variables needed during the experiment."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 43,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Define video capture device for live-stream (0 = webcam1) and tracking\n",
+    "cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n",
+    "# Set picture dimensions\n",
+    "cap2.set(3,1200)\n",
+    "cap2.set(4,880)\n",
+    "\n",
+    "# Mask the space outside the arena\n",
+    "mask = np.zeros(shape = img.shape, dtype = \"uint8\")\n",
+    "cv2.circle(mask, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], (255,255,255), -1)\n",
+    "\n",
+    "# Experiment starts in phase 0 with 0 trials\n",
+    "expPhase = 0\n",
+    "trialCounter = 0\n",
+    "rewardCounter = 0\n",
+    "frameCounter = 0\n",
+    "trialCountdown = 0\n",
+    "targetCountdown = 0\n",
+    "\n",
+    "# Initial values for target area\n",
+    "targetX, targetY = 0, 0\n",
+    "distractorX, distractorY = 0, 0"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -564,23 +590,35 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": 21,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Fake Arduino - exiting...\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "def get_random_x_y():\n",
     "    alpha = 2 * math.pi * random.random()   # random angle\n",
     "    r = (cfg['arena_radius'] - 20 - cfg['target_radius']) * math.sqrt(random.random())  # random radius\n",
     "    return int(r * math.cos(alpha) + cfg['arena_x']), int(r * math.sin(alpha) + cfg['arena_y'])\n",
     "\n",
+    "\n",
+    "# Define video capture device for live-stream (0 = webcam1) and tracking\n",
+    "cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n",
+    "# Set picture dimensions\n",
+    "cap.set(3,1200)\n",
+    "cap.set(4,880)\n",
+    "\n",
+    "# Experiment starts in phase 0 with 0 trials\n",
+    "expPhase = 0\n",
+    "trialCounter = 0\n",
+    "rewardCounter = 0\n",
+    "frameCounter = 0\n",
+    "trialCountdown = 0\n",
+    "targetCountdown = 0\n",
+    "\n",
+    "# Initial values for target area\n",
+    "targetX, targetY = 0, 0\n",
+    "distractorX, distractorY = 0, 0\n",
+    "\n",
+    "\n",
     "# Define and start the experiment timer\n",
     "expTime = time.time()\n",
     "\n",
@@ -601,12 +639,16 @@
     "    ret, frame = cap.read()\n",
     "    if not ret == True:\n",
     "        break\n",
+    "\n",
+    "    # Mask the space outside the arena\n",
+    "    mask = np.zeros(shape = frame.shape, dtype = \"uint8\")\n",
+    "    cv2.circle(mask, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], (255,255,255), -1)        \n",
     "        \n",
     "    maskedFrame = cv2.bitwise_and(src1=frame, src2=mask)\n",
     "\n",
     "    ## Animal tracking\n",
     "    # Substracts background from current frame\n",
-    "    subject = cv2.subtract(img, maskedFrame) if cfg['background_color'] == 'T' else cv2.subtract(maskedFrame, img)\n",
+    "    subject = cv2.subtract(background, maskedFrame) if cfg['background_color'] == 'T' else cv2.subtract(maskedFrame, background)\n",
     "\n",
     "    # Converts subject to grey scale\n",
     "    subjectGray = cv2.cvtColor(subject, cv2.COLOR_BGR2GRAY)\n",
@@ -629,8 +671,8 @@
     "    if (len(contours) == 0):\n",
     "        x = 20\n",
     "        y = 40\n",
-    "        subjectHullCentroid = np.zeros(frame.shape,np.uint8)\n",
-    "        subjectHullCentroid = cv2.circle(subjectHullCentroid, (x,y), 3, BGR_COLOR['yellow'], -1)\n",
+    "        #subjectHullCentroid = np.zeros(frame.shape,np.uint8)\n",
+    "        #subjectHullCentroid = cv2.circle(subjectHullCentroid, (x,y), 3, BGR_COLOR['yellow'], -1)\n",
     "\n",
     "    # If there is a subject, it is tracked\n",
     "    else:\n",
@@ -847,6 +889,13 @@
     "cv2.destroyAllWindows()\n",
     "board.exit()"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {
@@ -865,7 +914,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.2"
+   "version": "3.8.8"
   }
  },
  "nbformat": 4,

+ 5 - 0
requirements.txt

@@ -0,0 +1,5 @@
+jupyter==1.0.0
+pyFirmata==1.1.0
+numpy==1.18.4
+opencv-python==4.2.0.34
+sounddevice

+ 8 - 4
defaults.json

@@ -11,8 +11,12 @@
     "experiment_type": "aSIT",
     "background_color": "T",
     "init_duration": 0.2,
-    "arena_x": 400,
-    "arena_y": 300,
-    "arena_radius": 300,
-    "distractor_island": 0
+    "resolution_x": 1200,
+    "resolution_y": 880,    
+    "arena_x": 650,
+    "arena_y": 500,
+    "arena_radius": 430,
+    "distractor_island": 0,
+    "capture_background": false,
+    "MCSArduinoPort": "COM10"    
 }
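
The new `capture_background` and `MCSArduinoPort` keys work together with the `settings_*` rule added to .gitignore: per-user presets are local copies of the defaults that never enter version control. A sketch of that workflow, using `settings_elena.json` (the example name from sit.ipynb):

```python
import json
import shutil

# Start a personal preset from the shipped defaults; anything matching
# settings_* is ignored by git, so local tweaks stay out of commits
shutil.copyfile('defaults.json', 'settings_elena.json')

with open('settings_elena.json') as f:
    cfg = json.load(f)

cfg['MCSArduinoPort'] = 'fake'    # use the FakeArduino, no hardware needed
cfg['capture_background'] = True  # take a fresh background image this session

with open('settings_elena.json', 'w') as f:
    json.dump(cfg, f, indent=4)
```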

+ 557 - 0
sit.ipynb

@@ -0,0 +1,557 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Simple animal position tracking"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To run this notebook:\n",
+    "\n",
+    "- open Anaconda prompt via Windows start button -> type \"Anac..\" -> launch\n",
+    "- in Anaconda prompt type \"jupyter notebook\"\n",
+    "- in the Jupyter browser go to Desktop/postrack and click on \"experiment\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Import modules\n",
+    "\n",
+    "Make sure that Python 3 and the following modules (recommended version ID) are installed on your computer:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 32,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "jupyter==1.0.0\n",
+      "pyFirmata==1.1.0\n",
+      "numpy==1.18.4\n",
+      "opencv-python==4.2.0.34\n",
+      "sounddevice\n"
+     ]
+    }
+   ],
+   "source": [
+    "# to install with pip run:\n",
+    "# pip install -r requirements.txt\n",
+    "\n",
+    "with open('requirements.txt', 'r') as f:\n",
+    "    print(''.join(f.readlines()))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np                        # Import numpy module\n",
+    "import sounddevice as sd                  # Import sounddevice module for \"real-time\" sound playback\n",
+    "\n",
+    "from pyfirmata import Arduino             # Arduino support\n",
+    "from collections import OrderedDict       # keep order of session settings\n",
+    "from scipy.io  import wavfile             # WAV-file import filter\n",
+    "\n",
+    "import cv2                                # Import opencv module for image processing\n",
+    "import threading                          # use a separate thread for TTL pulses\n",
+    "import math                               # Import math module\n",
+    "import time                               # Import time module for time measurements and pausing\n",
+    "import random                             # Import random module for random number generation\n",
+    "import json                               # JSON to read / write session settings\n",
+    "import datetime                           # session date/time management\n",
+    "import os, shutil                         # file/folder path handling"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Load experiment settings\n",
+    "\n",
+    "For every experimental cofiguration you can copy the original 'settings.json' file, build your own specific experimental preset, save it in this folder as e.g. 'settings_elena.json' and load it here instead of 'settings.json'."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "settings_filename = 'settings_test.json'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\n",
+      "    \"trial_number\": 50,\n",
+      "    \"session_duration\": 3600,\n",
+      "    \"trial_duration\": 60,\n",
+      "    \"start_radius\": 67,\n",
+      "    \"start_x\": 195,\n",
+      "    \"start_y\": 195,\n",
+      "    \"target_radius\": 80,\n",
+      "    \"target_duration\": 5,\n",
+      "    \"subject\": \"003901\",\n",
+      "    \"experiment_type\": \"aSIT\",\n",
+      "    \"background_color\": \"T\",\n",
+      "    \"init_duration\": 0.2,\n",
+      "    \"resolution_x\": 1200,\n",
+      "    \"resolution_y\": 880,\n",
+      "    \"arena_x\": 650,\n",
+      "    \"arena_y\": 500,\n",
+      "    \"arena_radius\": 430,\n",
+      "    \"distractor_island\": 0,\n",
+      "    \"capture_background\": false,\n",
+      "    \"MCSArduinoPort\": \"fake\",\n",
+      "    \"experiment_date\": \"2021-07-01_10-31-19\"\n",
+      "}\n"
+     ]
+    }
+   ],
+   "source": [
+    "with open(settings_filename) as json_file:\n",
+    "    cfg = OrderedDict(json.load(json_file))\n",
+    "cfg['experiment_date'] = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n",
+    "\n",
+    "print(json.dumps(cfg, indent=4))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Initialize session folder\n",
+    "\n",
+    "Run the upcoming cell, to create a session folder and to save the chosen experimetal parameters to a JSON-file (\"experiment_id_parameters.json\"). The session folder will be created here where this notebook is located."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# This session's protocols will be saved to this folder\n",
+    "experiment_id = \"%s_%s_%s\" % (cfg['subject'], cfg['experiment_type'], cfg['experiment_date'])\n",
+    "save_to = os.path.join('sessions', experiment_id)\n",
+    "             \n",
+    "if not os.path.exists(save_to):\n",
+    "    os.makedirs(save_to)\n",
+    "\n",
+    "# Saves all parameters to a JSON file with the user-defined \"Experiment ID\" as filename\n",
+    "with open(os.path.join(save_to, experiment_id + '_parameters.json'), 'w') as f:\n",
+    "    json.dump(cfg, f, indent=4)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Prepare the audio stream\n",
+    "\n",
+    "The following cell initiates the audio stream, to which we will later feed our stimuli. The default sample rate is set to 44.1 kHz. The cell also loads sound files with the stimuli. Here, we use short pure tones as stimuli and a silent sound object, which is fed to the audiostream between stimuli. In our setup, we found this to be necessarry to reduce undesired clicking sounds at stimulus on- and offset, even though the sounds are ramped. Whether this will be necessary for you, will strongly depend on your audio hardware. \n",
+    "\n",
+    "The audio stimulation provided by this notebook differs from the MATLAB version in two important aspects: Firstly, the MATLAB version generates the stimuli on the fly, while this notebook uses sound files as input. Feel free to change the code if you prefer the other solution. Secondly, the MATLAB version stimulates at fixed time intervals and the sample rate of the video tracking is locked to the stimulation interval, i.e. high temporal precision in the sound stimulation comes with the cost of lower temporal resolution of the animal tracking. Here, we chose the opposite approach, with the video feed defining the cycle frequency (approx. 14 Hz with the given Camera and a resolution of 800x600 px) and the audio stimulation being locked to the framerate of the camera. Thus, higher temporal resolution of the animal tracking comes with the cost that inter-stimulus intervals cannot freely be chosen, but only be multiple integers (3 or higher) of the mean video frame duration. In the example setup and the code below, we decided for the stimulus to be played every three cycles (approx. every 215 ms). \n",
+    "\n",
+    "The duration of the audio files should not exceed the cycle length."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 48,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "<ipython-input-48-a2b25e82ec4e>:12: WavFileWarning: Chunk (non-data) not understood, skipping it.\n",
+      "  sound_foraging = wavfile.read(os.path.join('assets', '6000Hz-short-68.wav'))[1]\n",
+      "<ipython-input-48-a2b25e82ec4e>:13: WavFileWarning: Chunk (non-data) not understood, skipping it.\n",
+      "  sound_distractor = wavfile.read(os.path.join('assets', '10kHz-short-68.wav'))[1]\n",
+      "<ipython-input-48-a2b25e82ec4e>:14: WavFileWarning: Chunk (non-data) not understood, skipping it.\n",
+      "  sound_target = wavfile.read(os.path.join('assets', '4000Hz-short-68.wav'))[1]\n",
+      "<ipython-input-48-a2b25e82ec4e>:15: WavFileWarning: Chunk (non-data) not understood, skipping it.\n",
+      "  sound_silence = wavfile.read(os.path.join('assets', 'silence-short-68.wav'))[1]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Set sample rate for audio output\n",
+    "sd.default.samplerate = 44100\n",
+    "fs = 44100          \n",
+    "\n",
+    "# Audio stream\n",
+    "stream = sd.OutputStream(samplerate=fs, channels=1, dtype='float32')\n",
+    "\n",
+    "# Cycle counter: sound is played every \"delay_cycles\" cycles (video frames)\n",
+    "delay_cycles = 3  \n",
+    "\n",
+    "# Open sound files\n",
+    "sound_foraging = wavfile.read(os.path.join('assets', '6000Hz-short-68.wav'))[1]\n",
+    "sound_distractor = wavfile.read(os.path.join('assets', '10kHz-short-68.wav'))[1]\n",
+    "sound_target = wavfile.read(os.path.join('assets', '4000Hz-short-68.wav'))[1]\n",
+    "sound_silence = wavfile.read(os.path.join('assets', 'silence-short-68.wav'))[1]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Initialize the microcontroller to sync with the Acquisition system\n",
+    "\n",
+    "The next cell initializes a microcontroller to send a TTL pulse at the time the first detected animal position is written. If you DON'T have a real arduino connected, you can just still run this experiment with the Fake Arduino. The Fake Arduino will just print the text message here in this notebook when sending a pulse."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pin_diode = 13\n",
+    "pin_TTL = 6\n",
+    "\n",
+    "class MCSArduino(Arduino):\n",
+    "    def __init__(self, *args, **kwargs):\n",
+    "        self.last_cmd = False  # False - Arduino LOW, True - Arduino HIGH\n",
+    "        super(MCSArduino, self).__init__(*args, **kwargs)\n",
+    "        \n",
+    "    def start_or_stop(self):\n",
+    "        self.last_cmd = not self.last_cmd\n",
+    "        self.digital[pin_diode].write(self.last_cmd)\n",
+    "        self.digital[pin_TTL].write(self.last_cmd)\n",
+    "\n",
+    "class FakeArduino():\n",
+    "    def start_or_stop(self):\n",
+    "        print(\"Fake Arduino - sending a TTL pulse\")\n",
+    "        \n",
+    "    def exit(self):\n",
+    "        print(\"Fake Arduino - exiting...\")\n",
+    "        \n",
+    "# Initialize REAL Arduino if connected\n",
+    "if cfg['MCSArduinoPort'] == 'fake':\n",
+    "    board = FakeArduino()\n",
+    "else:\n",
+    "    board = MCSArduino(cfg['MCSArduinoPort'])  # Windows - 'COM10', Linux - '/dev/ttyACM0', check /dev/tty*"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Initialize the position log file\n",
+    "\n",
+    "The following cell generates a CSV-file to which the essential data (i.e. animal position, positions of the target areas, etc.) from each cycle (video frame) of the experiment will be saved. The CSV-file (\"ExperimentID_protocol.csv\") will be saved to the session folder inside the folder containing this notebook. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def log_frame_data(args):\n",
+    "    with open(os.path.join(save_to, experiment_id + '_protocol.csv'), 'a') as f:\n",
+    "        f.write(\",\".join([str(x) for x in args]) + \"\\n\")\n",
+    "\n",
+    "headers = [\n",
+    "    'time',       # Time stamp\n",
+    "    'animal_x',   # X-Coordinate of the subject\n",
+    "    'animal_y',   # Y-Coordinate of the subject\n",
+    "]\n",
+    "\n",
+    "log_frame_data(headers)   # saves headers to the log file"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Capture a background image\n",
+    "\n",
+    "The tracking algorithm used in this notebook compares the frames of the video feed during the experiment with an image of the empty arena to later track the position of the largest object in the arena (which usually is your animal). If you are confident in the stability of your video quality, it should suffice to capture the picture once and to skip this cell in the subsequent experiments. However, since this step only takes a few seconds, we recommend to take a new picture of the arena for each new experiment. In the preview of the video feed that will pop-up if you run the next cell, the space outside the arena is masked, so that the camera preview can also be used to check if the camera/arena are still positioned correctly. \n",
+    "\n",
+    "Before taking the picture, make sure that the conditions in your lab (especially the illumination) are the exact same as they will be during the experiments. Once you are happy with the preview of your background image, press \"c\" to capture the image. It will be saved as \"background.png\" to the session folder containing this notebook.\n",
+    "\n",
+    "This notebook will use the main camera of your system as an input device. If you have more than one camera installed (e.g. on a notebook with internal chat camera), make sure to deactivate all cameras other than the camera of your setup  prior to running the notebook. Also make sure that the video dimensions defined here match you arena dimensions defined above and the video dimensions of the video feeds that will be defined in the subsequent cells."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "if cfg['capture_background']:\n",
+    "    # Define video capture device (0 = webcam1) to capture background frame\n",
+    "    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # if slow https://github.com/opencv/opencv/issues/17687\n",
+    "\n",
+    "    # Set picture dimensions\n",
+    "    cap.set(cv2.CAP_PROP_FRAME_WIDTH, cfg['resolution_x'])\n",
+    "    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cfg['resolution_y'])\n",
+    "\n",
+    "    # Capture Background frame (c = capture)\n",
+    "    while(True):\n",
+    "        ret, img = cap.read()\n",
+    "\n",
+    "        # Draw the arena area\n",
+    "        img_arena = cv2.circle(img, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], BGR_COLOR['red'], 2)\n",
+    "\n",
+    "        # Mask the space outside the arena\n",
+    "        mask = np.zeros(shape = img.shape, dtype = \"uint8\")\n",
+    "        cv2.circle(mask, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], BGR_COLOR['white'], -1)\n",
+    "        img_arena = cv2.bitwise_and(src1=img_arena, src2=mask)\n",
+    "\n",
+    "        # text for buttons\n",
+    "        img_arena = cv2.putText(img_arena, \"c-capture, q-quit\", (10, 20), cv2.FONT_HERSHEY_DUPLEX, .5, BGR_COLOR['white'])\n",
+    "\n",
+    "        # Display the resulting frame\n",
+    "        cv2.imshow('Press (c)-to capture the background image', img_arena)\n",
+    "\n",
+    "        k = cv2.waitKey(33)\n",
+    "        if k == ord('c'):\n",
+    "            cv2.imwrite(os.path.join(save_to, 'background.png'), img_arena)\n",
+    "            break\n",
+    "        if k == ord('q'):\n",
+    "            break        \n",
+    "\n",
+    "    # When the background image is captured, release the capture\n",
+    "    cap.release()\n",
+    "    cv2.destroyAllWindows()\n",
+    "\n",
+    "else:\n",
+    "    shutil.copyfile(os.path.join('assets', 'background.png'), os.path.join(save_to, 'background.png'))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Start the experiment\n",
+    "\n",
+    "This cell contains code for animal tracking. We hope that the comments provided in the code suffice to understand the individual steps and to adjust them to your own setup and needs, if necessary.\n",
+    "\n",
+    "- press 's' to start recording\n",
+    "- press 's' again to stop recording\n",
+    "- press 'q' to quit\n",
+    "\n",
+    "The experiment will stop automatically if the pre-defined session duration is reached."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 49,
+   "metadata": {
+    "scrolled": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Fake Arduino - sending a TTL pulse\n",
+      "Fake Arduino - sending a TTL pulse\n",
+      "Fake Arduino - exiting...\n"
+     ]
+    }
+   ],
+   "source": [
+    "# increase FPS\n",
+    "# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/\n",
+    "# https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/\n",
+    "\n",
+    "# Define BGR colors\n",
+    "BGR_COLOR = {\n",
+    "    'red': (0,0,255),\n",
+    "    'green': (127,255,0),\n",
+    "    'blue': (255,127,0),\n",
+    "    'yellow': (0,127,255),\n",
+    "    'black': (0,0,0),\n",
+    "    'white': (255,255,255)\n",
+    "}\n",
+    "\n",
+    "# Loads current background as object img\n",
+    "background = cv2.imread(os.path.join(save_to, 'background.png'), 1)\n",
+    "\n",
+    "# Define video capture device for live-stream (0 = webcam1) and tracking\n",
+    "cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n",
+    "\n",
+    "# Set picture dimensions\n",
+    "cap.set(cv2.CAP_PROP_FRAME_WIDTH, cfg['resolution_x'])\n",
+    "cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cfg['resolution_y'])\n",
+    "\n",
+    "# Mask the space outside the arena\n",
+    "mask = np.zeros(shape = background.shape, dtype = \"uint8\")\n",
+    "cv2.circle(mask, (cfg['arena_x'], cfg['arena_y']), cfg['arena_radius'], BGR_COLOR['white'], -1)\n",
+    "\n",
+    "# Define the codec and create VideoWriter object\n",
+    "videoName = os.path.join(save_to, experiment_id + '_video.avi')\n",
+    "fourcc = cv2.VideoWriter_fourcc(*'XVID')\n",
+    "#fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n",
+    "# Make sure that the frame rate of your output appoximately matches \n",
+    "# the number of cycles per second, to avoid time lapsed output videos\n",
+    "out = cv2.VideoWriter(videoName, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))\n",
+    "\n",
+    "# Define and start the experiment timer\n",
+    "exp_time = time.time()\n",
+    "last_timestamp = time.time()\n",
+    "frame_counter = 0\n",
+    "exp_running = False  # indicates if recording has started\n",
+    "\n",
+    "# Start the audio stream\n",
+    "stream.start()\n",
+    "\n",
+    "# Here you can choose different modes of amplitude modulation\n",
+    "ampMod = (random.randrange(2396,2962,1)/100)**np.e/10000 # Unbiased Voltage Ratio -5dB\n",
+    "# ampMod = random.randrange(5623,10001,1)/10000 # Voltage Ratio -5dB\n",
+    "# ampMod = random.randrange(3162,10001,1)/10000 # Power Ratio -5dB\n",
+    "# ampMod = 1 # No modulation\n",
+    "    \n",
+    "while(cap.isOpened() and (time.time() - exp_time) <= cfg['session_duration']):\n",
+    "    \n",
+    "    ret, frame = cap.read()\n",
+    "    if not ret == True:\n",
+    "        break\n",
+    "        \n",
+    "    maskedFrame = cv2.bitwise_and(src1=frame, src2=mask)\n",
+    "\n",
+    "    # Substracts background from current frame\n",
+    "    subject = cv2.subtract(maskedFrame, background)\n",
+    "\n",
+    "    # Converts subject to grey scale\n",
+    "    subject_gray = cv2.cvtColor(subject, cv2.COLOR_BGR2GRAY)\n",
+    "\n",
+    "    # Applies blur and thresholding to the subject\n",
+    "    kernel_size = (25,25)\n",
+    "    frame_blur = cv2.GaussianBlur(subject_gray, kernel_size, 0)\n",
+    "    _, thresh = cv2.threshold(frame_blur, 40, 255, cv2.THRESH_BINARY)\n",
+    "\n",
+    "    # Finds contours and selects the contour with the largest area\n",
+    "    contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n",
+    "\n",
+    "    if (len(contours) == 0):  # If there is no subject, the sreen is blackened\n",
+    "        subject_hull_centroid = np.zeros(frame.shape, np.uint8)\n",
+    "        #subject_hull_centroid = maskedFrame\n",
+    "        subject_hull_centroid = cv2.circle(subject_hull_centroid, (20, 80), 3, BGR_COLOR['yellow'], -1)\n",
+    "    \n",
+    "    else:  # If there is a subject, it is tracked\n",
+    "        contour = contours[np.argmax(list(map(cv2.contourArea, contours)))]\n",
+    "        M = cv2.moments(contour)\n",
+    "        if ((M['m00']) == 0):\n",
+    "            subject_hull_centroid = np.zeros(frame.shape,np.uint8)\n",
+    "            #subject_hull_centroid = maskedFrame\n",
+    "            subject_hull_centroid = cv2.circle(subject_hull_centroid, (20, 60), 3, BGR_COLOR['yellow'], -1)\n",
+    "        else:\n",
+    "            x = int(M['m10'] / M['m00'])\n",
+    "            y = int(M['m01'] / M['m00'])\n",
+    "            hull = cv2.convexHull(contour)\n",
+    "            subject_hull_centroid = maskedFrame\n",
+    "\n",
+    "        # Draws contour and centroid of the subject\n",
+    "        cv2.drawContours(subject_hull_centroid, [contour], 0, BGR_COLOR['green'], 1, cv2.LINE_AA)\n",
+    "        subject_hull_centroid = cv2.circle(subject_hull_centroid, (x,y), 3, BGR_COLOR['yellow'], -1)\n",
+    "\n",
+    "    # Dot signaling experiment waiting / running\n",
+    "    dot_color = 'red' if exp_running else 'green'\n",
+    "    subject_hull_centroid = cv2.circle(subject_hull_centroid, (20, 40), 10, BGR_COLOR[dot_color], -6)\n",
+    "\n",
+    "    # Adds a stopwatch / FPS\n",
+    "    fps = 1.0 / (time.time() - last_timestamp)\n",
+    "    subject_hull_centroid = cv2.putText(subject_hull_centroid,\n",
+    "        str('Time: %.2f; FPS: %.1f' % (time.time() - exp_time, fps) ),\n",
+    "        (10, 20), cv2.FONT_HERSHEY_DUPLEX, .5, BGR_COLOR['white'])\n",
+    "    last_timestamp = time.time()\n",
+    "\n",
+    "    frame_counter += 1\n",
+    "    k = cv2.waitKey(33)\n",
+    "    if k == ord('s'):\n",
+    "        command = \"stop\" if exp_running else \"start\"\n",
+    "        exp_running = not exp_running\n",
+    "        \n",
+    "        t1 = threading.Timer(0, board.start_or_stop, ())\n",
+    "        t1.start()\n",
+    "        \n",
+    "    if k == ord('q'):\n",
+    "        break\n",
+    "\n",
+    "    # save the detected position\n",
+    "    if exp_running:\n",
+    "        log_frame_data([time.time(), x, y])\n",
+    "    \n",
+    "        # Writes the modified frame to the video protocol\n",
+    "        out.write(subject_hull_centroid)\n",
+    "\n",
+    "        if frame_counter % delay_cycles == 0:\n",
+    "            stream.write((sound_foraging*ampMod))\n",
+    "            soundPlayed = 'foraging'\n",
+    "        else:\n",
+    "            stream.write(sound_silence)\n",
+    "            soundPlayed = 'false'  \n",
+    "        \n",
+    "    else:\n",
+    "        stream.write(sound_silence)\n",
+    "        soundPlayed = 'false'\n",
+    "        \n",
+    "    # showing a video frame in a window\n",
+    "    cv2.imshow('Press (q)-to end the experiment', subject_hull_centroid)\n",
+    "    \n",
+    "# release objects    \n",
+    "cap.release()\n",
+    "out.release()\n",
+    "cv2.destroyAllWindows()\n",
+    "board.exit()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
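
The frame-locked stimulation scheme described in the audio cell of sit.ipynb ties the inter-stimulus interval to integer multiples of the video frame duration; a quick check of the quoted numbers, assuming the ~14 Hz feed mentioned there:

```python
fps = 14                    # approximate cycle frequency of the video feed
frame_duration = 1.0 / fps  # ~71 ms per video frame
delay_cycles = 3            # one stimulus every third frame, as in the notebook

isi_ms = delay_cycles * frame_duration * 1000
print('inter-stimulus interval: ~%.0f ms' % isi_ms)  # ~214 ms, i.e. "approx. 215 ms"
```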

+ 77 - 0
sound.ipynb

@@ -0,0 +1,77 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "4b8ede9d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from scipy.io  import wavfile             # WAV-file import filter\n",
+    "\n",
+    "import numpy as np                        # Import numpy module\n",
+    "import sounddevice as sd                  # Import sounddevice module for \"real-time\" sound playback"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "fc1520f9",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Set sample rate for audio output\n",
+    "sd.default.samplerate = 44100\n",
+    "fs = 44100          \n",
+    "\n",
+    "# Audio stream\n",
+    "stream = sd.OutputStream(samplerate=fs, channels=1, dtype='float32')\n",
+    "\n",
+    "# Cycle counter: sound is played every \"delay_cycles\" cycles (video frames)\n",
+    "delay_cycles = 3  \n",
+    "\n",
+    "# Open sound files\n",
+    "sound_foraging = wavfile.read(os.path.join('assets', '6000Hz-short-68.wav'))[1]\n",
+    "sound_distractor = wavfile.read(os.path.join('assets', '10kHz-short-68.wav'))[1]\n",
+    "sound_target = wavfile.read(os.path.join('assets', '4000Hz-short-68.wav'))[1]\n",
+    "sound_silence = wavfile.read(os.path.join('assets', 'silence-short-68.wav'))[1]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6a8f7f52",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "            if frameCounter % delayLength == 0:\n",
+    "                stream.write((distractorSoundOdd1*ampMod))\n",
+    "                soundPlayed = 'true-DistractorOdd1'\n",
+    "            else:\n",
+    "                stream.write(silenceSound)\n",
+    "                soundPlayed = 'false'  "
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
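
The amplitude roving used in sit.ipynb (`ampMod`, labelled "Unbiased Voltage Ratio -5dB") maps a uniform integer draw onto amplitude factors between -5 dB and 0 dB; the endpoints can be checked directly with the constants copied from the experiment cell:

```python
import numpy as np

# Endpoints of ampMod = (random.randrange(2396, 2962, 1) / 100) ** np.e / 10000
lo = (2396 / 100) ** np.e / 10000  # ~0.562 == 10 ** (-5 / 20)
hi = (2961 / 100) ** np.e / 10000  # ~1.0 (randrange's upper bound is exclusive)
print(20 * np.log10([lo, hi]))     # ~[-5.0, -0.0] dB
```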