
Browse Source

gin commit from lime

New files: 6
Reema Gupta 4 weeks ago
parent
commit
7dbaa262cd
6 changed files with 258 additions and 0 deletions
  1. dataset_preparation.ipynb (+218 -0)
  2. environment.yml (+15 -0)
  3. metadata.csv (+5 -0)
  4. neural_data.nix (BIN)
  5. session1_spikes.csv (+11 -0)
  6. session2_spikes.csv (+9 -0)

+ 218 - 0
dataset_preparation.ipynb

@@ -0,0 +1,218 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Processing session1...\n",
+      "  Adding Trial 1...\n",
+      "  Adding Trial 2...\n",
+      "Processing session2...\n",
+      "  Adding Trial 3...\n",
+      "  Adding Trial 4...\n",
+      "NIX file created and annotated with metadata.\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Import necessary libraries\n",
+    "import pandas as pd\n",
+    "import nixio as nix\n",
+    "import numpy as np\n",
+    "\n",
+    "# ---------------------------\n",
+    "# Part 1: Read CSV Files\n",
+    "# ---------------------------\n",
+    "\n",
+    "# Read the metadata CSV file\n",
+    "metadata_df = pd.read_csv('metadata.csv')\n",
+    "\n",
+    "# Read spike times CSV files for each session\n",
+    "session_files = ['session1_spikes.csv', 'session2_spikes.csv']\n",
+    "spike_dfs = {}\n",
+    "\n",
+    "for session_file in session_files:\n",
+    "    session_id = session_file.split('_')[0]  # Extract session ID from file name\n",
+    "    spike_dfs[session_id] = pd.read_csv(session_file)\n",
+    "\n",
+    "# ---------------------------\n",
+    "# Part 2: Create NIX File\n",
+    "# ---------------------------\n",
+    "\n",
+    "# Create a new NIX file\n",
+    "nix_file = nix.File.open('neural_data.nix', nix.FileMode.Overwrite)\n",
+    "\n",
+    "# ---------------------------\n",
+    "# Part 3: Add Data to NIX File\n",
+    "# ---------------------------\n",
+    "\n",
+    "# Iterate over sessions\n",
+    "for session_id, spike_df in spike_dfs.items():\n",
+    "    print(f'Processing {session_id}...')\n",
+    "\n",
+    "    # Create a block for the session\n",
+    "    block = nix_file.create_block(f'Session {session_id}', 'nix.session')\n",
+    "    \n",
+    "    # Get trials for this session\n",
+    "    session_trials = metadata_df[metadata_df['session_id'] == session_id]\n",
+    "    \n",
+    "    # Iterate over trials\n",
+    "    for index, trial in session_trials.iterrows():\n",
+    "        trial_id = trial['trial_id']\n",
+    "        print(f'  Adding Trial {trial_id}...')\n",
+    "        \n",
+    "        # Create a group for the trial\n",
+    "        trial_group = block.create_group(f'Trial {int(trial_id)}', 'nix.trial')\n",
+    "        \n",
+    "        # Get spike times for this trial\n",
+    "        trial_spikes = spike_df[spike_df['trial_id'] == trial_id]\n",
+    "        \n",
+    "        # Group spike times by unit_id (channel)\n",
+    "        units = trial_spikes['unit_id'].unique()\n",
+    "        \n",
+    "        for unit in units:\n",
+    "            # Get spike times for the unit\n",
+    "            unit_spike_times = trial_spikes[trial_spikes['unit_id'] == unit]['spike_time'].values\n",
+    "            \n",
+    "            # Create a data array for the spike times with a unique name\n",
+    "            data_array = block.create_data_array(f'Trial {int(trial_id)} Unit {int(unit)} Spike Times', 'nix.data.spike_times', data=unit_spike_times)\n",
+    "            data_array.unit = 's'  # Set the unit of the data\n",
+    "            data_array.label = f'Unit {int(unit)}'  # Set the label for the data array\n",
+    "            \n",
+    "            # Add the data array to the trial group\n",
+    "            trial_group.data_arrays.append(data_array)\n",
+    "        \n",
+    "        # Add trial metadata to the trial group\n",
+    "        trial_metadata = nix_file.create_section(f'Trial {int(trial_id)} Metadata', 'nix.metadata.trial')\n",
+    "        trial_metadata['start_time'] = trial['start_time']\n",
+    "        trial_metadata['end_time'] = trial['end_time']\n",
+    "        trial_metadata['target_1'] = [trial['target_1_x'], trial['target_1_y']]\n",
+    "        trial_metadata['target_2'] = [trial['target_2_x'], trial['target_2_y']]\n",
+    "        trial_metadata['reward_value'] = trial['reward_value']\n",
+    "        \n",
+    "        # Link metadata to the trial group\n",
+    "        trial_group.metadata = trial_metadata\n",
+    "\n",
+    "# ---------------------------\n",
+    "# Part 4: Close NIX File\n",
+    "# ---------------------------\n",
+    "\n",
+    "# Close the NIX file\n",
+    "nix_file.close()\n",
+    "\n",
+    "print('NIX file created and annotated with metadata.')\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Exploring the NIX File\n",
+    "\n",
+    "After creating the NIX file, you can explore it using NIXIO or other tools that support the NIX format, such as for GUI experience, [HDF5 viewer](https://www.hdfgroup.org/downloads/hdfview/)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Block: Session session1\n",
+      "  Group: Trial 1\n",
+      "    DataArray: Trial 1 Unit 1 Spike Times, Label: Unit 1, Unit: s\n",
+      "    DataArray: Trial 1 Unit 2 Spike Times, Label: Unit 2, Unit: s\n",
+      "    Metadata for Trial 1:\n",
+      "      start_time: (0.0,)\n",
+      "      end_time: (10.0,)\n",
+      "      target_1: (5.0, 5.0)\n",
+      "      target_2: (-5.0, 5.0)\n",
+      "      reward_value: (10,)\n",
+      "  Group: Trial 2\n",
+      "    DataArray: Trial 2 Unit 1 Spike Times, Label: Unit 1, Unit: s\n",
+      "    DataArray: Trial 2 Unit 2 Spike Times, Label: Unit 2, Unit: s\n",
+      "    Metadata for Trial 2:\n",
+      "      start_time: (11.0,)\n",
+      "      end_time: (21.0,)\n",
+      "      target_1: (5.0, 5.0)\n",
+      "      target_2: (-5.0, 5.0)\n",
+      "      reward_value: (5,)\n",
+      "Block: Session session2\n",
+      "  Group: Trial 3\n",
+      "    DataArray: Trial 3 Unit 1 Spike Times, Label: Unit 1, Unit: s\n",
+      "    DataArray: Trial 3 Unit 2 Spike Times, Label: Unit 2, Unit: s\n",
+      "    Metadata for Trial 3:\n",
+      "      start_time: (0.0,)\n",
+      "      end_time: (9.5,)\n",
+      "      target_1: (5.0, 5.0)\n",
+      "      target_2: (-5.0, 5.0)\n",
+      "      reward_value: (15,)\n",
+      "  Group: Trial 4\n",
+      "    DataArray: Trial 4 Unit 1 Spike Times, Label: Unit 1, Unit: s\n",
+      "    DataArray: Trial 4 Unit 2 Spike Times, Label: Unit 2, Unit: s\n",
+      "    Metadata for Trial 4:\n",
+      "      start_time: (10.0,)\n",
+      "      end_time: (20.0,)\n",
+      "      target_1: (5.0, 5.0)\n",
+      "      target_2: (-5.0, 5.0)\n",
+      "      reward_value: (10,)\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Open the NIX file\n",
+    "nix_file = nix.File.open('neural_data.nix', nix.FileMode.ReadOnly)\n",
+    "\n",
+    "# List all blocks (sessions)\n",
+    "for block in nix_file.blocks:\n",
+    "    print(f'Block: {block.name}')\n",
+    "    \n",
+    "    # List all groups (trials) in the block\n",
+    "    for group in block.groups:\n",
+    "        print(f'  Group: {group.name}')\n",
+    "        \n",
+    "        # List all data arrays (units) in the group\n",
+    "        for da in group.data_arrays:\n",
+    "            print(f'    DataArray: {da.name}, Label: {da.label}, Unit: {da.unit}')\n",
+    "        \n",
+    "        # Access metadata\n",
+    "        if group.metadata:\n",
+    "            print(f'    Metadata for {group.name}:')\n",
+    "            for prop in group.metadata.props:\n",
+    "                print(f'      {prop.name}: {prop.values}')\n",
+    "                \n",
+    "# Close the NIX file\n",
+    "nix_file.close()\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "andani-dataset4",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.13.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
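
Since NIX files use HDF5 as their on-disk format, the notebook's suggestion to open them in an HDF5 viewer also works programmatically. Below is a minimal sketch using h5py; note that h5py is an assumption here, as it is not listed in environment.yml and would need to be installed separately.

    import h5py

    # NIX files are HDF5 containers, so generic HDF5 tools can inspect them
    # (h5py is assumed to be installed; it is not part of environment.yml)
    with h5py.File('neural_data.nix', 'r') as f:
        # Recursively print the path of every group and dataset in the file
        f.visit(print)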

+ 15 - 0
environment.yml

@@ -0,0 +1,15 @@
+name: andani-dataset4
+channels:
+  - conda-forge
+  - defaults
+dependencies:
+  - python
+  - pandas
+  - numpy
+  - matplotlib
+  - jupyter
+  - jupyterlab
+  - pip
+  - pip:
+    - quantities
+    - nixio>=1.5.3
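
To reproduce the notebook's kernel, the environment can be created and activated with conda (assuming a local conda installation; mamba accepts the same commands):

    conda env create -f environment.yml
    conda activate andani-dataset4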

+ 5 - 0
metadata.csv

@@ -0,0 +1,5 @@
+trial_id,session_id,start_time,end_time,target_1_x,target_1_y,target_2_x,target_2_y,reward_value
+1,session1,0.0,10.0,5.0,5.0,-5.0,5.0,10
+2,session1,11.0,21.0,5.0,5.0,-5.0,5.0,5
+3,session2,0.0,9.5,5.0,5.0,-5.0,5.0,15
+4,session2,10.0,20.0,5.0,5.0,-5.0,5.0,10
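
The trial_id and session_id columns are the join keys the notebook relies on when matching spikes to trials. A quick consistency check between the metadata and the spike files might look like the following sketch (file names taken from this commit):

    import pandas as pd

    metadata = pd.read_csv('metadata.csv')
    for session_file in ['session1_spikes.csv', 'session2_spikes.csv']:
        spikes = pd.read_csv(session_file)
        session_id = session_file.split('_')[0]
        # Every trial referenced in the spike file must appear in the metadata
        # for the same session, otherwise the notebook would silently skip it
        expected = set(metadata.loc[metadata['session_id'] == session_id, 'trial_id'])
        found = set(spikes['trial_id'])
        assert found <= expected, f'{session_file}: unknown trials {found - expected}'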

BIN
neural_data.nix


+ 11 - 0
session1_spikes.csv

@@ -0,0 +1,11 @@
+unit_id,trial_id,spike_time
+1,1,0.001
+1,1,0.005
+1,1,0.010
+2,1,0.002
+2,1,0.007
+2,1,0.012
+1,2,11.001
+1,2,11.005
+2,2,11.002
+2,2,11.007

+ 9 - 0
session2_spikes.csv

@@ -0,0 +1,9 @@
+unit_id,trial_id,spike_time
+1,3,0.003
+1,3,0.008
+2,3,0.004
+2,3,0.009
+1,4,10.001
+1,4,10.006
+2,4,10.002
+2,4,10.007
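
As a final round-trip check, individual spike trains can be read back from the NIX file by the names assigned in the notebook. A sketch, assuming the notebook above has already been run:

    import nixio as nix

    nix_file = nix.File.open('neural_data.nix', nix.FileMode.ReadOnly)
    # Blocks, groups, and data arrays are addressable by their creation names
    block = nix_file.blocks['Session session1']
    trial = block.groups['Trial 1']
    spikes = trial.data_arrays['Trial 1 Unit 1 Spike Times'][:]
    print(spikes)  # expected from session1_spikes.csv: [0.001 0.005 0.01]
    nix_file.close()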