
Initial commit of data and code of the Reach-to-Grasp Experiment.

Michael Denker committed 6 years ago (commit d1f7f85689)
100 changed files with 28128 additions and 0 deletions
  1. +72 -0  .gitignore
  2. +50 -0  README.md
  3. +95 -0  cloudberry.yml
  4. +26 -0  code/LICENSE.txt
  5. +724 -0  code/data_overview_1.py
  6. +379 -0  code/data_overview_2.py
  7. +1 -0  code/elephant/AUTHORS.txt
  8. +10 -0  code/elephant/LICENSE.txt
  9. +23 -0  code/elephant/README.rst
  10. +107 -0  code/elephant/continuous_integration/install.sh
  11. +19 -0  code/elephant/continuous_integration/test_script.sh
  12. +153 -0  code/elephant/doc/Makefile
  13. +43 -0  code/elephant/doc/authors.rst
  14. +310 -0  code/elephant/doc/conf.py
  15. +223 -0  code/elephant/doc/developers_guide.rst
  16. +34 -0  code/elephant/doc/environment.yml
  17. BIN  code/elephant/doc/images/elephant_favicon.ico
  18. BIN  code/elephant/doc/images/elephant_logo.png
  19. BIN  code/elephant/doc/images/elephant_logo_sidebar.png
  20. BIN  code/elephant/doc/images/elephant_structure.png
  21. BIN  code/elephant/doc/images/tutorials/tutorial_1_figure_1.png
  22. BIN  code/elephant/doc/images/tutorials/tutorial_1_figure_2.png
  23. +44 -0  code/elephant/doc/index.rst
  24. +107 -0  code/elephant/doc/install.rst
  25. +190 -0  code/elephant/doc/make.bat
  26. +25 -0  code/elephant/doc/modules.rst
  27. +113 -0  code/elephant/doc/overview.rst
  28. +6 -0  code/elephant/doc/reference/asset.rst
  29. +6 -0  code/elephant/doc/reference/conversion.rst
  30. +6 -0  code/elephant/doc/reference/cubic.rst
  31. +6 -0  code/elephant/doc/reference/kernels.rst
  32. +6 -0  code/elephant/doc/reference/neo_tools.rst
  33. +6 -0  code/elephant/doc/reference/pandas_bridge.rst
  34. +13 -0  code/elephant/doc/reference/signal_processing.rst
  35. +6 -0  code/elephant/doc/reference/spectral.rst
  36. +12 -0  code/elephant/doc/reference/spike_train_correlation.rst
  37. +8 -0  code/elephant/doc/reference/spike_train_dissimilarity.rst
  38. +11 -0  code/elephant/doc/reference/spike_train_generation.rst
  39. +12 -0  code/elephant/doc/reference/spike_train_surrogates.rst
  40. +18 -0  code/elephant/doc/reference/sta.rst
  41. +6 -0  code/elephant/doc/reference/statistics.rst
  42. +6 -0  code/elephant/doc/reference/unitary_event_analysis.rst
  43. +96 -0  code/elephant/doc/release_notes.rst
  44. +6 -0  code/elephant/doc/requirements.txt
  45. +85 -0  code/elephant/doc/tutorial.rst
  46. +30 -0  code/elephant/elephant/__init__.py
  47. +1753 -0  code/elephant/elephant/asset.py
  48. +814 -0  code/elephant/elephant/conversion.py
  49. +221 -0  code/elephant/elephant/cubic.py
  50. +332 -0  code/elephant/elephant/current_source_density.py
  51. +1059 -0  code/elephant/elephant/current_source_density_src/KCSD.py
  52. +96 -0  code/elephant/elephant/current_source_density_src/README.md
  53. +3 -0  code/elephant/elephant/current_source_density_src/__init__.py
  54. +201 -0  code/elephant/elephant/current_source_density_src/basis_functions.py
  55. +887 -0  code/elephant/elephant/current_source_density_src/icsd.py
  56. BIN  code/elephant/elephant/current_source_density_src/test_data.mat
  57. +362 -0  code/elephant/elephant/current_source_density_src/utility_functions.py
  58. +525 -0  code/elephant/elephant/kernels.py
  59. +199 -0  code/elephant/elephant/neo_tools.py
  60. +612 -0  code/elephant/elephant/pandas_bridge.py
  61. +334 -0  code/elephant/elephant/signal_processing.py
  62. +467 -0  code/elephant/elephant/spectral.py
  63. +601 -0  code/elephant/elephant/spike_train_correlation.py
  64. +412 -0  code/elephant/elephant/spike_train_dissimilarity.py
  65. +970 -0  code/elephant/elephant/spike_train_generation.py
  66. +523 -0  code/elephant/elephant/spike_train_surrogates.py
  67. +317 -0  code/elephant/elephant/sta.py
  68. +1157 -0  code/elephant/elephant/statistics.py
  69. +0 -0  code/elephant/elephant/test/__init__.py
  70. +64 -0  code/elephant/elephant/test/make_spike_extraction_test_data.py
  71. BIN  code/elephant/elephant/test/spike_extraction_test_data.npz
  72. +228 -0  code/elephant/elephant/test/test_asset.py
  73. +505 -0  code/elephant/elephant/test/test_conversion.py
  74. +157 -0  code/elephant/elephant/test/test_csd.py
  75. +151 -0  code/elephant/elephant/test/test_cubic.py
  76. +1245 -0  code/elephant/elephant/test/test_icsd.py
  77. +183 -0  code/elephant/elephant/test/test_kcsd.py
  78. +129 -0  code/elephant/elephant/test/test_kernels.py
  79. +1382 -0  code/elephant/elephant/test/test_neo_tools.py
  80. +2684 -0  code/elephant/elephant/test/test_pandas_bridge.py
  81. +572 -0  code/elephant/elephant/test/test_signal_processing.py
  82. +309 -0  code/elephant/elephant/test/test_spectral.py
  83. +561 -0  code/elephant/elephant/test/test_spike_train_correlation.py
  84. +520 -0  code/elephant/elephant/test/test_spike_train_dissimilarity.py
  85. +580 -0  code/elephant/elephant/test/test_spike_train_generation.py
  86. +319 -0  code/elephant/elephant/test/test_spike_train_surrogates.py
  87. +414 -0  code/elephant/elephant/test/test_sta.py
  88. +554 -0  code/elephant/elephant/test/test_statistics.py
  89. +348 -0  code/elephant/elephant/test/test_unitary_event_analysis.py
  90. +805 -0  code/elephant/elephant/unitary_event_analysis.py
  91. +2 -0  code/elephant/readthedocs.yml
  92. +14 -0  code/elephant/requirements.txt
  93. +44 -0  code/elephant/setup.py
  94. +38 -0  code/example.m
  95. +253 -0  code/example.py
  96. +10 -0  code/load_local_neo_odml_elephant.py
  97. +877 -0  code/neo_utils.py
  98. +241 -0  code/odml_utils.py
  99. +1 -0  code/python-neo/AUTHORS
  100. +0 -0  code/python-neo/CITATION.txt

+ 72 - 0   .gitignore

@@ -0,0 +1,72 @@
+#########################################
+# Editor temporary/working/backup files #
+.#*
+[#]*#
+*~
+*$
+*.bak
+.coverage
+*.kdev4
+*.komodoproject
+.mr.developer.cfg
+nosetests.xml
+*.orig
+.project
+.pydevproject
+.settings
+*.tmp*
+.idea
+
+# Compiled source #
+###################
+*.a
+*.com
+*.class
+*.dll
+*.exe
+*.mo
+*.o
+*.py[ocd]
+*.so
+
+# Python files #
+################
+# setup.py working directory
+build
+# other build directories
+bin
+parts
+var
+lib
+lib64
+# sphinx build directory
+doc/_build
+# setup.py dist directory
+dist
+sdist
+# Egg metadata
+*.egg-info
+*.egg
+*.EGG
+*.EGG-INFO
+eggs
+develop-eggs
+# tox testing tool
+.tox
+# Packages
+.installed.cfg
+pip-log.txt
+# coverage
+cover
+
+# OS generated files #
+######################
+.directory
+.gdb_history
+.DS_Store?
+ehthumbs.db
+Icon?
+Thumbs.db
+
+# Things specific to this project #
+###################################

+ 50 - 0   README.md

@@ -0,0 +1,50 @@
+# Massively parallel multi-electrode recordings of macaque motor cortex during an instructed delayed reach-to-grasp task
+
+## Summary
+We provide two electrophysiological datasets recorded via a 10-by-10 multi-electrode array chronically implanted in the motor cortex of two macaque monkeys during an instructed delayed reach-to-grasp task. The datasets contain the continuous measure of extracellular potentials at each electrode sampled at 30 kHz, the local field potentials sampled at 1 kHz, and the timing of the online and offline extracted spike times. They also include the timing of several task-related and behavioral events recorded along with the electrophysiological data. Finally, the datasets provide a complete set of metadata structured in a standardized format. These metadata allow easy access to detailed information about the datasets, such as the settings of the recording hardware, the array specifications, the location of the implant in the motor cortex, information about the monkeys, or the offline spike sorting.
+The two datasets can be exploited to address crucial issues in neurophysiology, such as: What are the principles of neural interactions in a local cortical network, and how are these interactions modulated during a well-described behavioral task? How do different neuronal signals such as single-unit activity, multi-unit activity, or LFPs relate to each other? Which spike sorting methods provide the best estimate of single unit activity?
+
+## Repository structure
+
+### Directory datasets
+Contains the two datasets `i140703-001` and `l101210-001`. Original data files are provided in the Blackrock file formats (.nev, .ns2, .ns5, .ns6, .ccf), e.g., `i140703-001.nev`, `i140703-001.ns6`, .... The files `i140703-001-03.nev` and `l101210-001-02.nev` contain offline spike sorted data for both datasets, as opposed to the original recordings `i140703-001.nev` and `l101210-001.nev`, which contain the same spikes but an unreliable sorting that should not be used. The files `i140703-001.odml` and `l101210-001.odml` contain extensive metadata describing the datasets in the odML format. The Excel files `i140703-001.xls` and `l101210-001.xls` contain the same information as the odML files for easy reading and browsing; however, they are not used by the loading routines. The file `odml.xsl` is an XSL stylesheet required for viewing the odML files in a web browser.
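
For orientation, the odML metadata can also be browsed programmatically with the python-odml snapshot shipped in `code/python-odml`. The snippet below is a minimal sketch only; the dataset path is an assumption, and `odml.tools.xmlparser.load` is the call used by `data_overview_1.py` further below.

```python
# Minimal sketch (illustration, not part of this commit): browse the odML
# metadata of one dataset with the bundled python-odml snapshot.
import odml.tools

# The path is an assumption; adjust it to where the datasets are stored.
doc = odml.tools.xmlparser.load('../datasets/i140703-001.odml')

# Print the names of the top-level sections to get an overview of the
# metadata tree.
for section in doc.sections:
    print(section.name)
```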
+
+### Directory datasets_matlab
+Contains the data and metadata output of the Python loading routines in the MATLAB .mat file format. These files are provided as a convenience for MATLAB users; note, however, that they are not the original data files and contain a condensed, interpreted subset of the original data. Due to size restrictions of the MATLAB file format, the files `i140703-001_lfp-spikes.mat` and `l101210-001_lfp-spikes.mat` contain only spikes and LFP data (for monkey N), while raw data is saved separately for each channel in correspondingly named files.
+
+### Directory code
+Contains example code to help in loading and analyzing the data. The file `example.py` is a Python script that acts as a tutorial for loading and plotting data. The scripts `data_overview_1.py` and `data_overview_2.py` reproduce the plots of the data found in the publication. The files `neo_utils.py` and `odml_utils.py` contain useful utility routines to work with data and metadata. Finally, the file `example.m` contains a rudimentary MATLAB script demonstrating how to use the data provided in the .mat files.
+
+To run the Python example code, download the release of this repository, and install the requirements in `code/requirements.txt`. Then, run the example via
+```
+   cd code
+   python example.py
+```
+The script produces a figure saved in three different graphics file formats.
+
+### Directory code/reachgraspio
+Contains the file `reachgraspio.py`, which implements the loading routine specific to the Reach-to-Grasp experiments in this repository. This loading routine merges the recorded data with the metadata from the odML files into a common Neo object. It is recommended to use this loading routine in combination with the odML and Neo libraries (see below) when working with the data.
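
As an illustration of how the loading routine is used, the sketch below is adapted from the `read_block` call in `code/data_overview_1.py`; the chosen session name and the reduced keyword set (relying on defaults for the omitted arguments) are assumptions.

```python
# Minimal sketch (adapted from code/data_overview_1.py): load one session
# into a Neo Block via the ReachGraspIO loading routine.
import os
from reachgraspio import reachgraspio

datasetdir = '../datasets'  # assumption: the script is run from within code/

session = reachgraspio.ReachGraspIO(
    filename=os.path.join(datasetdir, 'i140703-001'),
    odml_directory=datasetdir,
    verbose=False)

# Load the 1 kHz LFP (ns2), spike times of unit IDs 1-4, and the trial
# events; the remaining read_block arguments are left at their defaults.
block = session.read_block(
    nsx_to_load=2,
    channels='all',
    units=range(1, 5),
    load_events=True,
    scaling='voltage',
    lazy=False,
    cascade=True)

print(block.segments[0])
```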
+
+### Further subdirectories of code
+The subdirectories `python-neo`, `python-odml`, and `elephant` contain snapshots of the Neo [1], odML [2], and Elephant [3] libraries, respectively, that are required by the example scripts and the reachgraspio loading routine. In short, Neo provides the data model, generic Blackrock loading routines, and APIs used to load the data; odML provides an API to handle the metadata files; and Elephant is a library for the analysis of neuronal data based on the Neo data model, used by the example script to filter the raw signals and obtain offline filtered LFPs (see the sketch after the references below). By modifying the file `load_local_neo_odml_elephant.py` in the code directory it is possible to instruct the example scripts to use system-wide installed versions of these libraries instead of the static snapshots. Note, however, that future versions of these libraries may require adapted versions of the `reachgraspio.py` loading routine (see Updates below).
+* [1] https://github.com/NeuralEnsemble/python-neo
+* [2] https://github.com/G-Node/python-odml
+* [3] https://github.com/NeuralEnsemble/elephant
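
As a sketch of the filtering step mentioned above, the snippet below applies a Butterworth filter from the bundled Elephant snapshot to a stand-in raw signal; the dummy signal and the 250 Hz cutoff are illustrative assumptions, not the exact settings used in `example.py`.

```python
# Minimal sketch: obtain an offline-filtered, LFP-like signal from a raw
# 30 kHz trace using elephant.signal_processing.butter.
import numpy as np
import quantities as pq
from neo import AnalogSignal
import elephant.signal_processing

# Stand-in for one raw 30 kHz channel (in practice taken from a Block loaded
# via reachgraspio); random data is used here for illustration only.
raw_signal = AnalogSignal(np.random.randn(30000, 1) * pq.uV,
                          sampling_rate=30000 * pq.Hz)

# Low-pass filter the broadband signal; the 250 Hz cutoff is an assumed,
# illustrative choice rather than the value used in example.py.
lfp_like = elephant.signal_processing.butter(raw_signal,
                                             lowpass_freq=250 * pq.Hz)
```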
+
+## Updates
+Updated versions of the code will be provided at:
+https://web.gin.g-node.org/INT/multielectrode_grasp
+This includes, in particular, the loading routine reachgraspio.py, which may need to be adapted as new versions of the Neo and odML libraries become available.
+
+## Related Publications
+* Riehle, A., Wirtssohn, S., Grün, S., & Brochier, T. (2013). Mapping the spatio-temporal structure of motor cortical LFP and spiking activities during reach-to-grasp movements. Frontiers in Neural Circuits, 7, 48. https://doi.org/10.3389/fncir.2013.00048
+* Milekovic, T., Truccolo, W., Grün, S., Riehle, A., & Brochier, T. (2015). Local field potentials in primate motor cortex encode grasp kinetic parameters. NeuroImage, 114, 338–355. https://doi.org/10.1016/j.neuroimage.2015.04.008
+* Torre, E., Quaglio, P., Denker, M., Brochier, T., Riehle, A., & Grün, S. (2016). Synchronous spike patterns in macaque motor cortex during an instructed-delay reach-to-grasp task. Journal of Neuroscience, 36(32), 8329–8340. https://doi.org/10.1523/JNEUROSCI.4375-15.2016
+* Zehl, L., Jaillet, F., Stoewer, A., Grewe, J., Sobolev, A., Wachtler, T., Brochier, T., Riehle, A., Denker, M., & Grün, S. (2016). Handling Metadata in a Neurophysiology Laboratory. Frontiers in Neuroinformatics, 10, 26. https://doi.org/10.3389/fninf.2016.00026
+* Denker, M., Zehl, L., Kilavik, B. E., Diesmann, M., Brochier, T., Riehle, A., & Grün, S. (2017). LFP beta amplitude is predictive of mesoscopic spatio-temporal phase patterns. arXiv:1703.09488 [q-bio.NC]. https://arxiv.org/abs/1703.09488
+
+## Licensing
+<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Massively parallel multi-electrode recordings of macaque motor cortex during an instructed delayed reach-to-grasp task</span> in the directories `datasets` and `datasets_matlab` by <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Institut de Neurosciences de la Timone (INT), UMR 7289, CNRS – Aix Marseille Université, Marseille, France and Institute of Neuroscience and Medicine (INM-6), Forschungszentrum Jülich, Jülich, Germany</span> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.
+
+All code in the directories `code`, `code/python-odml`, `code/python-neo`, `code/elephant`, and `code/reachgraspio` is published under the BSD 3-Clause License. See the `LICENSE.txt` or `LICENSE` files in the corresponding directories for the full license text.
+

+ 95 - 0   cloudberry.yml

@@ -0,0 +1,95 @@
+## Required fields
+# The main researchers involved in working on the resource,
+# or the authors of the publication in priority order.
+# May be a corporate/institutional or personal name.
+authors:
+  -
+    firstname: "Thomas"
+    lastname: "Brochier"
+    affiliation: "Institut de Neurosciences de la Timone (INT), UMR 7289, CNRS – Aix Marseille Université, Marseille, France"
+    id: "orcid.org/0000-0001-6948-1234"
+  -
+    firstname: "Lyuba"
+    lastname: "Zehl"
+    affiliation: "Institute of Neuroscience and Medicine (INM-6) and Institute for Advanced Simulation (IAS-6) and JARA BRAIN Institute I, Jülich Research Centre, Jülich, Germany"
+    id: "orcid.org/0000-0002-5947-9939"
+  -
+    firstname: "Yaoyao"
+    lastname: "Hao"
+    affiliation: "Institut de Neurosciences de la Timone (INT), UMR 7289, CNRS – Aix Marseille Université, Marseille, France"
+    id: "orcid.org/0000-0002-9390-4660"
+  -
+    firstname: "Margaux"
+    lastname: "Duret"
+    affiliation: "Institut de Neurosciences de la Timone (INT), UMR 7289, CNRS – Aix Marseille Université, Marseille, France"
+    id: "orcid.org/0000-0002-6557-748X"
+  -
+    firstname: "Julia"
+    lastname: "Sprenger"
+    affiliation: "Institute of Neuroscience and Medicine (INM-6) and Institute for Advanced Simulation (IAS-6) and JARA BRAIN Institute I, Jülich Research Centre, Jülich, Germany"
+    id: "orcid.org/0000-0002-9986-7477"
+  -
+    firstname: "Michael"
+    lastname: "Denker"
+    affiliation: "Institute of Neuroscience and Medicine (INM-6) and Institute for Advanced Simulation (IAS-6) and JARA BRAIN Institute I, Jülich Research Centre, Jülich, Germany"
+    id: "orcid.org/0000-0003-1255-7300"
+  -
+    firstname: "Sonja"
+    lastname: "Grün"
+    affiliation: "Institute of Neuroscience and Medicine (INM-6) and Institute for Advanced Simulation (IAS-6) and JARA BRAIN Institute I, Jülich Research Centre, Jülich, Germany"
+    id: "orcid.org/0000-0003-2829-2220"
+  -
+    firstname: "Alexa"
+    lastname: "Riehle"
+    affiliation: "Institut de Neurosciences de la Timone (INT), UMR 7289, CNRS – Aix Marseille Université, Marseille, France"
+
+# A name or title to describe the published resource.
+title: "Massively parallel multi-electrode recordings of macaque motor cortex during an instructed delayed reach-to-grasp task"
+
+# Any additional information. It is best practice to supply a description for the resource.
+description: |
+    We provide two electrophysiological datasets recorded via a 10-by-10 multi-electrode array chronically implanted in the motor cortex of two macaque monkeys during an instructed delayed reach-to-grasp task. The datasets contain the continuous measure of extracellular potentials at each electrode sampled at 30 kHz, the local field potentials sampled at 1 kHz, and the timing of the online and offline extracted spike times. They also include the timing of several task-related and behavioral events recorded along with the electrophysiological data. Finally, the datasets provide a complete set of metadata structured in a standardized format. These metadata allow easy access to detailed information about the datasets, such as the settings of the recording hardware, the array specifications, the location of the implant in the motor cortex, information about the monkeys, or the offline spike sorting.
+
+# List of keywords the resource should be associated with.
+keywords:
+  - Neuroscience
+  - Electrophysiology
+  - Utah Array
+  - Spikes
+  - Local Field Potential
+  - Macaque
+  - Motor Cortex
+  
+# Any rights information for this resource. Please provide both a license name and a link to the license.
+license:
+  name: "CC-BY"
+  url: "http://creativecommons.org/licenses/by/4.0/"
+
+## Optional Fields
+
+# Any funding reference for this resource.
+funding:
+  - "Helmholtz Association, Supercomputing and Modeling for the Human Brain"
+  - "EU, EU.604102"
+  - "EU, EU.720270"
+  - "DFG, DFG.GR 1753/4-2"
+  - "DFG, DFG.DE 2175/2-1"
+  - "RIKEN-CNRS, Collaborative Research Agreement"
+  - "ANR, GRASP"
+  - "CNRS, PEPS"
+  - "CNRS, Neuro_IC2010"
+  - "DAAD"
+  - "LIA Vision for Action"
+  
+
+# refType might be: IsCitedBy, IsSupplementTo, IsReferencedBy, IsPartOf
+# for further valid types see https://schema.datacite.org/meta/kernel-4
+references:
+  -
+    doi: "10.3389/fninf.2016.00026"
+    reftype: "HasMetadata"
+    name: "Zehl, L., Jaillet, F., Stoewer, A., Grewe, J., Sobolev, A., Wachtler, T., … Grün, S. (2016). Handling Metadata in a Neurophysiology Laboratory. Frontiers in Neuroinformatics, 10, 26."
+  -
+    doi: "10.3389/fncir.2013.00048"
+    reftype: "HasMetadata"
+    name: "Riehle, A., Wirtssohn, S., Grün, S., & Brochier, T. (2013). Mapping the spatio-temporal structure of motor cortical LFP and spiking activities during reach-to-grasp movements. Frontiers in Neural Circuits, 7, 48"

+ 26 - 0   code/LICENSE.txt

@@ -0,0 +1,26 @@
+Copyright (c) 2017, Institute of Neuroscience and Medicine (INM-6),
+Forschungszentrum Juelich, Germany
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+* Neither the names of the copyright holders nor the names of the contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 724 - 0   code/data_overview_1.py

@@ -0,0 +1,724 @@
+# -*- coding: utf-8 -*-
+"""
+Code for generating the first data figure in the manuscript.
+
+Authors: Julia Sprenger, Lyuba Zehl, Michael Denker
+
+
+Copyright (c) 2017, Institute of Neuroscience and Medicine (INM-6),
+Forschungszentrum Juelich, Germany
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+* Neither the names of the copyright holders nor the names of the contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+# This loads the Neo and odML libraries shipped with this code. For production
+# use, please use the newest releases of odML and Neo.
+import load_local_neo_odml_elephant
+
+import os
+
+import numpy as np
+from scipy import stats
+import quantities as pq
+import matplotlib.pyplot as plt
+
+from matplotlib import gridspec, ticker
+
+from reachgraspio import reachgraspio
+
+import odml.tools
+
+import neo_utils
+import odml_utils
+
+
+# =============================================================================
+# Define data and metadata directories
+# =============================================================================
+
+
+def get_monkey_datafile(monkey):
+    if monkey == "Lilou":
+        return "l101210-001"  # ns2 (behavior) and ns5 present
+    elif monkey == "Nikos2":
+        return "i140703-001"  # ns2 and ns6 present
+    else:
+        return ""
+
+
+# Enter your dataset directory here
+datasetdir = "../datasets/"
+
+trialtype_colors = {
+    'SGHF': 'MediumBlue', 'SGLF': 'Turquoise',
+    'PGHF': 'DarkGreen', 'PGLF': 'YellowGreen',
+    'LFSG': 'Orange', 'LFPG': 'Yellow',
+    'HFSG': 'DarkRed', 'HFPG': 'OrangeRed',
+    'SGSG': 'SteelBlue', 'PGPG': 'LimeGreen',
+    'NONE': 'k', 'PG': 'k', 'SG': 'k', 'LF': 'k', 'HF': 'k'}
+
+event_colors = {
+    'TS-ON': 'Gray',  # 'TS-OFF': 'Gray',
+    'WS-ON': 'Gray',  # 'WS-OFF': 'Gray',
+    'CUE-ON': 'Gray',
+    'CUE-OFF': 'Gray',
+    'GO-ON': 'Gray',  # 'GO-OFF': 'Gray',
+    #    'GO/RW-OFF': 'Gray',
+    'SR': 'Gray',  # 'SR-REP': 'Gray',
+    'RW-ON': 'Gray',  # 'RW-OFF': 'Gray',
+    'STOP': 'Gray'}
+
+
+# =============================================================================
+# Plot helper functions
+# =============================================================================
+
+
+def force_aspect(ax, aspect=1):
+    ax.set_aspect(abs(
+        (ax.get_xlim()[1] - ax.get_xlim()[0]) /
+        (ax.get_ylim()[1] - ax.get_ylim()[0])) / aspect)
+
+
+def get_arraygrid(blackrock_elid_list, chosen_el, rej_el=None):
+    if rej_el is None:
+        rej_el = []
+    array_grid = np.zeros((10, 10))
+    for m in range(10):
+        for n in range(10):
+            idx = (9 - m) * 10 + n
+            bl_id = blackrock_elid_list[idx]
+            if bl_id == -1:
+                array_grid[m, n] = 0.7
+            elif bl_id == chosen_el:
+                array_grid[m, n] = -0.7
+            elif bl_id in rej_el:
+                array_grid[m, n] = -0.35
+            else:
+                array_grid[m, n] = 0
+    return np.ma.array(array_grid, mask=np.isnan(array_grid))
+
+
+# =============================================================================
+# Load data and metadata for a monkey
+# =============================================================================
+# CHANGE this parameter to load data of the different monkeys
+# monkey = 'Lilou'
+monkey = 'Nikos2'
+
+nsx_none = {'Lilou': None, 'Nikos2': None}
+nsx_lfp = {'Lilou': 2, 'Nikos2': 2}
+nsx_raw = {'Lilou': 5, 'Nikos2': 6}
+chosen_el = {'Lilou': 71, 'Nikos2': 63}
+chosen_units = {'Lilou': range(1, 5), 'Nikos2': range(1, 5)}
+
+datafile = get_monkey_datafile(monkey)
+
+session = reachgraspio.ReachGraspIO(
+    filename=os.path.join(datasetdir, datafile),
+    odml_directory=datasetdir,
+    verbose=False)
+
+bl_lfp = session.read_block(
+    index=None,
+    name=None,
+    description=None,
+    nsx_to_load=nsx_lfp[monkey],
+    n_starts=None,
+    n_stops=None,
+    channels='all',
+    units=chosen_units[monkey],
+    load_waveforms=False,
+    load_events=True,
+    scaling='voltage',
+    lazy=False,
+    cascade=True)
+
+bl_raw = session.read_block(
+    index=None,
+    name=None,
+    description=None,
+    nsx_to_load=nsx_raw[monkey],
+    n_starts=None,
+    n_stops=None,
+    channels=chosen_el[monkey],
+    units=chosen_units[monkey],
+    load_waveforms=True,
+    load_events=True,
+    scaling='voltage',
+    lazy=False,
+    cascade=True)
+
+seg_raw = bl_raw.segments[0]
+seg_lfp = bl_lfp.segments[0]
+
+# Displaying loaded data structure as string output
+print "\nBlock"
+print 'Attributes ', bl_raw.__dict__.keys()
+print 'Annotations', bl_raw.annotations
+print "\nSegment"
+print 'Attributes ', seg_raw.__dict__.keys()
+print 'Annotations', seg_raw.annotations
+print "\nEvents"
+for x in seg_raw.events:
+    print '\tEvent with name', x.name
+    print '\t\tAttributes ', x.__dict__.keys()
+    print '\t\tAnnotation keys', x.annotations.keys()
+    print '\t\ttimes', x.times[:20]
+    for anno_key in ['trial_id', 'trial_timestamp_id', 'trial_event_labels',
+                     'trial_reject_IFC']:
+        print '\t\t'+anno_key, x.annotations[anno_key][:20]
+
+print "\nChannels"
+for x in bl_raw.channel_indexes:
+    print '\tChannel with name', x.name
+    print '\t\tAttributes ', x.__dict__.keys()
+    print '\t\tchannel_ids', x.channel_ids
+    print '\t\tchannel_names', x.channel_names
+    print '\t\tAnnotations', x.annotations
+print "\nUnits"
+for x in bl_raw.list_units:
+    print '\tUnit with name', x.name
+    print '\t\tAttributes ', x.__dict__.keys()
+    print '\t\tAnnotations', x.annotations
+    print '\t\tchannel_id', x.annotations['channel_id']
+    assert(x.annotations['channel_id'] == x.channel_index.channel_ids[0])
+print "\nSpikeTrains"
+for x in seg_raw.spiketrains:
+    print '\tSpiketrain with name', x.name
+    print '\t\tAttributes ', x.__dict__.keys()
+    print '\t\tAnnotations', x.annotations
+    print '\t\tchannel_id', x.annotations['channel_id']
+    print '\t\tspike times', x.times[0:20]
+print "\nAnalogSignals"
+for x in seg_raw.analogsignals:
+    print '\tAnalogSignal with name', x.name
+    print '\t\tAttributes ', x.__dict__.keys()
+    print '\t\tAnnotations', x.annotations
+    print '\t\tchannel_id', x.annotations['channel_id']
+
+# get start and stop events of trials
+start_events = neo_utils.get_events(
+    seg_raw,
+    properties={
+        'name': 'TrialEvents',
+        'trial_event_labels': 'TS-ON',
+        'performance_in_trial': 255})
+stop_events = neo_utils.get_events(
+    seg_raw,
+    properties={
+        'name': 'TrialEvents',
+        'trial_event_labels': 'STOP',
+        'performance_in_trial': 255})
+
+# there should only be one event object for these conditions
+assert len(start_events) == 1
+assert len(stop_events) == 1
+
+# insert epochs from 250 ms before TS-ON to 500 ms after STOP, corresponding
+# to complete trials
+neo_utils.add_epoch(
+    seg_raw,
+    start_events[0],
+    stop_events[0],
+    pre=-250 * pq.ms,
+    post=500 * pq.ms,
+    trial_status='complete_trials',
+    trial_type=start_events[0].annotations['belongs_to_trialtype'],
+    trial_performance=start_events[0].annotations['performance_in_trial'])
+
+# access single epoch of this data_segment
+epochs = neo_utils.get_epochs(seg_raw,
+                              properties={'trial_status': 'complete_trials'})
+assert len(epochs) == 1
+
+# cut segments according to inserted 'complete_trials' epochs and reset trial
+#  times
+cut_segments_raw = neo_utils.cut_segment_by_epoch(
+    seg_raw, epochs[0], reset_time=True)
+
+cut_segments_lfp = neo_utils.cut_segment_by_epoch(
+    seg_lfp, epochs[0], reset_time=True)
+
+# =============================================================================
+# Define data for overview plots
+# =============================================================================
+trial_index = {'Lilou': 0, 'Nikos2': 6}
+
+trial_seg_raw = cut_segments_raw[trial_index[monkey]]
+trial_seg_lfp = cut_segments_lfp[trial_index[monkey]]
+
+blackrock_elid_list = bl_lfp.annotations['avail_electrode_ids']
+
+# get 'TrialEvents'
+event = trial_seg_lfp.events[2]
+start = event.annotations['trial_event_labels'].index('TS-ON')
+trialx_trty = event.annotations['belongs_to_trialtype'][start]
+trialx_trtimeid = event.annotations['trial_timestamp_id'][start]
+trialx_color = trialtype_colors[trialx_trty]
+
+# find trial index for next trial with opposite force type (for ax5b plot)
+if 'LF' in trialx_trty:
+    trialz_trty = trialx_trty.replace('LF', 'HF')
+else:
+    trialz_trty = trialx_trty.replace('HF', 'LF')
+
+for i, tr in enumerate(cut_segments_lfp):
+    eventz = tr.events[2]
+    nextft = eventz.annotations['trial_event_labels'].index('TS-ON')
+    if eventz.annotations['belongs_to_trialtype'][nextft] == trialz_trty:
+        trialz_trtimeid = eventz.annotations['trial_timestamp_id'][nextft]
+        trialz_color = trialtype_colors[trialz_trty]
+        trialz_seg_lfp = tr
+        break
+
+
+# =============================================================================
+# Define figure and subplot axis for first data overview
+# =============================================================================
+fig = plt.figure()
+fig.set_size_inches(6.5, 10.)  # (w, h) in inches
+
+gs = gridspec.GridSpec(
+    nrows=5,
+    ncols=4,
+    left=0.05,
+    bottom=0.07,
+    right=0.9,
+    top=0.975,
+    wspace=0.3,
+    hspace=0.5,
+    width_ratios=None,
+    height_ratios=[1, 3, 3, 6, 3])
+
+ax1 = plt.subplot(gs[0, :])  # top row / odml data
+# second row
+ax2a = plt.subplot(gs[1, 0])  # electrode overview plot
+ax2b = plt.subplot(gs[1, 1])  # waveforms unit 1
+ax2c = plt.subplot(gs[1, 2])  # waveforms unit 2
+ax2d = plt.subplot(gs[1, 3])  # waveforms unit 3
+ax3 = plt.subplot(gs[2, :])  # third row / spiketrains
+ax4 = plt.subplot(gs[3, :], sharex=ax3)  # fourth row / raw signal
+ax5a = plt.subplot(gs[4, 0:3])  # fifth row / behavioral signals
+ax5b = plt.subplot(gs[4, 3])
+
+fontdict_titles = {'fontsize': 'small', 'fontweight': 'bold'}
+fontdict_axis = {'fontsize': 'x-small'}
+
+wf_time_unit = pq.ms
+wf_signal_unit = pq.microvolt
+
+plotting_time_unit = pq.s
+raw_signal_unit = wf_signal_unit
+
+behav_signal_unit = pq.V
+
+# =============================================================================
+# PLOT TRIAL SEQUENCE OF SUBSESSION
+# =============================================================================
+
+# load complete metadata collection
+odmldoc = odml.tools.xmlparser.load(datasetdir + datafile + '.odml')
+
+# get total trial number
+trno_tot = odml_utils.get_TrialCount(odmldoc)
+trno_ctr = odml_utils.get_TrialCount(odmldoc, performance_code=255)
+trno_ertr = trno_tot - trno_ctr
+
+# get trial id of chosen trial (and next trial with opposite force)
+trtimeids = odml_utils.get_TrialIDs(odmldoc, idtype='TrialTimestampID')
+trids = odml_utils.get_TrialIDs(odmldoc)
+trialx_trid = trids[trtimeids.index(trialx_trtimeid)]
+trialz_trid = trids[trtimeids.index(trialz_trtimeid)]
+
+# get all trial ids for grip error trials
+trids_pc191 = odml_utils.get_trialids_pc(odmldoc, 191)
+
+# get occurring trial types
+octrty = odml_utils.get_OccurringTrialTypes(odmldoc, code=False)
+
+# Subplot 1: Trial sequence
+boxes, labels = [], []
+for tt in octrty:
+    # Plot trial ids of current trial type into trial sequence bar plot
+    left = odml_utils.get_trialids_trty(odmldoc, tt)
+    height = np.ones_like(left)
+    width = 1.
+    if tt in ['NONE', 'PG', 'SG', 'LF', 'HF']:
+        color = 'w'
+    else:
+        color = trialtype_colors[tt]
+
+    B = ax1.bar(
+        left=left, height=height, width=width, color=color, linewidth=0.01)
+
+    # Mark trials of current trial type (left) if a grip error occurred
+    x = [i + width / 2. for i in list(set(left) & set(trids_pc191))]
+    y = np.ones_like(x) * 2.0
+    ax1.scatter(x, y, s=5, marker='*')
+
+    # Collect information for trial type legend
+    if tt not in ['PG', 'SG', 'LF', 'HF']:
+        boxes.append(B[0])
+        if tt == 'NONE':
+            # use errors for providing total trial number
+            labels.append('total: # %i' % trno_tot)
+            # add another box and label for error numbers
+            boxes.append(B[0])
+            labels.append('/ * errors: # %i' % trno_ertr)
+        else:
+            # trial type trial numbers
+            labels.append(tt + ': # %i' % len(left))
+
+# mark chosen trial
+x = [trialx_trid + width / 2.]
+y = np.ones_like(x) * 2.0
+ax1.scatter(x, y, s=5, marker='D', color='Red', edgecolors='Red')
+# mark next trial with opposite force
+x = [trialz_trid + width / 2.]
+y = np.ones_like(x) * 2.0
+ax1.scatter(x, y, s=5, marker='D', color='orange', edgecolors='orange')
+
+
+# Generate trial type legend; bbox: (left, bottom, width, height)
+leg = ax1.legend(
+    boxes, labels, bbox_to_anchor=(0., 1., 0.5, 0.1), loc=3, handlelength=1.1,
+    ncol=len(labels), borderaxespad=0., handletextpad=0.4,
+    prop={'size': 'xx-small'})
+leg.draw_frame(False)
+
+# adjust x and y axis
+xticks = [i + 0.5 for i in range(1, 101, 10)] + [100.5]
+ax1.set_xticks(xticks)
+ax1.set_xticklabels([str(int(t)) for t in xticks], size='xx-small')
+ax1.set_xlabel('trial ID', size='x-small')
+ax1.set_xlim(1, 101)
+ax1.yaxis.set_visible(False)
+ax1.set_ylim(0, 3)
+ax1.spines['top'].set_visible(False)
+ax1.spines['left'].set_visible(False)
+ax1.spines['right'].set_visible(False)
+ax1.tick_params(direction='out', top='off')
+ax1.set_title('sequence of the first 100 trials', fontdict_titles, y=2)
+ax1.set_aspect('equal')
+
+
+# =============================================================================
+# PLOT ELECTRODE POSITION of chosen electrode
+# =============================================================================
+arraygrid = get_arraygrid(blackrock_elid_list, chosen_el[monkey])
+cmap = plt.cm.RdGy
+
+ax2a.pcolormesh(
+    np.flipud(arraygrid), vmin=-1, vmax=1, lw=1, cmap=cmap, edgecolors='k',
+    shading='faceted')
+
+force_aspect(ax2a, aspect=1)
+ax2a.tick_params(
+    bottom='off', top='off', left='off', right='off',
+    labelbottom='off', labeltop='off', labelleft='off', labelright='off')
+ax2a.set_title('electrode pos.', fontdict_titles)
+
+
+# =============================================================================
+# PLOT WAVEFORMS of units of the chosen electrode
+# =============================================================================
+unit_ax_translator = {1: ax2b, 2: ax2c, 3: ax2d}
+unit_type = {1: '', 2: '', 3: ''}
+
+wf_lim = []
+# plotting waveform for all spiketrains available
+for spiketrain in trial_seg_raw.spiketrains:
+    unit_id = spiketrain.annotations['unit_id']
+    # get unit type
+    if spiketrain.annotations['sua']:
+        unit_type[unit_id] = 'SUA'
+    elif spiketrain.annotations['mua']:
+        unit_type[unit_id] = 'MUA'
+    else:
+        pass
+    # get correct ax
+    ax = unit_ax_translator[unit_id]
+    # get wf sampling time before threshold crossing
+    left_sweep = spiketrain.left_sweep
+
+    # plot waveforms in subplots according to unit id
+    for st_id, st in enumerate(spiketrain):
+        wf = spiketrain.waveforms[st_id]
+        wf_lim.append((np.min(wf), np.max(wf)))
+        wf_color = str(
+            (st / spiketrain.t_stop).rescale('dimensionless').magnitude)
+        times = range(len(wf[0])) * spiketrain.units - left_sweep
+        ax.plot(
+            times.rescale(wf_time_unit), wf[0].rescale(wf_signal_unit),
+            color=wf_color)
+        ax.set_xlim(
+            times.rescale(wf_time_unit)[0], times.rescale(wf_time_unit)[-1])
+
+# adding xlabels and titles
+for unit_id, ax in unit_ax_translator.iteritems():
+    ax.set_title('unit %i (%s)' % (unit_id, unit_type[unit_id]),
+                 fontdict_titles)
+    ax.tick_params(direction='in', length=3, labelsize='xx-small',
+                   labelleft='off', labelright='off')
+    ax.set_xlabel(wf_time_unit.dimensionality.latex, fontdict_axis)
+    xticklocator = ticker.MaxNLocator(nbins=5)
+    ax.xaxis.set_major_locator(xticklocator)
+    ax.set_ylim(np.min(wf_lim), np.max(wf_lim))
+    force_aspect(ax, aspect=1)
+
+# adding ylabel
+ax2d.tick_params(labelsize='xx-small', labelright='on')
+ax2d.set_ylabel(wf_signal_unit.dimensionality.latex, fontdict_axis)
+ax2d.yaxis.set_label_position("right")
+
+
+# =============================================================================
+# PLOT SPIKETRAINS of units of chosen electrode
+# =============================================================================
+plotted_unit_ids = []
+
+# plotting all available spiketrains
+for st in trial_seg_raw.spiketrains:
+    unit_id = st.annotations['unit_id']
+    plotted_unit_ids.append(unit_id)
+    ax3.plot(st.times.rescale(plotting_time_unit),
+             np.zeros(len(st.times)) + unit_id,
+             'k|')
+
+# setting layout of spiketrain plot
+ax3.set_ylim(min(plotted_unit_ids) - 0.5, max(plotted_unit_ids) + 0.5)
+ax3.set_ylabel(r'unit ID', fontdict_axis)
+ax3.yaxis.set_major_locator(ticker.MultipleLocator(base=1))
+ax3.yaxis.set_label_position("right")
+ax3.tick_params(axis='y', direction='in', length=3, labelsize='xx-small',
+                labelleft='off', labelright='on')
+ax3.invert_yaxis()
+ax3.set_title('spiketrains', fontdict_titles)
+
+# =============================================================================
+# PLOT "raw" SIGNAL of chosen trial of chosen electrode
+# =============================================================================
+# get "raw" data from chosen electrode
+assert len(trial_seg_raw.analogsignals) == 1
+el_raw_sig = trial_seg_raw.analogsignals[0]
+
+# plotting raw signal trace
+ax4.plot(el_raw_sig.times.rescale(plotting_time_unit),
+         el_raw_sig.rescale(raw_signal_unit),
+         color='k')
+
+# setting layout of raw signal plot
+ax4.set_ylabel(raw_signal_unit.units.dimensionality.latex, fontdict_axis)
+ax4.yaxis.set_label_position("right")
+ax4.tick_params(axis='y', direction='in', length=3, labelsize='xx-small',
+                labelleft='off', labelright='on')
+ax4.set_title('"raw" signal', fontdict_titles)
+
+ax4.set_xlim(trial_seg_raw.t_start.rescale(plotting_time_unit),
+             trial_seg_raw.t_stop.rescale(plotting_time_unit))
+ax4.xaxis.set_major_locator(ticker.MultipleLocator(base=1))
+
+
+# =============================================================================
+# PLOT EVENTS across ax3 and ax4 and add time bar
+# =============================================================================
+# find trial relevant events
+startidx = event.annotations['trial_event_labels'].index('TS-ON')
+stopidx = event.annotations['trial_event_labels'][startidx:].index('STOP') + \
+    startidx + 1
+
+for ax in [ax3, ax4]:
+    xticks = []
+    xticklabels = []
+    for ev_id, ev in enumerate(event[startidx:stopidx]):
+        ev_labels = event.annotations['trial_event_labels'][startidx:stopidx]
+        if ev_labels[ev_id] in event_colors.keys():
+            ev_color = event_colors[ev_labels[ev_id]]
+            ax.axvline(
+                ev.rescale(plotting_time_unit), color=ev_color, zorder=0.5)
+            xticks.append(ev.rescale(plotting_time_unit))
+            if ev_labels[ev_id] == 'CUE-OFF':
+                xticklabels.append('-OFF')
+            elif ev_labels[ev_id] == 'GO-ON':
+                xticklabels.append('GO')
+            else:
+                xticklabels.append(ev_labels[ev_id])
+
+    ax.set_xticks(xticks)
+    ax.set_xticklabels(xticklabels)
+    ax.tick_params(axis='x', direction='out', length=3, labelsize='xx-small',
+                   labeltop='off', top='off')
+
+timebar_ypos = ax4.get_ylim()[0] + np.diff(ax4.get_ylim())[0] / 10
+timebar_labeloffset = np.diff(ax4.get_ylim())[0] * 0.01
+timebar_xmin = xticks[-2] + ((xticks[-1] - xticks[-2]) / 2 - 0.25 * pq.s)
+timebar_xmax = timebar_xmin + 0.5 * pq.s
+
+ax4.plot([timebar_xmin, timebar_xmax], [timebar_ypos, timebar_ypos], '-',
+         linewidth=3, color='k')
+ax4.text(timebar_xmin + 0.25 * pq.s, timebar_ypos + timebar_labeloffset,
+         '500 ms', ha='center', va='bottom', size='xx-small', color='k')
+
+
+# =============================================================================
+# PLOT BEHAVIORAL SIGNALS of chosen trial
+# =============================================================================
+# get behavioral signals
+ainp_signals = [nsig for nsig in trial_seg_lfp.analogsignals if
+                nsig.annotations['channel_id'] > 96]
+
+ainp_trialz = [nsig for nsig in trialz_seg_lfp.analogsignals if
+               nsig.annotations['channel_id'] == 141][0]
+
+# find out what signal to use
+trialx_sec = odmldoc['Recording']['TaskSettings']['Trial_%03i' % trialx_trid]
+
+# get correct channel id
+trialx_chids = [143]
+FSRi = trialx_sec['AnalogEvents'].properties['UsedForceSensor'].value.data
+FSRinfosec = odmldoc['Setup']['Apparatus']['TargetObject']['FSRSensor']
+if 'SG' in trialx_trty:
+    sgchids = [d.data for d in FSRinfosec.properties['SGChannelIDs'].values]
+    trialx_chids.append(min(sgchids) if FSRi == 1 else max(sgchids))
+else:
+    pgchids = [d.data for d in FSRinfosec.properties['PGChannelIDs'].values]
+    trialx_chids.append(min(pgchids) if FSRi == 1 else max(pgchids))
+
+
+# define time epoch
+startidx = event.annotations['trial_event_labels'].index('SR')
+stopidx = event.annotations['trial_event_labels'].index('OBB')
+sr = event[startidx].rescale(plotting_time_unit)
+stop = event[stopidx].rescale(plotting_time_unit) + 0.050 * pq.s
+startidx = event.annotations['trial_event_labels'].index('FSRplat-ON')
+stopidx = event.annotations['trial_event_labels'].index('FSRplat-OFF')
+fplon = event[startidx].rescale(plotting_time_unit)
+fploff = event[stopidx].rescale(plotting_time_unit)
+
+# define time epoch trialz
+startidx = eventz.annotations['trial_event_labels'].index('FSRplat-ON')
+stopidx = eventz.annotations['trial_event_labels'].index('FSRplat-OFF')
+fplon_trz = eventz[startidx].rescale(plotting_time_unit)
+fploff_trz = eventz[stopidx].rescale(plotting_time_unit)
+
+# plotting grip force and object displacement
+for ainp in ainp_signals:
+    if ainp.annotations['channel_id'] in trialx_chids:
+        ainp_times = ainp.times.rescale(plotting_time_unit)
+        mask = (ainp_times > sr) & (ainp_times < stop)
+        ainp_ampli = stats.zscore(ainp.magnitude[mask])
+
+        if ainp.annotations['channel_id'] != 143:
+            color = 'gray'
+        else:
+            color = 'k'
+        ax5a.plot(ainp_times[mask], ainp_ampli, color=color)
+
+    # get force load of this trial for next plot
+    elif ainp.annotations['channel_id'] == 141:
+        ainp_times = ainp.times.rescale(plotting_time_unit)
+        mask = (ainp_times > fplon) & (ainp_times < fploff)
+        force_av_01 = np.mean(ainp.rescale(behav_signal_unit).magnitude[mask])
+
+# setting layout of grip force and object displacement plot
+ax5a.set_title('grip force and object displacement', fontdict_titles)
+ax5a.yaxis.set_label_position("left")
+ax5a.tick_params(direction='in', length=3, labelsize='xx-small',
+                 labelleft='off', labelright='on')
+ax5a.set_ylabel('zscore', fontdict_axis)
+
+# plotting load/pull force of LF and HF trial
+force_times = ainp_trialz.times.rescale(plotting_time_unit)
+mask = (force_times > fplon_trz) & (force_times < fploff_trz)
+force_av_02 = np.mean(ainp_trialz.rescale(behav_signal_unit).magnitude[mask])
+
+bar_width = [0.4, 0.4]
+color = [trialx_color, trialz_color]
+ax5b.bar([0, 0.6], [force_av_01, force_av_02], bar_width, color=color)
+
+ax5b.set_title('load/pull force', fontdict_titles)
+ax5b.set_ylabel(behav_signal_unit.units.dimensionality.latex, fontdict_axis)
+ax5b.set_xticks([0.2, 0.8])
+ax5b.set_xticklabels([trialx_trty, trialz_trty], fontdict_axis)
+ax5b.yaxis.set_label_position("right")
+ax5b.tick_params(direction='in', length=3, labelsize='xx-small',
+                 labelleft='off', labelright='on')
+
+# =============================================================================
+# PLOT EVENTS across ax5a and add time bar
+# =============================================================================
+# find trial relevant events
+startidx = event.annotations['trial_event_labels'].index('SR')
+stopidx = event.annotations['trial_event_labels'].index('OBB')
+
+xticks = []
+xticklabels = []
+for ev_id, ev in enumerate(event[startidx:stopidx]):
+    ev_labels = event.annotations['trial_event_labels'][startidx:stopidx + 1]
+    if ev_labels[ev_id] in ['RW-ON']:
+        ax5a.axvline(ev.rescale(plotting_time_unit), color='k', zorder=0.5)
+        xticks.append(ev.rescale(plotting_time_unit))
+        xticklabels.append(ev_labels[ev_id])
+    elif ev_labels[ev_id] in ['OT', 'OR', 'DO', 'OBB', 'FSRplat-ON',
+                              'FSRplat-OFF', 'HEplat-ON']:
+        ev_color = 'k'
+        xticks.append(ev.rescale(plotting_time_unit))
+        xticklabels.append(ev_labels[ev_id])
+        ax5a.axvline(
+            ev.rescale(plotting_time_unit), color='k', ls='-.', zorder=0.5)
+    elif ev_labels[ev_id] == 'HEplat-OFF':
+        ev_color = 'k'
+        ax5a.axvline(
+            ev.rescale(plotting_time_unit), color='k', ls='-.', zorder=0.5)
+
+ax5a.set_xticks(xticks)
+ax5a.set_xticklabels(xticklabels, fontdict_axis, rotation=90)
+ax5a.tick_params(axis='x', direction='out', length=3, labelsize='xx-small',
+                 labeltop='off', top='off')
+ax5a.set_ylim([-2.0, 2.0])
+
+timebar_ypos = ax5a.get_ylim()[0] + np.diff(ax5a.get_ylim())[0] / 10
+timebar_labeloffset = np.diff(ax5a.get_ylim())[0] * 0.02
+timebar_xmax = xticks[xticklabels.index('RW-ON')] - 0.1 * pq.s
+timebar_xmin = timebar_xmax - 0.25 * pq.s
+
+
+ax5a.plot([timebar_xmin, timebar_xmax], [timebar_ypos, timebar_ypos], '-',
+          linewidth=3, color='k')
+ax5a.text(timebar_xmin + 0.125 * pq.s, timebar_ypos + timebar_labeloffset,
+          '250 ms', ha='center', va='bottom', size='xx-small', color='k')
+
+# add time window of ax5a to ax4
+ax4.axvspan(ax5a.get_xlim()[0], ax5a.get_xlim()[1], facecolor=[0.9, 0.9, 0.9],
+            zorder=-0.1, ec=None)
+
+# =============================================================================
+# SAVE FIGURE
+# =============================================================================
+
+
+fname = 'data_overview_1_%s' % monkey
+for file_format in ['eps', 'png', 'pdf']:
+    fig.savefig(fname + '.%s' % file_format, dpi=400, format=file_format)

+ 379 - 0   code/data_overview_2.py

@@ -0,0 +1,379 @@
+# -*- coding: utf-8 -*-
+"""
+Code for generating the second data figure in the manuscript.
+
+Authors: Julia Sprenger, Lyuba Zehl, Michael Denker
+
+
+Copyright (c) 2017, Institute of Neuroscience and Medicine (INM-6),
+Forschungszentrum Juelich, Germany
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+* Neither the names of the copyright holders nor the names of the contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+# This loads the Neo and odML libraries shipped with this code. For production
+# use, please use the newest releases of odML and Neo.
+import load_local_neo_odml_elephant
+
+import os
+
+import matplotlib.pyplot as plt
+from matplotlib import gridspec, transforms
+
+import quantities as pq
+import numpy as np
+
+from neo import (AnalogSignal, SpikeTrain)
+from reachgraspio import reachgraspio
+
+import neo_utils
+
+# =============================================================================
+# Define data and metadata directories and general settings
+# =============================================================================
+
+
+def get_monkey_datafile(monkey):
+    if monkey == "Lilou":
+        return "l101210-001"  # ns2 (behavior) and ns5 present
+    elif monkey == "Nikos2":
+        return "i140703-001"  # ns2 and ns6 present
+    else:
+        return ""
+
+
+# Enter your dataset directory here
+datasetdir = os.path.join('..', 'datasets')
+
+nsx_none = {'Lilou': None, 'Nikos2': None}
+nsx_lfp = {'Lilou': 5, 'Nikos2': 2}
+chosen_els = {'Lilou': range(3, 97, 7), 'Nikos2': range(1, 97, 7)}
+chosen_el = {
+    'Lilou': chosen_els['Lilou'][0],
+    'Nikos2': chosen_els['Nikos2'][0]}
+trial_indexes = range(14)
+trial_index = trial_indexes[0]
+chosen_events = ['TS-ON', 'WS-ON', 'CUE-ON', 'CUE-OFF', 'GO-ON', 'SR-ON',
+                 'RW-ON', 'WS-OFF']  # , 'RW-OFF'
+
+# =============================================================================
+# Load data and metadata for a monkey
+# =============================================================================
+monkey = 'Lilou'
+# monkey = 'Nikos2'
+
+datafile = get_monkey_datafile(monkey)
+
+session = reachgraspio.ReachGraspIO(
+    filename=os.path.join(datasetdir, datafile),
+    odml_directory=datasetdir,
+    verbose=False)
+
+bl = session.read_block(
+    index=None,
+    name=None,
+    description=None,
+    nsx_to_load=nsx_lfp[monkey],
+    n_starts=None,
+    n_stops=None,
+    channels=chosen_els[monkey],
+    units=[1],  # loading only unit_id 1
+    load_waveforms=False,
+    load_events=True,
+    scaling='voltage',
+    lazy=False,
+    cascade=True)
+
+seg = bl.segments[0]
+
+# get start and stop events of trials
+start_events = neo_utils.get_events(
+    seg, properties={
+        'name': 'TrialEvents',
+        'trial_event_labels': 'TS-ON',
+        'performance_in_trial': session.performance_codes['correct_trial']})
+stop_events = neo_utils.get_events(
+    seg, properties={
+        'name': 'TrialEvents',
+        'trial_event_labels': 'RW-ON',
+        'performance_in_trial': session.performance_codes['correct_trial']})
+
+# there should only be one event object for these conditions
+assert len(start_events) == 1
+assert len(stop_events) == 1
+
+# insert epochs from 250 ms before TS-ON to 500 ms after RW-ON, corresponding
+# to complete trials
+neo_utils.add_epoch(
+    seg,
+    start_events[0],
+    stop_events[0],
+    pre=-250 * pq.ms,
+    post=500 * pq.ms,
+    segment_type='complete_trials',
+    trialtype=start_events[0].annotations[
+        'belongs_to_trialtype'])
+
+# access single epoch of this data_segment
+epochs = neo_utils.get_epochs(seg,
+                              properties={'segment_type': 'complete_trials'})
+assert len(epochs) == 1
+
+# cut segments according to inserted 'complete_trials' epochs and reset trial
+# times
+cut_segments = neo_utils.cut_segment_by_epoch(seg,
+                                              epochs[0],
+                                              reset_time=True)
+
+# explicitly adding trial type annotations to cut segments
+for i, cut_seg in enumerate(cut_segments):
+    cut_seg.annotate(trialtype=epochs[0].annotations['trialtype'][i])
+
+# =============================================================================
+# Define figure and subplot axis for first data overview
+# =============================================================================
+fig = plt.figure(facecolor='w')
+fig.set_size_inches(7.0, 9.9)  # (w, h) in inches
+# (7.0, 9.9) corresponds to A4 portrait ratio
+
+gs = gridspec.GridSpec(
+    nrows=2,
+    ncols=2,
+    left=0.1,
+    bottom=0.05,
+    right=0.9,
+    top=0.975,
+    wspace=0.1,
+    hspace=0.1,
+    width_ratios=None,
+    height_ratios=[2, 1])
+
+ax1 = plt.subplot(gs[0, 0])  # top left
+ax2 = plt.subplot(gs[0, 1], sharex=ax1)  # top right
+ax3 = plt.subplot(gs[1, 0], sharex=ax1)  # bottom left
+ax4 = plt.subplot(gs[1, 1], sharex=ax1)  # bottom right
+
+fontdict_titles = {'fontsize': 9, 'fontweight': 'bold'}
+fontdict_axis = {'fontsize': 10, 'fontweight': 'bold'}
+
+# the x coords of the event labels are data, and the y coord are axes
+event_label_transform = transforms.blended_transform_factory(ax1.transData,
+                                                             ax1.transAxes)
+
+trialtype_colors = {
+    'SGHF': 'MediumBlue', 'SGLF': 'Turquoise',
+    'PGHF': 'DarkGreen', 'PGLF': 'YellowGreen',
+    'LFSG': 'Orange', 'LFPG': 'Yellow',
+    'HFSG': 'DarkRed', 'HFPG': 'OrangeRed',
+    'SGSG': 'SteelBlue', 'PGPG': 'LimeGreen',
+    None: 'black'}
+
+event_colors = {
+    'TS-ON': 'indigo', 'TS-OFF': 'indigo',
+    'WS-ON': 'purple', 'WS-OFF': 'purple',
+    'CUE-ON': 'crimson', 'CUE-OFF': 'crimson',
+    'GO-ON': 'orangered', 'GO-OFF': 'orangered',
+    'SR-ON': 'darkorange',
+    'RW-ON': 'orange', 'RW-OFF': 'orange'}
+
+electrode_cmap = plt.get_cmap('bone')
+electrode_colors = [electrode_cmap(x) for x in
+                    np.tile(np.array([0.3, 0.7]), len(chosen_els[monkey]) / 2)]
+
+time_unit = 'ms'
+lfp_unit = 'uV'
+
+# define scaling factors for analogsignals
+anasig_std = np.mean([np.std(anasig.rescale(lfp_unit)) for anasig in
+                      cut_segments[trial_index].analogsignals]) \
+    * getattr(pq, lfp_unit)
+anasig_offset = 3 * anasig_std
+
+
+# =============================================================================
+# SUPPLEMENTARY PLOTTING FUNCTIONS
+# =============================================================================
+
+def add_scalebar(ax, std):
+    # scale bar x coords are in axes coordinates, y coords in data coordinates
+    scalebar_transform = transforms.blended_transform_factory(ax.transAxes,
+                                                              ax.transData)
+    # adding scalebar
+    yscalebar = max(int(std.rescale(lfp_unit)), 1) * getattr(pq, lfp_unit) * 2
+    scalebar_offset = -2 * std
+    ax.vlines(x=0.4,
+              ymin=(scalebar_offset - yscalebar).magnitude,
+              ymax=scalebar_offset.magnitude,
+              color='k',
+              linewidth=4,
+              transform=scalebar_transform)
+    ax.text(0.4, (scalebar_offset - 0.5 * yscalebar).magnitude,
+            ' %i %s' % (yscalebar.magnitude, lfp_unit),
+            ha="left", va="center", rotation=0, color='k',
+            size=8, transform=scalebar_transform)
+
+
+# =============================================================================
+# PLOT DATA OF SINGLE TRIAL (left plots)
+# =============================================================================
+
+# get data of selected trial
+selected_trial = cut_segments[trial_index]
+
+# PLOT DATA FOR EACH CHOSEN ELECTRODE
+for el_idx, electrode_id in enumerate(chosen_els[monkey]):
+
+    # PLOT ANALOGSIGNALS in upper plot
+    anasigs = selected_trial.filter(
+        channel_id=electrode_id, objects=AnalogSignal)
+    for anasig in anasigs:
+        ax1.plot(anasig.times.rescale(time_unit),
+                 np.asarray(anasig.rescale(lfp_unit))
+                 + anasig_offset.magnitude * el_idx,
+                 color=electrode_colors[el_idx])
+
+    # PLOT SPIKETRAINS in lower plot
+    spiketrains = selected_trial.filter(
+        channel_id=electrode_id, objects=SpikeTrain)
+    for spiketrain in spiketrains:
+        ax3.plot(spiketrain.times.rescale(time_unit),
+                 np.zeros(len(spiketrain.times)) + el_idx, 'k|')
+
+# PLOT EVENTS in both plots
+for event_type in chosen_events:
+    # get events of each chosen event type
+    event_data = neo_utils.get_events(selected_trial,
+                                      {'trial_event_labels': event_type})
+    for event in event_data:
+        event_color = event_colors[event.annotations['trial_event_labels'][0]]
+        # adding lines
+        for ax in [ax1, ax3]:
+            ax.axvline(event.times.rescale(time_unit),
+                       color=event_color,
+                       zorder=0.5)
+        # adding labels
+        ax1.text(event.times.rescale(time_unit), 0,
+                 event.annotations['trial_event_labels'][0],
+                 ha="center", va="top", rotation=45, color=event_color,
+                 size=8, transform=event_label_transform)
+
+# SUBPLOT ADJUSTMENTS
+ax1.set_title('single trial', fontdict=fontdict_titles)
+ax1.set_ylabel('electrode id', fontdict=fontdict_axis)
+ax1.set_yticks(np.arange(len(chosen_els[monkey])) * anasig_offset)
+ax1.set_yticklabels(chosen_els[monkey])
+
+ax1.autoscale(enable=True, axis='y')
+plt.setp(ax1.get_xticklabels(), visible=False)  # show no xticklabels
+ax3.set_ylabel('electrode id', fontdict=fontdict_axis)
+ax3.set_yticks(range(0, len(chosen_els[monkey])))
+ax3.set_yticklabels(np.asarray(chosen_els[monkey]))
+ax3.set_ylim(-1, len(chosen_els[monkey]))
+ax3.set_xlabel('time [%s]' % time_unit, fontdict=fontdict_axis)
+# ax3.autoscale(axis='y')
+
+# =============================================================================
+# PLOT DATA OF SINGLE ELECTRODE
+# =============================================================================
+
+# plot data for each chosen trial
+for trial_idx, trial_id in enumerate(trial_indexes):
+    trial_data = cut_segments[trial_id].filter(channel_id=chosen_el[monkey])
+    trial_type = trial_data[0].parents[0].annotations['trialtype']
+    trial_color = trialtype_colors[trial_type]
+    for t_data in trial_data:
+
+        # PLOT ANALOGSIGNALS in upper plot
+        if isinstance(t_data, AnalogSignal):
+            ax2.plot(t_data.times.rescale(time_unit),
+                     np.asarray(t_data.rescale(lfp_unit))
+                     + anasig_offset.magnitude * trial_idx,
+                     color=trial_color, zorder=1)
+
+        # PLOT SPIKETRAINS in lower plot
+        elif isinstance(t_data, SpikeTrain):
+            ax4.plot(t_data.times.rescale(time_unit),
+                     np.ones(len(t_data.times)) + trial_idx, 'k|')
+
+    # PLOT EVENTS in both plots
+    for event_type in chosen_events:
+        # get events of each chosen event type
+        event_data = neo_utils.get_events(cut_segments[trial_id],
+                                          {'trial_event_labels': event_type})
+        for event in event_data:
+            color = event_colors[event.annotations['trial_event_labels'][0]]
+            ax2.vlines(x=event.times.rescale(time_unit),
+                       ymin=(trial_idx - 0.5) * anasig_offset,
+                       ymax=(trial_idx + 0.5) * anasig_offset,
+                       color=color,
+                       zorder=2)
+            ax4.vlines(x=event.times.rescale(time_unit),
+                       ymin=trial_idx + 1 - 0.4,
+                       ymax=trial_idx + 1 + 0.4,
+                       color=color,
+                       zorder=0.5)
+
+# SUBPLOT ADJUSTMENTS
+ax2.set_title('single electrode', fontdict=fontdict_titles)
+ax2.set_ylabel('trial id', fontdict=fontdict_axis)
+ax2.set_yticks(np.asarray(trial_indexes) * anasig_offset)
+ax2.set_yticklabels(np.asarray(trial_indexes) + 1)
+ax2.yaxis.set_label_position("right")
+ax2.tick_params(direction='in', length=3, labelleft='off', labelright='on')
+ax2.autoscale(enable=True, axis='y')
+add_scalebar(ax2, anasig_std)
+plt.setp(ax2.get_xticklabels(), visible=False)  # show no xticklabels
+
+ax4.set_ylabel('trial id', fontdict=fontdict_axis)
+ax4.set_xlabel('time [%s]' % time_unit, fontdict=fontdict_axis)
+
+start, end = ax4.get_xlim()
+ax4.xaxis.set_ticks(np.arange(start, end, 1000))
+ax4.xaxis.set_ticks(np.arange(start, end, 500), minor=True)
+ax4.set_yticks(range(1, len(trial_indexes) + 1))
+ax4.set_yticklabels(np.asarray(trial_indexes) + 1)
+ax4.yaxis.set_label_position("right")
+ax4.tick_params(direction='in', length=3, labelleft='off', labelright='on')
+ax4.autoscale(enable=True, axis='y')
+
+# GENERAL PLOT ADJUSTMENTS
+# adjust font sizes of ticks
+for ax in [ax4.yaxis, ax4.xaxis, ax3.xaxis, ax3.yaxis]:
+    for tick in ax.get_major_ticks():
+        tick.label.set_fontsize(10)
+
+# adjust time range on x axis
+t_min = np.min([cut_segments[tid].t_start.rescale(time_unit)
+                for tid in trial_indexes])
+t_max = np.max([cut_segments[tid].t_stop.rescale(time_unit)
+                for tid in trial_indexes])
+ax1.set_xlim(t_min, t_max)
+add_scalebar(ax1, anasig_std)
+
+# =============================================================================
+# SAVE FIGURE
+# =============================================================================
+fname = 'data_overview_2_%s' % monkey
+for file_format in ['eps', 'pdf', 'png']:
+    fig.savefig(fname + '.%s' % file_format, dpi=400, format=file_format)

+ 1 - 0
code/elephant/AUTHORS.txt

@@ -0,0 +1 @@
+See doc/authors.rst

+ 10 - 0
code/elephant/LICENSE.txt

@@ -0,0 +1,10 @@
+Copyright (c) 2014, Elephant authors and contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+* Neither the names of the copyright holders nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 23 - 0
code/elephant/README.rst

@@ -0,0 +1,23 @@
+Elephant - Electrophysiology Analysis Toolkit
+=============================================
+
+Elephant is a package for the analysis of neurophysiology data, based on Neo.
+
+Code status
+-----------
+
+.. image:: https://travis-ci.org/NeuralEnsemble/elephant.png?branch=master
+   :target: https://travis-ci.org/NeuralEnsemble/elephant
+   :alt: Unit Test Status
+.. image:: https://coveralls.io/repos/NeuralEnsemble/elephant/badge.png
+   :target: https://coveralls.io/r/NeuralEnsemble/elephant
+   :alt: Unit Test Coverage
+.. image:: https://requires.io/github/NeuralEnsemble/elephant/requirements.png?branch=master
+   :target: https://requires.io/github/NeuralEnsemble/elephant/requirements/?branch=master
+   :alt: Requirements Status
+.. image:: https://readthedocs.org/projects/elephant/badge/?version=latest
+   :target: https://readthedocs.org/projects/elephant/?badge=latest
+   :alt: Documentation Status
+
+:copyright: Copyright 2014-2015 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD License, see LICENSE.txt for details.

+ 107 - 0
code/elephant/continuous_integration/install.sh

@@ -0,0 +1,107 @@
+#!/bin/bash
+# Based on a script from scikit-learn
+
+# This script is meant to be called by the "install" step defined in
+# .travis.yml. See http://docs.travis-ci.com/ for more details.
+# The behavior of the script is controlled by environment variables defined
+# in the .travis.yml in the top level folder of the project.
+
+set -e
+
+# Fix the compilers to work around the Python 3.4 build unexpectedly
+# looking for g++44.
+export CC=gcc
+export CXX=g++
+
+if [[ "$DISTRIB" == "conda_min" ]]; then
+    # Deactivate the travis-provided virtual environment and setup a
+    # conda-based environment instead
+    deactivate
+
+    # Use the miniconda installer for faster download / install of conda
+    # itself
+    wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
+        -O miniconda.sh
+    chmod +x miniconda.sh && ./miniconda.sh -b -p $HOME/miniconda
+    export PATH=/home/travis/miniconda/bin:$PATH
+    conda config --set always_yes yes
+    conda update --yes conda
+
+    # Configure the conda environment and put it in the path using the
+    # provided versions
+    conda create -n testenv --yes python=$PYTHON_VERSION pip nose coverage \
+        six=$SIX_VERSION numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION
+    source activate testenv
+    conda install libgfortran=1
+
+    if [[ "$INSTALL_MKL" == "true" ]]; then
+        # Make sure that MKL is used
+        conda install --yes --no-update-dependencies mkl
+    else
+        # Make sure that MKL is not used
+        conda remove --yes --features mkl || echo "MKL not installed"
+    fi
+
+elif [[ "$DISTRIB" == "conda" ]]; then
+    # Deactivate the travis-provided virtual environment and setup a
+    # conda-based environment instead
+    deactivate
+
+    # Use the miniconda installer for faster download / install of conda
+    # itself
+    wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
+        -O miniconda.sh
+    chmod +x miniconda.sh && ./miniconda.sh -b -p $HOME/miniconda
+    export PATH=/home/travis/miniconda/bin:$PATH
+    conda config --set always_yes yes
+    conda update --yes conda
+
+    # Configure the conda environment and put it in the path using the
+    # provided versions
+    conda create -n testenv --yes python=$PYTHON_VERSION pip nose coverage six=$SIX_VERSION \
+        numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION pandas=$PANDAS_VERSION scikit-learn
+    source activate testenv
+
+    if [[ "$INSTALL_MKL" == "true" ]]; then
+        # Make sure that MKL is used
+        conda install --yes --no-update-dependencies mkl
+    else
+        # Make sure that MKL is not used
+        conda remove --yes --features mkl || echo "MKL not installed"
+    fi
+
+    if [[ "$COVERAGE" == "true" ]]; then
+        pip install coveralls
+    fi
+
+    python -c "import pandas; import os; assert os.getenv('PANDAS_VERSION') == pandas.__version__"
+
+elif [[ "$DISTRIB" == "ubuntu" ]]; then
+    deactivate
+    # Create a new virtualenv using system site packages for numpy and scipy
+    virtualenv --system-site-packages testenv
+    source testenv/bin/activate
+    pip install nose
+    pip install coverage
+    pip install numpy==$NUMPY_VERSION
+    pip install scipy==$SCIPY_VERSION
+    pip install six==$SIX_VERSION
+    pip install quantities
+fi
+
+if [[ "$COVERAGE" == "true" ]]; then
+    pip install coveralls
+fi
+
+# pip install neo==0.3.3
+wget https://github.com/NeuralEnsemble/python-neo/archive/master.tar.gz
+tar -xzvf master.tar.gz
+pushd python-neo-master
+python setup.py install
+popd
+
+pip install .
+
+
+python -c "import numpy; import os; assert os.getenv('NUMPY_VERSION') == numpy.__version__"
+python -c "import scipy; import os; assert os.getenv('SCIPY_VERSION') == scipy.__version__"

+ 19 - 0
code/elephant/continuous_integration/test_script.sh

@@ -0,0 +1,19 @@
+#!/bin/bash
+# Based on a script from scikit-learn
+
+# This script is meant to be called by the "script" step defined in
+# .travis.yml. See http://docs.travis-ci.com/ for more details.
+# The behavior of the script is controlled by environment variables defined
+# in the .travis.yml in the top level folder of the project.
+
+set -e
+
+python --version
+python -c "import numpy; print('numpy %s' % numpy.__version__)"
+python -c "import scipy; print('scipy %s' % scipy.__version__)"
+
+if [[ "$COVERAGE" == "true" ]]; then
+    nosetests --with-coverage --cover-package=elephant
+else
+    nosetests
+fi

+ 153 - 0
code/elephant/doc/Makefile

@@ -0,0 +1,153 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Elephant.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Elephant.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/Elephant"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Elephant"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."

+ 43 - 0
code/elephant/doc/authors.rst

@@ -0,0 +1,43 @@
+.. _authors:
+
+************************
+Authors and contributors
+************************
+
+The following people have contributed code and/or ideas to the current version
+of Elephant. The institutional affiliations are those at the time of the
+contribution, and may not be the current affiliation of a contributor.
+
+* Alper Yegenoglu [1]
+* Andrew Davison [2]
+* Detlef Holstein [2]
+* Eilif Muller [3, 4]
+* Emiliano Torre [1]
+* Espen Hagen [1]
+* Jan Gosmann [6, 8]
+* Julia Sprenger [1]
+* Junji Ito [1]
+* Michael Denker [1]
+* Paul Chorley [1]
+* Pierre Yger [2]
+* Pietro Quaglio [1]
+* Richard Meyes [1]
+* Vahid Rostami [1]
+* Subhasis Ray [5]
+* Robert Pröpper [6]
+* Richard C Gerkin [7]
+* Bartosz Telenczuk [2]
+* Chaitanya Chintaluri [9]
+* Michał Czerwiński [9]
+
+1. Institute of Neuroscience and Medicine (INM-6), Computational and Systems Neuroscience & Institute for Advanced Simulation (IAS-6), Theoretical Neuroscience, Jülich Research Centre and JARA, Jülich, Germany
+2. Unité de Neurosciences, Information et Complexité, CNRS UPR 3293, Gif-sur-Yvette, France
+3. Electronic Visions Group, Kirchhoff-Institute for Physics, University of Heidelberg, Germany
+4. Brain-Mind Institute, Ecole Polytechnique Fédérale de Lausanne, Switzerland
+5. NIH–NICHD, Laboratory of Cellular and Synaptic Physiology, Bethesda, Maryland 20892, USA
+6. Neural Information Processing Group, Institute of Software Engineering and Theoretical Computer Science, Technische Universität Berlin, Germany
+7. Arizona State University School of Life Sciences, USA
+8. Computational Neuroscience Research Group (CNRG), Waterloo Centre for Theoretical Neuroscience, Waterloo, Canada
+9. Nencki Institute of Experimental Biology, Warsaw, Poland
+
+If we've somehow missed you off the list, we're very sorry; please let us know.

+ 310 - 0
code/elephant/doc/conf.py

@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+#
+# Elephant documentation build configuration file, created by
+# sphinx-quickstart on Wed Feb  5 17:11:26 2014.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, '..')
+
+# -- General configuration -----------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
+              'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax',
+              'sphinx.ext.viewcode', 'numpydoc']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Elephant'
+authors = u'Elephant authors and contributors'
+copyright = u'2014-2017, ' + authors
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.4'
+# The full version, including alpha/beta/rc tags.
+release = '0.4.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'sphinxdoc'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_logo = 'images/elephant_logo_sidebar.png'
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = 'images/elephant_favicon.ico'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'elephantdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'elephant.tex', u'Elephant Documentation',
+   authors, 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output --------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'elephant', u'Elephant Documentation',
+     [authors], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output ------------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    ('index', 'Elephant', u'Elephant Documentation',
+     authors, 'Elephant', 'Elephant is a package for the analysis of neurophysiology data.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+
+# -- Options for Epub output ---------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+epub_author = authors
+epub_publisher = authors
+epub_copyright = copyright
+
+# The language of the text. It defaults to the language option
+# or en if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# A tuple containing the cover image and cover page html template filenames.
+#epub_cover = ()
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files shat should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+#epub_exclude_files = []
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'http://docs.python.org/': None}
+
+# Use more reliable mathjax source
+mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
+
+# Remove the copyright notice from docstrings:
+def process_docstring_remove_copyright(app, what, name, obj, options, lines):
+    copyright_line = None
+    for i, line in enumerate(lines):
+        if line.startswith(':copyright:'):
+            copyright_line = i
+            break
+    if copyright_line is not None:
+        while len(lines) > copyright_line:
+            lines.pop()
+
+
+def setup(app):
+    app.connect('autodoc-process-docstring', process_docstring_remove_copyright)

+ 223 - 0
code/elephant/doc/developers_guide.rst

@@ -0,0 +1,223 @@
+=================
+Developers' guide
+=================
+
+These instructions are for developing on a Unix-like platform, e.g. Linux or
+Mac OS X, with the bash shell. If you develop on Windows, please get in touch.
+
+
+Mailing lists
+-------------
+
+General discussion of Elephant development takes place in the `NeuralEnsemble Google
+group`_.
+
+Discussion of issues specific to a particular ticket in the issue tracker should
+take place on the tracker.
+
+
+Using the issue tracker
+-----------------------
+
+If you find a bug in Elephant, please create a new ticket on the `issue tracker`_,
+setting the type to "defect".
+Choose a name that is as specific as possible to the problem you've found, and
+in the description give as much information as you think is necessary to
+recreate the problem. The best way to do this is to create the shortest possible
+Python script that demonstrates the problem, and attach the file to the ticket.
+
+If you have an idea for an improvement to Elephant, create a ticket with type
+"enhancement". If you already have an implementation of the idea, open a pull request.
+
+
+Requirements
+------------
+
+See :doc:`install`. We strongly recommend using virtualenv_ or similar.
+
+
+Getting the source code
+-----------------------
+
+We use the Git version control system. The best way to contribute is through
+GitHub_. You will first need a GitHub account, and you should then fork the
+repository at https://github.com/NeuralEnsemble/elephant
+(see http://help.github.com/fork-a-repo/).
+
+To get a local copy of the repository::
+
+    $ cd /some/directory
+    $ git clone git@github.com:<username>/elephant.git
+    
+Now you need to make sure that the ``elephant`` package is on your PYTHONPATH.
+You can do this by installing Elephant::
+
+    $ cd elephant
+    $ python setup.py install
+    $ python3 setup.py install
+
+but if you do this, you will have to re-run ``setup.py install`` any time you make
+changes to the code. A better solution is to install Elephant with the *develop* option,
+which avoids reinstalling whenever the code changes::
+
+    $ python setup.py develop
+
+or::
+
+    $ pip install -e .
+
+To update to the latest version from the repository::
+
+    $ git pull
+
+
+Running the test suite
+----------------------
+
+Before you make any changes, run the test suite to make sure all the tests pass
+on your system::
+
+    $ cd elephant/test
+
+With Python 2.7 or 3.x::
+
+    $ python -m unittest discover
+    $ python3 -m unittest discover
+
+If you have nose installed::
+
+    $ nosetests
+
+At the end, if you see "OK", then all the tests
+passed (or were skipped because certain dependencies are not installed);
+otherwise it will report on tests that failed or produced errors.
+
+
+Writing tests
+-------------
+
+You should try to write automated tests for any new code that you add. If you
+have found a bug and want to fix it, first write a test that isolates the bug
+(and that therefore fails with the existing codebase). Then apply your fix and
+check that the test now passes.
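+
+A minimal sketch of such a test, written against the existing
+:func:`elephant.statistics.mean_firing_rate` function purely for illustration
+(adapt the imports and assertions to the code you are actually adding)::
+
+    import unittest
+
+    import quantities as pq
+    from neo import SpikeTrain
+
+    from elephant.statistics import mean_firing_rate
+
+
+    class MeanFiringRateTestCase(unittest.TestCase):
+        def test_five_spikes_in_ten_seconds(self):
+            # 5 spikes in a 10 s spike train should give a rate of 0.5 Hz
+            st = SpikeTrain([1, 3, 5, 7, 9] * pq.s,
+                            t_start=0 * pq.s, t_stop=10 * pq.s)
+            rate = mean_firing_rate(st)
+            self.assertAlmostEqual(float(rate.rescale(pq.Hz).magnitude), 0.5)
+
+
+    if __name__ == '__main__':
+        unittest.main()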
+
+To see how well the tests cover the code base, run::
+
+    $ nosetests --with-coverage --cover-package=elephant --cover-erase
+
+
+Working on the documentation
+----------------------------
+
+The documentation is written in `reStructuredText`_, using the `Sphinx`_
+documentation system. To build the documentation::
+
+    $ cd elephant/doc
+    $ make html
+    
+Then open `some/directory/elephant/doc/_build/html/index.html` in your browser.
+Docstrings should conform to the `NumPy docstring standard`_.
+
+To check that all example code in the documentation is correct, run::
+
+    $ make doctest
+
+To check that all URLs in the documentation are correct, run::
+
+    $ make linkcheck
+
+
+Committing your changes
+-----------------------
+
+Once you are happy with your changes, **run the test suite again to check
+that you have not introduced any new bugs**. Then you can commit them to your
+local repository::
+
+    $ git commit -m 'informative commit message'
+    
+If this is your first commit to the project, please add your name and
+affiliation/employer to :file:`doc/authors.rst`.
+
+You can then push your changes to your online repository on GitHub::
+
+    $ git push
+    
+Once you think your changes are ready to be included in the main Elephant repository,
+open a pull request on GitHub (see https://help.github.com/articles/using-pull-requests).
+
+
+Python 3
+--------
+
+Elephant should work with Python 2.7 and Python 3.
+
+So far, we have managed to write code that works with both Python 2 and 3.
+Mainly this involves avoiding the ``print`` statement (use ``logging.info``
+instead), and putting ``from __future__ import division`` at the beginning of
+any file that uses division.
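+
+As a sketch, a new module written in this style might start like this (the
+function and logger names are purely illustrative)::
+
+    # -*- coding: utf-8 -*-
+    from __future__ import division, print_function
+
+    import logging
+
+    logger = logging.getLogger(__name__)
+
+
+    def spikes_per_second(n_spikes, duration):
+        # true division under both Python 2 and 3 due to the __future__ import
+        rate = n_spikes / duration
+        logger.info('firing rate: %f', rate)
+        return rate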
+
+If in doubt, `Porting to Python 3`_ by Lennart Regebro is an excellent resource.
+
+The most important thing to remember is to run tests with at least one version
+of Python 2 and at least one version of Python 3. There is generally no problem
+in having multiple versions of Python installed on your computer at once: e.g.,
+on Ubuntu Python 2 is available as `python` and Python 3 as `python3`, while
+on Arch Linux Python 2 is `python2` and Python 3 `python`. See `PEP394`_ for
+more on this.
+
+
+Coding standards and style
+--------------------------
+
+All code should conform as much as possible to `PEP 8`_, and should run with
+Python 2.7 and 3.2-3.5.
+
+
+Making a release
+----------------
+
+.. TODO: discuss branching/tagging policy.
+
+.. Add a section in /doc/releases/<version>.rst for the release.
+
+First check that the version string (in :file:`elephant/__init__.py`, :file:`setup.py`,
+:file:`doc/conf.py` and :file:`doc/install.rst`) is correct.
+
+To build a source package::
+
+    $ python setup.py sdist
+
+To upload the package to `PyPI`_ (if you have the necessary permissions)::
+
+    $ python setup.py sdist upload
+
+.. should we also distribute via software.incf.org
+
+Finally, tag the release in the Git repository and push it::
+
+    $ git tag <version>
+    $ git push --tags upstream
+    
+
+.. make a release branch
+
+
+
+.. _Python: http://www.python.org
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
+.. _neo: http://neuralensemble.org/neo
+.. _coverage: http://nedbatchelder.com/code/coverage/
+.. _`PEP 8`: http://www.python.org/dev/peps/pep-0008/
+.. _`issue tracker`: https://github.com/NeuralEnsemble/elephant/issues
+.. _`Porting to Python 3`: http://python3porting.com/
+.. _`NeuralEnsemble Google group`: http://groups.google.com/group/neuralensemble
+.. _reStructuredText: http://docutils.sourceforge.net/rst.html
+.. _Sphinx: http://sphinx.pocoo.org/
+.. _numpy: http://www.numpy.org/
+.. _quantities: http://pypi.python.org/pypi/quantities
+.. _PEP394: http://www.python.org/dev/peps/pep-0394/
+.. _PyPI: http://pypi.python.org
+.. _GitHub: http://github.com
+.. _`NumPy docstring standard`: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/

+ 34 - 0
code/elephant/doc/environment.yml

@@ -0,0 +1,34 @@
+name: elephant
+dependencies:
+- libgfortran=1.0=0
+- alabaster=0.7.7=py35_0
+- babel=2.2.0=py35_0
+- docutils=0.12=py35_0
+- jinja2=2.8=py35_0
+- markupsafe=0.23=py35_0
+- mkl=11.3.1=0
+- numpy=1.10.4=py35_0
+- numpydoc=0.5=py35_1
+- openssl=1.0.2g=0
+- pip=8.1.1=py35_0
+- pygments=2.1.1=py35_0
+- python=3.5.1=0
+- pytz=2016.2=py35_0
+- readline=6.2=2
+- scipy=0.17.0=np110py35_0
+- setuptools=20.3=py35_0
+- six=1.10.0=py35_0
+- scikit-learn==0.17.1
+- snowballstemmer=1.2.1=py35_0
+- sphinx=1.3.5=py35_0
+- sphinx_rtd_theme=0.1.9=py35_0
+- sqlite=3.9.2=0
+- tk=8.5.18=0
+- wheel=0.29.0=py35_0
+- xz=5.0.5=1
+- zlib=1.2.8=0
+- pip:
+  - https://github.com/NeuralEnsemble/python-neo/archive/snapshot-20150821.zip
+  - quantities
+  - sphinx-rtd-theme==0.1.9
+ 

BIN
code/elephant/doc/images/elephant_favicon.ico


BIN
code/elephant/doc/images/elephant_logo.png


BIN
code/elephant/doc/images/elephant_logo_sidebar.png


BIN
code/elephant/doc/images/elephant_structure.png


BIN
code/elephant/doc/images/tutorials/tutorial_1_figure_1.png


BIN
code/elephant/doc/images/tutorials/tutorial_1_figure_2.png


+ 44 - 0
code/elephant/doc/index.rst

@@ -0,0 +1,44 @@
+.. Elephant documentation master file, created by
+   sphinx-quickstart on Thu Aug 22 08:39:42 2013.
+
+
+*********************************************
+Elephant - Electrophysiology Analysis Toolkit
+*********************************************
+
+Synopsis
+--------
+    
+
+*Elephant* is a toolbox for the analysis of electrophysiological data based on the Neo_ framework. This manual covers the installation of Elephant in an existing Python environment, several tutorials to help get you started, information on the structure and conventions of the library, a list of modules, and help for future contributors to Elephant.
+
+	
+Table of Contents
+-----------------
+
+.. toctree::
+    :maxdepth: 1
+
+    overview
+    install
+    tutorial
+    modules
+    developers_guide
+    authors
+    release_notes	       
+
+   
+
+.. Indices and tables
+.. ==================
+
+.. * :ref:`genindex`
+.. * :ref:`modindex`
+.. * :ref:`search`
+
+
+.. _`Neo`: https://github.com/NeuralEnsemble/python-neo
+
+
+.. |date| date::
+.. |time| date:: %H:%M

+ 107 - 0
code/elephant/doc/install.rst

@@ -0,0 +1,107 @@
+.. _install:
+
+****************************
+Prerequisites / Installation
+****************************
+
+Elephant is a pure Python package, so it should be easy to install on any system.
+
+
+Dependencies
+============
+
+The following packages are required to use Elephant:
+    * Python_ >= 2.7
+    * numpy_ >= 1.8.2
+    * scipy_ >= 0.14.0
+    * quantities_ >= 0.10.1
+    * neo_ >= 0.5.0
+
+The following packages are optional in order to run certain parts of Elephant:
+    * For using the pandas_bridge module:
+        * pandas >= 0.14.1
+    * For using the ASSET analysis:
+        * scikit-learn >= 0.15.1
+    * For building the documentation:
+        * numpydoc >= 0.5
+        * sphinx >= 1.2.2
+    * For running tests:
+        * nose >= 1.3.3
+
+All dependencies can be found on the Python package index (PyPI).
+
+
+Debian/Ubuntu
+-------------
+For Debian/Ubuntu, we recommend installing numpy and scipy as system packages using apt-get::
+    
+    $ apt-get install python-numpy python-scipy python-pip python-six
+
+Further packages are found on the Python Package Index (PyPI) and should be installed with pip_::
+    
+    $ pip install quantities
+    $ pip install neo
+
+We highly recommend installing these packages either in a virtual environment provided by virtualenv_ or locally in your home directory using the ``--user`` option of pip (e.g., ``pip install --user quantities``); neither approach requires administrator privileges.
+
+Windows/Mac OS X
+----------------
+
+On non-Linux operating systems we recommend using the Anaconda_ Python distribution, and installing all dependencies in a `Conda environment`_, e.g.::
+
+    $ conda create -n neuroscience python numpy scipy pip six
+    $ source activate neuroscience
+    $ pip install quantities
+    $ pip install neo
+
+
+Installation
+============
+
+Automatic installation from PyPI
+--------------------------------
+
+The easiest way to install Elephant is via pip_::
+
+    $ pip install elephant    
+
+
+Manual installation from PyPI
+-----------------------------
+
+To download and install manually, download the latest package from http://pypi.python.org/pypi/elephant
+
+Then::
+
+    $ tar xzf elephant-0.4.1.tar.gz
+    $ cd elephant-0.4.1
+    $ python setup.py install
+    
+or::
+
+    $ python3 setup.py install
+    
+depending on which version of Python you are using.
+
+
+Installation of the latest build from source
+--------------------------------------------
+
+To install the latest version of Elephant from the Git repository::
+
+    $ git clone git://github.com/NeuralEnsemble/elephant.git
+    $ cd elephant
+    $ python setup.py install
+
+
+
+.. _`Python`: http://python.org/
+.. _`numpy`: http://www.numpy.org/
+.. _`scipy`: http://scipy.org/scipylib/
+.. _`quantities`: http://pypi.python.org/pypi/quantities
+.. _`neo`: http://pypi.python.org/pypi/neo
+.. _`pip`: http://pypi.python.org/pypi/pip
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+.. _`this snapshot`: https://github.com/NeuralEnsemble/python-neo/archive/snapshot-20150821.zip
+.. _Anaconda: http://continuum.io/downloads
+.. _`Conda environment`: http://conda.pydata.org/docs/faq.html#creating-new-environments

+ 190 - 0
code/elephant/doc/make.bat

@@ -0,0 +1,190 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Elephant.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Elephant.qhc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+:end

+ 25 - 0
code/elephant/doc/modules.rst

@@ -0,0 +1,25 @@
+****************************
+Function Reference by Module
+****************************
+
+.. toctree::
+   :maxdepth: 2
+
+   reference/statistics
+   reference/signal_processing
+   reference/spectral
+   reference/kernels
+   reference/spike_train_dissimilarity
+   reference/sta
+   reference/spike_train_correlation
+   reference/unitary_event_analysis
+   reference/cubic
+   reference/asset
+   reference/spike_train_generation
+   reference/spike_train_surrogates
+   reference/conversion
+   reference/csd
+   reference/neo_tools
+   reference/pandas_bridge
+
+

+ 113 - 0
code/elephant/doc/overview.rst

@@ -0,0 +1,113 @@
+********
+Overview
+********
+
+What is Elephant?
+=====================
+
+As a result of the complexity inherent in modern recording technologies that yield massively parallel data streams and in advanced analysis methods to explore such rich data sets, the need for more reproducible research in the neurosciences can no longer be ignored. Reproducibility rests on building workflows that may allow users to transparently trace their analysis steps from data acquisition to final publication. A key component of such a workflow is a set of defined analysis methods to perform the data processing.
+
+Elephant (Electrophysiology Analysis Toolkit) is an emerging open-source, community-centered library for the analysis of electrophysiological data in the Python programming language. The focus of Elephant is on generic analysis functions for spike train data and time series recordings from electrodes, such as local field potentials (LFP) or intracellular voltages. In addition to providing a common platform for analysis codes from different laboratories, the Elephant project aims to provide a consistent and homogeneous analysis framework that is built on a modular foundation. Elephant is the direct successor to NeuroTools [#f1]_ and maintains ties to complementary projects such as OpenElectrophy [#f2]_ and spykeviewer [#f3]_.
+
+* Analysis functions use consistent data formats and conventions as input arguments and outputs. Electrophysiological data will generally be represented by data models defined by the Neo_ [#f4]_ project.
+* Library functions are based on a set of core functions for commonly used operations, such as sliding windows, converting data to alternate representations, or the generation of surrogates for hypothesis testing.
+* Accepted analysis functions must be equipped with a range of unit tests to ensure a high standard of code quality.
+
+
+Elephant library structure
+==========================
+
+Elephant is a standard python package and is structured into a number of submodules. The following is a sketch of the layout of the Elephant library (0.3.0 release).
+
+.. figure:: images/elephant_structure.png
+    :width: 400 px
+    :align: center
+    :figwidth: 80 %
+    
+    Modules of the Elephant library. Modules containing analysis functions are colored in blue shades, core functionality in green shades.
+   
+
+Conceptually, the modules of the Elephant library can be divided into those related to a specific category of analysis methods and supporting modules that provide a layer of core utility functions. To avoid unnecessary hierarchical clutter, all modules live directly at the top level of the Elephant package in the ``elephant`` subdirectory. Unit tests for all functions are located in the ``elephant/test`` subdirectory and are named according to the module name. This documentation is located in the top-level ``doc`` subdirectory.
+
+In the following we provide a brief overview of the modules available in Elephant.
+
+
+Analysis modules
+----------------
+
+``statistics``
+^^^^^^^^^^^^^^
+Statistical measures of spike trains (e.g., Fano factor) and functions to estimate firing rates.
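+
+A short usage sketch (argument names may differ slightly between Elephant
+versions)::
+
+    import quantities as pq
+    from neo import SpikeTrain
+    from elephant.statistics import isi, mean_firing_rate
+
+    st = SpikeTrain([0.1, 0.5, 1.2, 2.3] * pq.s, t_stop=3.0 * pq.s)
+    print(mean_firing_rate(st))  # average rate over [t_start, t_stop]
+    print(isi(st))               # inter-spike intervals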
+
+``signal_processing``
+^^^^^^^^^^^^^^^^^^^^^
+Basic processing procedures for analog signals (e.g., performing a z-score of a signal, or filtering a signal).
+
+``spectral``
+^^^^^^^^^^^^
+Identification of spectral properties in analog signals (e.g., the power spectrum).
+
+``kernels``
+^^^^^^^^^^^^^^
+A class that provides representations for commonly used kernel functions.
+
+``spike_train_dissimilarity_measures``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Spike train metrics (e.g., the Victor-Purpura measure) to measure the (dis-)similarity between spike trains.
+
+``sta``
+^^^^^^^
+Calculate the spike-triggered average and spike-field-coherence of an analog signal.
+
+``spike_train_correlation``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Functions to quantify correlations between sets of spike trains.
+
+``unitary_event_analysis``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+Determine periods where neurons synchronize their activity beyond chance level.
+
+``cubic``
+^^^^^^^^^
+Implements the Cumulant Based Inference of higher-order Correlation (CuBIC) method to detect the presence of higher-order correlations in massively parallel data based on the complexity distribution of the data.
+
+``asset``
+^^^^^^^^^
+Implementation of the Analysis of Sequences of Synchronous EvenTs (ASSET) to detect, in particular, synfire chain-like activity.
+
+``csd``
+^^^^^^^
+Inverse and standard methods to estimate the current source density (CSD) of laminar LFP recordings.
+
+
+Supporting modules
+------------------
+
+``conversion``
+^^^^^^^^^^^^^^
+This module converts standard data representations (e.g., a spike train stored as a Neo ``SpikeTrain`` object) into other representations that are useful for performing calculations on the data. An example is the representation of a spike train as a sequence of 0-1 values (*binned spike train*).
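+
+A short sketch of binning a spike train (in this Elephant version the bin width
+argument is called ``binsize``; check the module documentation if the name has
+changed)::
+
+    import quantities as pq
+    from neo import SpikeTrain
+    from elephant.conversion import BinnedSpikeTrain
+
+    st = SpikeTrain([0.1, 0.5, 1.2, 2.3] * pq.s, t_stop=3.0 * pq.s)
+    bst = BinnedSpikeTrain(st, binsize=500 * pq.ms)
+    print(bst.to_array())  # matrix of spike counts per bin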
+
+``spike_train_generation``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+This module provides functions to generate spike trains according to prescribed stochastic models (e.g., a Poisson spike train). 
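+
+For example, a homogeneous Poisson spike train can be generated with
+:func:`elephant.spike_train_generation.homogeneous_poisson_process`::
+
+    import quantities as pq
+    from elephant.spike_train_generation import homogeneous_poisson_process
+
+    st = homogeneous_poisson_process(
+        rate=10.0 * pq.Hz, t_start=0.0 * pq.s, t_stop=10.0 * pq.s)
+    print(len(st))  # number of spikes drawn in the 10 s interval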
+
+``spike_train_surrogates``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+This module provides functionality to generate surrogate spike trains from given spike train data. This is particularly useful in the context of determining the significance of analysis results via Monte-Carlo methods.
+
+``neo_tools``
+^^^^^^^^^^^^^
+Provides useful convenience functions to work efficiently with Neo objects.
+
+``pandas_bridge``
+^^^^^^^^^^^^^^^^^
+Bridge from Elephant to the pandas library.
+
+
+References
+==========
+.. [#f1]  http://neuralensemble.org/NeuroTools/
+.. [#f2]  http://neuralensemble.org/OpenElectrophy/
+.. [#f3]  http://spykeutils.readthedocs.org/en/0.4.1/
+.. [#f4]  Garcia et al. (2014) Front.~Neuroinform. 8:10
+.. _`Neo`: http://neuralensemble.org/neo/

+ 6 - 0
code/elephant/doc/reference/asset.rst

@@ -0,0 +1,6 @@
+===================================================
+Analysis of Sequences of Synchronous EvenTs (ASSET) 
+===================================================
+
+.. automodule:: elephant.asset
+   :members:

+ 6 - 0
code/elephant/doc/reference/conversion.rst

@@ -0,0 +1,6 @@
+=======================
+Data format conversions
+=======================
+
+.. automodule:: elephant.conversion
+   :members:

+ 6 - 0
code/elephant/doc/reference/cubic.rst

@@ -0,0 +1,6 @@
+============================================================
+Cumulant Based Inference of higher-order Correlation (CuBIC) 
+============================================================
+
+.. automodule:: elephant.cubic
+   :members:

+ 6 - 0
code/elephant/doc/reference/kernels.rst

@@ -0,0 +1,6 @@
+=======
+Kernels
+=======
+
+.. automodule:: elephant.kernels
+   :members:

+ 6 - 0
code/elephant/doc/reference/neo_tools.rst

@@ -0,0 +1,6 @@
+===========================================
+Utility functions to manipulate Neo objects
+===========================================
+
+.. automodule:: elephant.neo_tools
+   :members:

+ 6 - 0
code/elephant/doc/reference/pandas_bridge.rst

@@ -0,0 +1,6 @@
+============================
+Bridge to the pandas library
+============================
+
+.. automodule:: elephant.pandas_bridge
+   :members:

+ 13 - 0
code/elephant/doc/reference/signal_processing.rst

@@ -0,0 +1,13 @@
+=================
+Signal processing
+=================
+
+.. testsetup::
+
+   import numpy as np
+   from quantities import mV, s, Hz
+   import neo
+   from elephant.signal_processing import zscore
+
+.. automodule:: elephant.signal_processing
+   :members:

+ 6 - 0
code/elephant/doc/reference/spectral.rst

@@ -0,0 +1,6 @@
+=================
+Spectral analysis
+=================
+
+.. automodule:: elephant.spectral
+   :members:

+ 12 - 0
code/elephant/doc/reference/spike_train_correlation.rst

@@ -0,0 +1,12 @@
+=======================
+Spike train correlation
+=======================
+
+.. testsetup::
+
+   from quantities import Hz, s, ms
+   from elephant.spike_train_correlation import corrcoef
+
+
+.. automodule:: elephant.spike_train_correlation
+   :members:

+ 8 - 0
code/elephant/doc/reference/spike_train_dissimilarity.rst

@@ -0,0 +1,8 @@
+=================================================
+Spike Train Dissimilarity / Spike Train Synchrony
+=================================================
+
+
+.. automodule:: elephant.spike_train_dissimilarity
+   :members:
+

+ 11 - 0
code/elephant/doc/reference/spike_train_generation.rst

@@ -0,0 +1,11 @@
+=================================
+Stochastic spike train generation
+=================================
+
+.. testsetup::
+
+   from elephant.spike_train_generation import homogeneous_poisson_process, homogeneous_gamma_process
+
+
+.. automodule:: elephant.spike_train_generation
+   :members:

+ 12 - 0
code/elephant/doc/reference/spike_train_surrogates.rst

@@ -0,0 +1,12 @@
+======================
+Spike train surrogates
+======================
+
+
+.. testsetup::
+
+   from elephant.spike_train_surrogates import shuffle_isis, randomise_spikes, jitter_spikes, dither_spikes, dither_spike_train
+
+
+.. automodule:: elephant.spike_train_surrogates
+   :members:

+ 18 - 0
code/elephant/doc/reference/sta.rst

@@ -0,0 +1,18 @@
+=======================
+Spike-triggered average
+=======================
+
+.. testsetup::
+
+   import numpy as np
+   import neo
+   from quantities import ms
+   from elephant.sta import spike_triggered_average
+
+   signal1 = np.arange(1000.0)
+   signal2 = np.arange(1, 1001.0)
+   spiketrain1 = neo.SpikeTrain([10.12, 20.23, 30.45], units=ms, t_stop=50*ms)
+   spiketrain2 = neo.SpikeTrain([10.34, 20.56, 30.67], units=ms, t_stop=50*ms)
+
+.. automodule:: elephant.sta
+   :members:

+ 6 - 0
code/elephant/doc/reference/statistics.rst

@@ -0,0 +1,6 @@
+======================
+Spike train statistics
+======================
+
+.. automodule:: elephant.statistics
+   :members:

+ 6 - 0
code/elephant/doc/reference/unitary_event_analysis.rst

@@ -0,0 +1,6 @@
+===========================
+Unitary Event (UE) Analysis
+===========================
+
+.. automodule:: elephant.unitary_event_analysis
+   :members:

+ 96 - 0
code/elephant/doc/release_notes.rst

@@ -0,0 +1,96 @@
+*************
+Release Notes
+*************
+
+Elephant 0.4.1 release notes
+============================
+March 23rd 2017
+
+Other changes
+=============
+* Fix in `setup.py` to correctly import the current source density module
+
+Elephant 0.4.0 release notes
+============================
+March 22nd 2017
+
+New functions
+=============
+* `spike_train_generation` module:
+    * peak detection: **peak_detection()**
+* Modules for Current Source Density: `current_source_density_src`
+    * Module Current Source Density: `KCSD.py`
+    * Module for Inverse Current Source Density: `icsd.py`
+
+API changes
+===========
+* Interoperability between Neo 0.5.0 and Elephant
+    * Elephant has adapted its functions to the changes in Neo 0.5.0,
+      most of the functionality behaves as before
+    * See Neo documentation for recent changes: http://neo.readthedocs.io/en/latest/whatisnew.html
+
+Other changes
+=============
+* Fixes to travis and setup configuration files.
+* Minor bug fixes.
+* Added module `six` for Python 2.7 backwards compatibility
+
+
+Elephant 0.3.0 release notes
+============================
+April 12th 2016
+
+New functions
+=============
+* `spike_train_correlation` module:
+    * cross correlation histogram: **cross_correlation_histogram()**
+* `spike_train_generation` module:
+    * single interaction process (SIP): **single_interaction_process()**
+    * compound Poisson process (CPP): **compound_poisson_process()**
+* `signal_processing` module:
+    * analytic signal: **hilbert()**
+* `sta` module:
+    * spike field coherence: **spike_field_coherence()**
+* Module to represent kernels: `kernels` module
+* Spike train metrics / dissimilarity / synchrony measures: `spike_train_dissimilarity` module
+* Unitary Event (UE) analysis: `unitary_event_analysis` module
+* Analysis of Sequences of Synchronous EvenTs (ASSET): `asset` module
+
+API changes
+===========
+* Function **instantaneous_rate()** now uses kernels as objects defined in the `kernels` module. The previous implementation of the function using the `make_kernel()` function is deprecated, but still temporarily available as `oldfct_instantaneous_rate()`.
+
+Other changes
+=============
+* Fixes to travis and readthedocs configuration files.
+
+
+Elephant 0.2.1 release notes
+============================
+February 18th 2016
+
+Minor bug fixes.
+
+
+Elephant 0.2.0 release notes
+============================
+September 22nd 2015
+
+New functions
+=============
+
+* Added covariance function **covariance()** in the `spike_train_correlation` module
+* Added complexity pdf **complexity_pdf()** in the `statistics` module
+* Added spike train extraction from analog signals via threshold detection in the `spike_train_generation` module
+* Added **coherence()** function for analog signals in the `spectral` module
+* Added **Cumulant Based Inference of higher-order Correlation (CuBIC)** in the `cubic` module for correlation analysis of parallel recorded spike trains
+
+API changes
+===========
+* **Optimized kernel bandwidth** in `rate_estimation` function: Calculates the optimized kernel width when the parameter kernel width is specified as `auto`
+
+Other changes
+=============
+* **Optimized creation of sparse matrices**: The creation speed of the sparse matrix inside the `BinnedSpikeTrain` class is optimized
+* Added **Izhikevich neuron simulator** in the `make_spike_extraction_test_data` module
+* Minor improvements to the test and continuous integration infrastructure

+ 6 - 0
code/elephant/doc/requirements.txt

@@ -0,0 +1,6 @@
+# Requirements for building documentation
+numpy>=1.8.2
+quantities>=0.10.1
+neo>=0.5.0
+numpydoc
+sphinx

+ 85 - 0
code/elephant/doc/tutorial.rst

@@ -0,0 +1,85 @@
+*********
+Tutorials
+*********
+
+Getting Started
+---------------
+
+In this first tutorial, we will go through a very simple example of how to use Elephant. We will numerically verify that the coefficient of variation (CV), a measure of the variability of inter-spike intervals, of a spike train that is modeled as a random (stochastic) Poisson process is 1.
+
+As a first step, install Elephant and its dependencies as outlined in :ref:`install`. Next, start up your Python shell. Under Windows, you can likely launch a Python shell from the Start menu. Under Linux or Mac, you may start Python by typing::
+
+    $ python
+
+As a first step, we want to generate spike train data modeled as a stochastic Poisson process. For this purpose, we can use the :mod:`elephant.spike_train_generation` module, which provides the :func:`homogeneous_poisson_process` function::
+
+    >>> from elephant.spike_train_generation import homogeneous_poisson_process
+
+Use the :func:`help()` function of Python to display the documentation for this function::
+
+    >>> help(homogeneous_poisson_process)
+
+As you can see, the function requires three parameters: the firing rate of the Poisson process, the start time and the stop time. These three parameters are specified as :class:`Quantity` objects: these are essentially arrays or numbers with a unit of measurement attached. We will see how to use these objects in a second. You can quit the help screen by typing ``q``.
+
+Let us now generate 100 independent Poisson spike trains for 100 seconds each with a rate of 10 Hz for which we later will calculate the CV. For simplicity, we will store the spike trains in a list::
+
+    >>> from quantities import Hz, s, ms
+    >>> spiketrain_list = [
+    ...     homogeneous_poisson_process(rate=10.0*Hz, t_start=0.0*s, t_stop=100.0*s)
+    ...     for i in range(100)]
+
+Notice that the units ``s`` and ``Hz`` have both been imported from the :mod:`quantities` library and can be directly attached to the values by multiplication. The output is a list of 100 Neo :class:`SpikeTrain` objects::
+
+    >>> print(len(spiketrain_list))
+    100
+    >>> print(type(spiketrain_list[0]))
+    <class 'neo.core.spiketrain.SpikeTrain'>
+
+Before we continue, let us (optionally) have a look at the spike trains in a spike raster plot. This can be created, e.g., using the `matplotlib`_ framework (you may need to install this library, as it is not one of the dependencies of Elephant)::
+
+    >>> import matplotlib.pyplot as plt
+    >>> import numpy as np
+    >>> for i, spiketrain in enumerate(spiketrain_list):
+            t = spiketrain.rescale(ms)
+            plt.plot(t, i * np.ones_like(t), 'k.', markersize=2)
+    >>> plt.axis('tight')
+    >>> plt.xlim(0, 1000)
+    >>> plt.xlabel('Time (ms)', fontsize=16)
+    >>> plt.ylabel('Spike Train Index', fontsize=16)
+    >>> plt.gca().tick_params(axis='both', which='major', labelsize=14)
+    >>> plt.show()
+
+Notice how the spike times of each spike train are extracted from each of the spike trains in the for-loop. The :meth:`rescale` operation of the quantities library is used to transform units to milliseconds. In order to aid the visualization, we restrict the plot to the first 1000 ms (:func:`xlim` function). The :func:`show` command plots the spike raster in a new figure window on the screen.
+
+.. figure:: images/tutorials/tutorial_1_figure_1.png
+    :width: 600 px
+    :align: center
+    :figwidth: 80 %
+    
+    Spike raster plot of the 100 Poisson spike trains showing the first second of data.
+
+From the plot you can see the random nature of each Poisson spike train. Let us now calculate the distribution of the 100 CVs obtained from inter-spike intervals (ISIs) of these spike trains. Close the graphics window to get back to the Python prompt. The functions to calculate the list of ISIs and the CV are both located in the :mod:`elephant.statistics` module. Thus, for each spike train in our list, we first call the :func:`isi` function which returns an array of all *N-1* ISIs for the *N* spikes in the input spike train (refer to the online help using ``help(isi)``). We then feed the list of ISIs into the :func:`cv` function, which returns a single value for the coefficient of variation::
+
+    >>> from elephant.statistics import isi, cv
+    >>> cv_list = [cv(isi(spiketrain)) for spiketrain in spiketrain_list]
+
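+Optionally, you can check numerically that the average of the obtained CV values is indeed close to the theoretical value of 1 (the exact number varies from run to run, since the spike trains are generated randomly)::
+
+    >>> print(np.mean(cv_list))
+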
+In a final step, let's plot a histogram of the obtained CVs (again illustrated using the matplotlib framework for plotting)::
+
+    >>> plt.hist(cv_list)
+    >>> plt.xlabel('CV', fontsize=16)
+    >>> plt.ylabel('count', fontsize=16)
+    >>> plt.gca().tick_params(axis='both', which='major', labelsize=14)
+    >>> plt.show()
+
+As predicted by theory, the CV values are clustered around 1. This concludes our first "getting started" tutorial on the use of Elephant. More tutorials will be added soon.
+
+.. figure:: images/tutorials/tutorial_1_figure_2.png
+    :width: 600 px
+    :align: center
+    :figwidth: 80 %
+    
+    Distribution of CV values of the ISIs of 100 Poisson spike trains.
+
+
+
+.. _`matplotlib`: http://matplotlib.org/

+ 30 - 0
code/elephant/elephant/__init__.py

@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+"""
+Elephant is a package for the analysis of neurophysiology data, based on Neo.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+from . import (statistics,
+               spike_train_generation,
+               spike_train_correlation,
+               unitary_event_analysis,
+               cubic,
+               spectral,
+               kernels,
+               spike_train_dissimilarity,
+               spike_train_surrogates,
+               signal_processing,
+               current_source_density,
+               sta,
+               conversion,
+               neo_tools)
+
+try:
+    from . import pandas_bridge
+    from . import asset
+except ImportError:
+    pass
+
+__version__ = "0.4.1"

File diff suppressed because it is too large
+ 1753 - 0
code/elephant/elephant/asset.py


+ 814 - 0
code/elephant/elephant/conversion.py

@@ -0,0 +1,814 @@
+# -*- coding: utf-8 -*-
+"""
+This module allows one to convert standard data representations
+(e.g., a spike train stored as Neo SpikeTrain object)
+into other representations useful to perform calculations on the data.
+An example is the representation of a spike train as a sequence of 0-1 values
+(binned spike train).
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: BSD, see LICENSE.txt for details.
+"""
+
+from __future__ import division, print_function
+
+import neo
+import scipy
+import scipy.sparse as sps
+import numpy as np
+import quantities as pq
+
+
+def binarize(spiketrain, sampling_rate=None, t_start=None, t_stop=None,
+             return_times=None):
+    """
+    Return an array indicating if spikes occurred at individual time points.
+
+    The array contains boolean values identifying whether one or more spikes
+    happened in the corresponding time bin.  Time bins start at `t_start`
+    and end at `t_stop`, spaced in `1/sampling_rate` intervals.
+
+    Accepts either a Neo SpikeTrain, a Quantity array, or a plain NumPy array.
+    Returns a boolean array with each element being the presence or absence of
+    a spike in that time bin.  The number of spikes in a time bin is not
+    considered.
+
+    Optionally also returns an array of time points corresponding to the
+    elements of the boolean array.  The units of this array will be the same as
+    the units of the SpikeTrain, if any.
+
+    Parameters
+    ----------
+
+    spiketrain : Neo SpikeTrain or Quantity array or NumPy array
+                 The spike times.  Does not have to be sorted.
+    sampling_rate : float or Quantity scalar, optional
+                    The sampling rate to use for the time points.
+                    If not specified, retrieved from the `sampling_rate`
+                    attribute of `spiketrain`.
+    t_start : float or Quantity scalar, optional
+              The start time to use for the time points.
+              If not specified, retrieved from the `t_start`
+              attribute of `spiketrain`.  If that is not present, default to
+              `0`.  Any value from `spiketrain` below this value is
+              ignored.
+    t_stop : float or Quantity scalar, optional
+             The stop time to use for the time points.
+             If not specified, retrieved from the `t_stop`
+             attribute of `spiketrain`.  If that is not present, default to
+             the maximum value of `spiketrain`.  Any value from
+             `spiketrain` above this value is ignored.
+    return_times : bool
+                   If True, also return the corresponding time points.
+
+    Returns
+    -------
+
+    values : NumPy array of bools
+             A ``True`` value at a particular index indicates the presence of
+             one or more spikes at the corresponding time point.
+    times : NumPy array or Quantity array, optional
+            The time points.  This will have the same units as `spiketrain`.
+            If `spiketrain` has no units, this will be an NumPy array.
+
+    Notes
+    -----
+    Spike times are placed in the bin of the closest time point, going to the
+    higher bin if exactly between two bins.
+
+    So in the case where the bins are `5.5` and `6.5`, with the spike time
+    being `6.0`, the spike will be placed in the `6.5` bin.
+
+    The upper edge of the last bin, equal to `t_stop`, is inclusive.  That is,
+    a spike time exactly equal to `t_stop` will be included.
+
+    If `spiketrain` is a Quantity or Neo SpikeTrain and
+    `t_start`, `t_stop` or `sampling_rate` is not, then the arguments that
+    are not quantities will be assumed to have the same units as `spiketrain`.
+
+    Raises
+    ------
+
+    TypeError
+        If `spiketrain` is a NumPy array and `t_start`, `t_stop`, or
+        `sampling_rate` is a Quantity.
+
+    ValueError
+        `t_start` and `t_stop` can be inferred from `spiketrain` if
+        not explicitly defined and not an attribute of `spiketrain`.
+        `sampling_rate` cannot, so an exception is raised if it is not
+        explicitly defined and not present as an attribute of `spiketrain`.
+    """
+    # get the values from spiketrain if they are not specified.
+    if sampling_rate is None:
+        sampling_rate = getattr(spiketrain, 'sampling_rate', None)
+        if sampling_rate is None:
+            raise ValueError('sampling_rate must either be explicitly defined '
+                             'or must be an attribute of spiketrain')
+    if t_start is None:
+        t_start = getattr(spiketrain, 't_start', 0)
+    if t_stop is None:
+        t_stop = getattr(spiketrain, 't_stop', np.max(spiketrain))
+
+    # we don't actually want the sampling rate, we want the sampling period
+    sampling_period = 1. / sampling_rate
+
+    # figure out what units, if any, we are dealing with
+    if hasattr(spiketrain, 'units'):
+        units = spiketrain.units
+        spiketrain = spiketrain.magnitude
+    else:
+        units = None
+
+    # convert everything to the same units, then get the magnitude
+    if hasattr(sampling_period, 'units'):
+        if units is None:
+            raise TypeError('sampling_period cannot be a Quantity if '
+                            'spiketrain is not a quantity')
+        sampling_period = sampling_period.rescale(units).magnitude
+    if hasattr(t_start, 'units'):
+        if units is None:
+            raise TypeError('t_start cannot be a Quantity if '
+                            'spiketrain is not a quantity')
+        t_start = t_start.rescale(units).magnitude
+    if hasattr(t_stop, 'units'):
+        if units is None:
+            raise TypeError('t_stop cannot be a Quantity if '
+                            'spiketrain is not a quantity')
+        t_stop = t_stop.rescale(units).magnitude
+
+    # figure out the bin edges
+    edges = np.arange(t_start - sampling_period / 2, t_stop + sampling_period * 3 / 2,
+                      sampling_period)
+    # we don't want to count any spikes before t_start or after t_stop
+    if edges[-2] > t_stop:
+        edges = edges[:-1]
+    if edges[1] < t_start:
+        edges = edges[1:]
+    edges[0] = t_start
+    edges[-1] = t_stop
+
+    # this is where we actually get the binarized spike train
+    res = np.histogram(spiketrain, edges)[0].astype('bool')
+
+    # figure out what to output
+    if not return_times:
+        return res
+    elif units is None:
+        return res, np.arange(t_start, t_stop + sampling_period, sampling_period)
+    else:
+        return res, pq.Quantity(np.arange(t_start, t_stop + sampling_period,
+                                          sampling_period), units=units)
+
+###########################################################################
+#
+# Methods to calculate parameters, t_start, t_stop, bin size,
+# number of bins
+#
+###########################################################################
+
+
+def _calc_tstart(num_bins, binsize, t_stop):
+    """
+    Calculates the start point from the given parameters.
+
+    Calculates the start point :attr:`t_start` from the three parameters
+    :attr:`t_stop`, :attr:`num_bins` and :attr:`binsize`.
+
+    Parameters
+    ----------
+    num_bins: int
+        Number of bins
+    binsize: quantities.Quantity
+        Size of Bins
+    t_stop: quantities.Quantity
+        Stop time
+
+    Returns
+    -------
+    t_start : quantities.Quantity
+        Starting point calculated from the given parameters.
+    """
+    if num_bins is not None and binsize is not None and t_stop is not None:
+        return t_stop.rescale(binsize.units) - num_bins * binsize
+
+
+def _calc_tstop(num_bins, binsize, t_start):
+    """
+    Calculates the stop point from the given parameters.
+
+    Calculates the stop point :attr:`t_stop` from the three parameters
+    :attr:`t_start`, :attr:`num_bins` and :attr:`binsize`.
+
+    Parameters
+    ----------
+    num_bins: int
+        Number of bins
+    binsize: quantities.Quantity
+        Size of bins
+    t_start: quantities.Quantity
+        Start time
+
+    Returns
+    -------
+    t_stop : quantities.Quantity
+        Stopping point calculated from the given parameters.
+    """
+    if num_bins is not None and binsize is not None and t_start is not None:
+        return t_start.rescale(binsize.units) + num_bins * binsize
+
+
+def _calc_num_bins(binsize, t_start, t_stop):
+    """
+    Calculates the number of bins from the given parameters.
+
+    Calculates the number of bins :attr:`num_bins` from the three parameters
+    :attr:`t_start`, :attr:`t_stop` and :attr:`binsize`.
+
+    Parameters
+    ----------
+    binsize: quantities.Quantity
+        Size of Bins
+    t_start : quantities.Quantity
+        Start time
+    t_stop: quantities.Quantity
+        Stop time
+
+    Returns
+    -------
+    num_bins : int
+       Number of bins calculated from the given parameters.
+
+    Raises
+    ------
+    ValueError :
+        Raised when :attr:`t_stop` is smaller than :attr:`t_start`.
+
+    """
+    if binsize is not None and t_start is not None and t_stop is not None:
+        if t_stop < t_start:
+            raise ValueError("t_stop (%s) is smaller than t_start (%s)"
+                             % (t_stop, t_start))
+        return int(((t_stop - t_start).rescale(
+            binsize.units) / binsize).magnitude)
+
+
+def _calc_binsize(num_bins, t_start, t_stop):
+    """
+    Calculates the size of the bins from the given parameters.
+
+    Calculates the size of the bins :attr:`binsize` from the three parameters
+    :attr:`num_bins`, :attr:`t_start` and :attr:`t_stop`.
+
+    Parameters
+    ----------
+    num_bins: int
+        Number of bins
+    t_start: quantities.Quantity
+        Start time
+    t_stop
+       Stop time
+
+    Returns
+    -------
+    binsize : quantities.Quantity
+        Size of bins calculated from the given parameters.
+
+    Raises
+    ------
+    ValueError :
+        Raised when :attr:`t_stop` is smaller than :attr:`t_start`.
+    """
+
+    if num_bins is not None and t_start is not None and t_stop is not None:
+        if t_stop < t_start:
+            raise ValueError("t_stop (%s) is smaller than t_start (%s)"
+                             % (t_stop, t_start))
+        return (t_stop - t_start) / num_bins
+
+
+def _get_start_stop_from_input(spiketrains):
+    """
+    Returns the start :attr:`t_start` and stop :attr:`t_stop` points
+    from the given input.
+
+    If a single neo.SpikeTrain object is given, its start :attr:`t_start` and
+    stop :attr:`t_stop` are returned.
+    Otherwise the aligned times are returned, which are the maximal start point
+    and the minimal stop point across all spike trains.
+
+    Parameters
+    ----------
+    spiketrains: neo.SpikeTrain object, list or array of neo.core.SpikeTrain
+                 objects
+        List of neo.core SpikeTrain objects to extract `t_start` and
+        `t_stop` from.
+
+    Returns
+    -------
+    start : quantities.Quantity
+        Start point extracted from input :attr:`spiketrains`
+    stop : quantities.Quantity
+        Stop point extracted from input :attr:`spiketrains`
+    """
+    if isinstance(spiketrains, neo.SpikeTrain):
+        return spiketrains.t_start, spiketrains.t_stop
+    else:
+        start = max([elem.t_start for elem in spiketrains])
+        stop = min([elem.t_stop for elem in spiketrains])
+    return start, stop
+
+
+class BinnedSpikeTrain(object):
+    """
+    Class which calculates a binned spike train and provides methods to
+    transform the binned spike train to a boolean matrix or a matrix with
+    counted time points.
+
+    A binned spike train represents the occurrence of spikes in a certain time
+    frame.
+    I.e., a time series like [0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] is
+    represented as the bin indices [0, 0, 1, 3, 4, 5, 6]. The outcome depends
+    on the given parameters, such as the size of the bins, the number of bins,
+    and the start and stop points.
+
+    A boolean matrix represents the binned spike train in a binary (True/False)
+    manner.
+    Its rows correspond to the individual spike trains and its columns to the
+    bin index positions.
+    Matrix entries that are **True** indicate that at least one spike occurred
+    in the corresponding bin.
+
+    A matrix with counted time points is calculated in the same way, but its
+    entries contain the number of spikes that occurred in the corresponding bin
+    of each spike train.
+
+    Parameters
+    ----------
+    spiketrains : List of `neo.SpikeTrain` or a `neo.SpikeTrain` object
+        Spike train(s) to be binned.
+    binsize : quantities.Quantity
+        Width of each time bin.
+        Default is `None`
+    num_bins : int
+        Number of bins of the binned spike train.
+        Default is `None`
+    t_start : quantities.Quantity
+        Time of the first bin (left extreme; included).
+        Default is `None`
+    t_stop : quantities.Quantity
+        Stopping time of the last bin (right extreme; excluded).
+        Default is `None`
+
+    See also
+    --------
+    _convert_to_binned
+    spike_indices
+    to_bool_array
+    to_array
+
+    Notes
+    -----
+    The given parameters must satisfy one of the following four combinations;
+    otherwise a ValueError is raised:
+    * t_start, num_bins, binsize
+    * t_start, num_bins, t_stop
+    * t_start, binsize, t_stop
+    * t_stop, num_bins, binsize
+
+    It is also possible to give only the SpikeTrain objects and one parameter
+    (:attr:`num_bins` or :attr:`binsize`). The start and stop time will then be
+    calculated from the given SpikeTrain objects (maximal start and minimal
+    stop point). The missing parameter is calculated automatically.
+    All parameters are checked for consistency. A corresponding error is
+    raised if one of the four parameters does not match the consistency
+    requirements.
+
+    """
+
+    def __init__(self, spiketrains, binsize=None, num_bins=None, t_start=None,
+                 t_stop=None):
+        """
+        Defines a binned spike train class
+
+        """
+        # Converting spiketrains to a list, if spiketrains is one
+        # SpikeTrain object
+        if isinstance(spiketrains, neo.SpikeTrain):
+            spiketrains = [spiketrains]
+
+        # Check that spiketrains is a list of neo Spike trains.
+        if not all([type(elem) == neo.core.SpikeTrain for elem in spiketrains]):
+            raise TypeError(
+                "All elements of the input list must be neo.core.SpikeTrain "
+                "objects ")
+        # Link to input
+        self.lst_input = spiketrains
+        # Set given parameter
+        self.t_start = t_start
+        self.t_stop = t_stop
+        self.num_bins = num_bins
+        self.binsize = binsize
+        self.matrix_columns = num_bins
+        self.matrix_rows = len(spiketrains)
+        # Empty matrix for storage, time points matrix
+        self._mat_u = None
+        # Check all parameter, set also missing values
+        self._calc_start_stop(spiketrains)
+        self._check_init_params(binsize, num_bins, self.t_start, self.t_stop)
+        self._check_consistency(spiketrains, self.binsize, self.num_bins,
+                                self.t_start, self.t_stop)
+        # Variables to store the sparse matrix
+        self._sparse_mat_u = None
+        # Now create sparse matrix
+        self._convert_to_binned(spiketrains)
+
+    # =========================================================================
+    # There are four cases the given parameters must fulfill
+    # Each parameter must be a combination of following order or it will raise
+    # a value error:
+    # t_start, num_bins, binsize
+    # t_start, num_bins, t_stop
+    # t_start, bin_size, t_stop
+    # t_stop, num_bins, binsize
+    # ==========================================================================
+
+    def _check_init_params(self, binsize, num_bins, t_start, t_stop):
+        """
+        Checks the given parameters and calculates missing ones.
+
+        Parameters
+        ----------
+        binsize : quantity.Quantity
+            Size of Bins
+        num_bins : int
+            Number of Bins
+        t_start: quantity.Quantity
+            Start time of the spike
+        t_stop: quantity.Quantity
+            Stop time of the spike
+
+        Raises
+        ------
+        ValueError :
+            If all parameters are `None`, a ValueError is raised.
+        TypeError:
+            If type of :attr:`num_bins` is not an Integer.
+
+        """
+        # Check if num_bins is an integer (special case)
+        if num_bins is not None:
+            if not isinstance(num_bins, int):
+                raise TypeError("num_bins is not an integer!")
+        # Check if all parameters can be calculated, otherwise raise ValueError
+        if t_start is None:
+            self.t_start = _calc_tstart(num_bins, binsize, t_stop)
+        elif t_stop is None:
+            self.t_stop = _calc_tstop(num_bins, binsize, t_start)
+        elif num_bins is None:
+            self.num_bins = _calc_num_bins(binsize, t_start, t_stop)
+            if self.matrix_columns is None:
+                self.matrix_columns = self.num_bins
+        elif binsize is None:
+            self.binsize = _calc_binsize(num_bins, t_start, t_stop)
+
+    def _calc_start_stop(self, spiketrains):
+        """
+        Calculates start, stop from given spike trains.
+
+        The start and stop points are calculated from given spike trains, only
+        if they are not calculable from given parameter or the number of
+        parameters is less than three.
+
+        """
+        if self._count_params() is False:
+            start, stop = _get_start_stop_from_input(spiketrains)
+            if self.t_start is None:
+                self.t_start = start
+            if self.t_stop is None:
+                self.t_stop = stop
+
+    def _count_params(self):
+        """
+        Counts the parameters that are not `None` and returns **True** if the
+        count is greater than or equal to `3`.
+
+        The calculation of the binned matrix is only possible if at least
+        three parameters are given (the fourth parameter is then calculated
+        from them).
+        This method checks which of the necessary parameters are not `None`
+        and returns **True** if the count is greater than or equal to `3`.
+
+        Returns
+        -------
+        bool :
+            True, if the count is greater or equal to `3`.
+            False, otherwise.
+
+        """
+        return sum(x is not None for x in
+                   [self.t_start, self.t_stop, self.binsize,
+                    self.num_bins]) >= 3
+
+    def _check_consistency(self, spiketrains, binsize, num_bins, t_start,
+                           t_stop):
+        """
+        Checks the given parameters for consistency
+
+        Raises
+        ------
+        ValueError :
+            A ValueError is raised if an inconsistency regarding the parameter
+            appears.
+        AttributeError :
+            An AttributeError is raised if there is an insufficient number of
+            parameters.
+
+        """
+        if self._count_params() is False:
+            raise AttributeError("Too few parameters given. Please provide "
+                                 "at least one of the parameters which are "
+                                 "None.\n"
+                                 "t_start: %s, t_stop: %s, binsize: %s, "
+                                 "numb_bins: %s" % (
+                                     self.t_start,
+                                     self.t_stop,
+                                     self.binsize,
+                                     self.num_bins))
+        t_starts = [elem.t_start for elem in spiketrains]
+        t_stops = [elem.t_stop for elem in spiketrains]
+        max_tstart = max(t_starts)
+        min_tstop = min(t_stops)
+        if max_tstart >= min_tstop:
+            raise ValueError(
+                "Starting time of each spike train must be smaller than each "
+                "stopping time")
+        elif t_start < max_tstart or t_start > min_tstop:
+            raise ValueError(
+                'some spike trains are not defined in the time given '
+                'by t_start')
+        elif num_bins != int((
+                (t_stop - t_start).rescale(binsize.units) / binsize).magnitude):
+            raise ValueError(
+                "Inconsistent arguments t_start (%s), " % t_start +
+                "t_stop (%s), binsize (%d) " % (t_stop, binsize) +
+                "and num_bins (%d)" % num_bins)
+        elif not (t_start < t_stop <= min_tstop):
+            raise ValueError(
+                'too many / too large time bins. Some spike trains are '
+                'not defined in the ending time')
+        elif num_bins - int(num_bins) != 0 or num_bins < 0:
+            raise TypeError(
+                "Number of bins (num_bins) is not an integer or < 0: " + str(
+                    num_bins))
+
+    @property
+    def bin_edges(self):
+        """
+        Returns all time edges with :attr:`num_bins` bins as a quantity array.
+
+        The borders of all time steps between start and stop [start, stop]
+        with :attr:`num_bins` bins are regarded as edges.
+        The border of the last bin is included.
+
+        Returns
+        -------
+        bin_edges : quantities.Quantity array
+            All edges in interval [start, stop] with :attr:`num_bins` bins
+            are returned as a quantity array.
+
+        """
+        return pq.Quantity(np.linspace(self.t_start.magnitude,
+                                       self.t_stop.magnitude,
+                                       self.num_bins + 1, endpoint=True),
+                           units=self.binsize.units)
+
+    @property
+    def bin_centers(self):
+        """
+        Returns each center time point of all bins between start and stop
+        points.
+
+        The center of each bin of all time steps between start and stop
+        (start, stop).
+
+        Returns
+        -------
+        bin_centers : quantities.Quantity array
+            All bin centers in the interval (start, stop) are returned as
+            a quantity array.
+
+        """
+        return self.bin_edges[:-1] + self.binsize / 2
+
+    def to_sparse_array(self):
+        """
+        Getter for sparse matrix with time points.
+
+        Returns
+        -------
+        matrix: scipy.sparse.csr_matrix
+            Sparse matrix, counted version.
+
+        See also
+        --------
+        scipy.sparse.csr_matrix
+        to_array
+
+        """
+        return self._sparse_mat_u
+
+    def to_sparse_bool_array(self):
+        """
+        Getter for **boolean** version of the sparse matrix, calculated from
+        sparse matrix with counted time points.
+
+        Returns
+        -------
+        matrix: scipy.sparse.csr_matrix
+            Sparse matrix, binary, boolean version.
+
+        See also
+        --------
+        scipy.sparse.csr_matrix
+        to_bool_array
+
+        """
+        # Return sparse Matrix as a copy
+        tmp_mat = self._sparse_mat_u.copy()
+        tmp_mat[tmp_mat.nonzero()] = 1
+        return tmp_mat.astype(bool)
+
+    @property
+    def spike_indices(self):
+        """
+        A list of lists for each spike train (i.e., rows of the binned matrix),
+        that in turn contains for each spike the index into the binned matrix
+        where this spike enters.
+
+        In contrast to `to_sparse_array().nonzero()`, this function will report
+        two spikes falling in the same bin as two entries.
+
+        Examples
+        --------
+        >>> import elephant.conversion as conv
+        >>> import neo as n
+        >>> import quantities as pq
+        >>> st = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
+        >>> x = conv.BinnedSpikeTrain(st, num_bins=10, binsize=1 * pq.s, t_start=0 * pq.s)
+        >>> print(x.spike_indices)
+        [[0, 0, 1, 3, 4, 5, 6]]
+        >>> print(x.to_sparse_array().nonzero()[1])
+        [0 1 3 4 5 6]
+
+        """
+        spike_idx = []
+        for row in self._sparse_mat_u:
+            l = []
+            # Extract each non-zeros column index and how often it exists,
+            # i.e., how many spikes fall in this column
+            for col, count in zip(row.nonzero()[1], row.data):
+                # Append the column index for each spike
+                l.extend([col] * count)
+            spike_idx.append(l)
+        return spike_idx
+
+    def to_bool_array(self):
+        """
+        Returns a dense matrix (`numpy.ndarray`) whose rows represent the
+        spike trains and whose columns represent the bin index positions.
+        The matrix entries contain **True** for bins in which a spike occurred
+        and **False** otherwise.
+
+        Returns
+        -------
+        bool matrix : numpy.ndarray
+            Returns a dense matrix representation of the sparse matrix,
+            with **True** indicating a spike and **False** indicating
+            no spike.
+            The columns represent the bin index positions of the spikes and
+            the rows represent the individual spike trains.
+
+        Examples
+        --------
+        >>> import elephant.conversion as conv
+        >>> import neo as n
+        >>> import quantities as pq
+        >>> a = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
+        >>> x = conv.BinnedSpikeTrain(a, num_bins=10, binsize=1 * pq.s, t_start=0 * pq.s)
+        >>> print(x.to_bool_array())
+        [[ True  True False  True  True  True  True False False False]]
+
+        See also
+        --------
+        scipy.sparse.csr_matrix
+        scipy.sparse.csr_matrix.toarray
+        """
+        return abs(scipy.sign(self.to_array())).astype(bool)
+
+    def to_array(self, store_array=False):
+        """
+        Returns a dense matrix, calculated from the sparse matrix with counted
+        time points, whose rows represent the spike trains and whose
+        columns represent the bin index positions.
+        The matrix entries contain the number of spikes that
+        occurred in the corresponding bin of each spike train.
+        If the **boolean** :attr:`store_array` is set to **True** the matrix
+        will be stored in memory.
+
+        Returns
+        -------
+        matrix : numpy.ndarray
+            Matrix with spike counts. Columns represent the bin index positions
+            and rows represent the individual spike trains.
+
+        Examples
+        --------
+        >>> import elephant.conversion as conv
+        >>> import neo as n
+        >>> import quantities as pq
+        >>> a = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
+        >>> x = conv.BinnedSpikeTrain(a, num_bins=10, binsize=1 * pq.s, t_start=0 * pq.s)
+        >>> print(x.to_array())
+        [[2 1 0 1 1 1 1 0 0 0]]
+
+        See also
+        --------
+        scipy.sparse.csr_matrix
+        scipy.sparse.csr_matrix.toarray
+
+        """
+        if self._mat_u is not None:
+            return self._mat_u
+        if store_array:
+            self._store_array()
+            return self._mat_u
+        # Matrix on demand
+        else:
+            return self._sparse_mat_u.toarray()
+
+    def _store_array(self):
+        """
+        Stores the matrix with counted time points in memory.
+
+        """
+        if self._mat_u is None:
+            self._mat_u = self.to_sparse_array().toarray()
+
+    def remove_stored_array(self):
+        """
+        Removes the matrix with counted time points from memory.
+
+        """
+        if self._mat_u is not None:
+            del self._mat_u
+            self._mat_u = None
+
+    def _convert_to_binned(self, spiketrains):
+        """
+        Converts neo.core.SpikeTrain objects to a sparse matrix
+        (`scipy.sparse.csr_matrix`), which contains the binned times.
+
+        Parameters
+        ----------
+        spiketrains : neo.SpikeTrain object or list of SpikeTrain objects
+           The binned time array :attr:`spike_indices` is calculated from a
+           SpikeTrain object or from a list of SpikeTrain objects.
+
+        """
+        from distutils.version import StrictVersion
+        # column
+        filled = []
+        # row
+        indices = []
+        # data
+        counts = []
+        # to be downwards compatible compare numpy versions, if the used
+        # version is smaller than v1.9 use different functions
+        smaller_version = StrictVersion(np.__version__) < '1.9.0'
+        for idx, elem in enumerate(spiketrains):
+            ev = elem.view(pq.Quantity)
+            scale = np.array(((ev - self.t_start).rescale(
+                self.binsize.units) / self.binsize).magnitude, dtype=int)
+            l = np.logical_and(ev >= self.t_start.rescale(self.binsize.units),
+                               ev <= self.t_stop.rescale(self.binsize.units))
+            filled_tmp = scale[l]
+            filled_tmp = filled_tmp[filled_tmp < self.num_bins]
+            if smaller_version:
+                f = np.unique(filled_tmp)
+                c = np.bincount(f.searchsorted(filled_tmp))
+            else:
+                f, c = np.unique(filled_tmp, return_counts=True)
+            filled.extend(f)
+            counts.extend(c)
+            indices.extend([idx] * len(f))
+        csr_matrix = sps.csr_matrix((counts, (indices, filled)),
+                                    shape=(self.matrix_rows,
+                                           self.matrix_columns),
+                                    dtype=int)
+        self._sparse_mat_u = csr_matrix

+ 221 - 0
code/elephant/elephant/cubic.py

@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+'''
+CuBIC is a statistical method for the detection of higher-order
+correlations in parallel spike trains based on the analysis of the
+cumulants of the population count.
+Given a list sts of SpikeTrains, the analysis comprises the following
+steps:
+
+1) compute the population histogram (PSTH) with the desired bin size
+       >>> binsize = 5 * pq.ms
+       >>> pop_count = elephant.statistics.time_histogram(sts, binsize)
+
+2) apply CuBIC to the population count
+       >>> alpha = 0.05  # significance level of the tests used
+       >>> xi, p_val, k, test_aborted = cubic(pop_count, ximax=100, alpha=alpha)
+
+:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
+:license: BSD, see LICENSE.txt for details.
+'''
+# -*- coding: utf-8 -*-
+from __future__ import division
+import scipy.stats
+import scipy.special
+import math
+import warnings
+
+
+# Based on matlab code by Benjamin Staude
+# Adaptation to python by Pietro Quaglio and Emiliano Torre
+
+
+def cubic(data, ximax=100, alpha=0.05):
+    '''
+    Performs the CuBIC analysis [1] on a population histogram, calculated from
+    a population of spiking neurons.
+
+    The null hypothesis :math:`H_0: k_3(data)<=k^*_{3,\\xi}` is iteratively
+    tested with increasing correlation order :math:`\\xi` (correspondent to
+    variable xi) until it is possible to accept, with a significance level alpha,
+    that :math:`\\hat{\\xi}` (corresponding to variable xi_hat) is the minimum
+    order of correlation necessary to explain the third cumulant
+    :math:`k_3(data)`.
+
+    :math:`k^*_{3,\\xi}` is the maximized third cumulant, supposing a Compound
+    Poisson Process (CPP) model for correlated spike trains (see [1])
+    with maximum order of correlation equal to :math:`\\xi`.
+
+    Parameters
+    ----------
+    data : neo.AnalogSignal
+        The population histogram (count of spikes per time bin) of the entire
+        population of neurons.
+    ximax : int
+         The maximum number of iterations of the hypothesis test:
+         if it is not possible to compute :math:`\\hat{\\xi}` within ximax
+         iterations, the CuBIC procedure is aborted.
+         Default: 100
+    alpha : float
+         The significance level of the hypothesis tests performed.
+         Default: 0.05
+
+    Returns
+    -------
+    xi_hat : int
+        The minimum correlation order estimated by CuBIC, necessary to
+        explain the value of the third cumulant calculated from the population.
+    p : list
+        The ordered list of all the p-values of the hypothesis tests that have
+        been performed. If the maximum number of iterations ximax is reached,
+        the test is aborted (see the return value test_aborted).
+    kappa : list
+        The list of the first three cumulants of the data.
+    test_aborted : bool
+        Whether the test was aborted because it reached the maximum number of
+        iterations ximax
+
+    References
+    ----------
+    [1] Staude, Rotter, Gruen (2009) J. Comp. Neurosci.
+    '''
+    # alpha must be in the interval [0,1]
+    if alpha < 0 or alpha > 1:
+        raise ValueError(
+            'the significance level alpha (= %s) has to be in [0,1]' % alpha)
+
+    if not isinstance(ximax, int) or ximax < 0:
+        raise ValueError(
+            'The maximum number of iterations ximax (= %i) has to be a positive'
+            % ximax + ' integer')
+
+    # strip the units from the data, if present (e.g., a neo.AnalogSignal)
+    try:
+        data = data.magnitude
+    except AttributeError:
+        pass
+    L = len(data)
+
+    # compute first three cumulants
+    kappa = _kstat(data)
+    xi_hat = 1
+    xi = 1
+    pval = 0.
+    p = []
+    test_aborted = False
+
+    # compute xi_hat iteratively
+    while pval < alpha:
+        xi_hat = xi
+        if xi > ximax:
+            warnings.warn('Test aborted, xihat= %i > ximax= %i' % (xi, ximax))
+            test_aborted = True
+            break
+
+        # compute p-value
+        pval = _H03xi(kappa, xi, L)
+        p.append(pval)
+        xi = xi + 1
+
+    return xi_hat, p, kappa, test_aborted
+
+
+def _H03xi(kappa, xi, L):
+    '''
+    Computes the p_value for testing  the :math:`H_0: k_3(data)<=k^*_{3,\\xi}`
+    hypothesis of CuBIC in the stationary rate version
+
+    Parameters
+    -----
+    kappa : list
+        The first three cumulants of the population of spike trains
+    xi : int
+        The maximum order of correlation :math:`\\xi` assumed in the
+        hypothesis :math:`H_0` for which the p-value is computed
+    L : float
+        The length of the original population histogram on which the CuBIC
+        analysis is performed
+
+    Returns
+    -----
+    p : float
+        The p-value of the hypothesis tests
+    '''
+
+    # Check the order condition of the cumulants necessary to perform CuBIC
+    if kappa[1] < kappa[0]:
+        # p = errorval
+        kstar = [0]
+        raise ValueError(
+            'H_0 can not be tested:'
+            'kappa(2)= %f<%f=kappa(1)!!!' % (kappa[1], kappa[0]))
+    else:
+        # computation of the maximized cumulants
+        kstar = [_kappamstar(kappa[:2], i, xi) for i in range(2, 7)]
+        k3star = kstar[1]
+
+        # variance of third cumulant (from Stuart & Ord)
+        sigmak3star = math.sqrt(
+            kstar[4] / L + 9 * (kstar[2] * kstar[0] + kstar[1] ** 2) /
+            (L - 1) + 6 * L * kstar[0] ** 3 / ((L - 1) * (L - 2)))
+        # computation of the p-value (the third cumulant is assumed to
+        # be Gaussian distributed)
+        p = 1 - scipy.stats.norm(k3star, sigmak3star).cdf(kappa[2])
+        return p
+
+
+def _kappamstar(kappa, m, xi):
+    '''
+    Computes maximized cumulant of order m
+
+    Parameters
+    -----
+    kappa : list
+        The first two cumulants of the data
+    xi : int
+        The :math:`\\xi` for which is computed the p value of :math:`H_0`
+    m : float
+        The order of the cumulant
+
+    Returns
+    -----
+    kappa_out : float
+        The maximized cumulant of order m
+    '''
+
+    if xi == 1:
+        kappa_out = kappa[1]
+    else:
+        kappa_out = \
+            (kappa[1] * (xi ** (m - 1) - 1) -
+                kappa[0] * (xi ** (m - 1) - xi)) / (xi - 1)
+    return kappa_out
+
+
+def _kstat(data):
+    '''
+    Compute the first three cumulants of a population count of a population of
+    spiking neurons.
+    See http://mathworld.wolfram.com/k-Statistic.html
+
+    Parameters
+    -----
+    data : numpy.array
+        The population histogram of the population on which are computed
+        the cumulants
+
+    Returns
+    -----
+    kappa : list
+        The first three cumulants of the population count
+    '''
+    L = len(data)
+    if L == 0:
+        raise ValueError('The input data must be a non-empty array')
+    S = [(data ** r).sum() for r in range(1, 4)]
+    kappa = []
+    kappa.append(S[0] / float(L))
+    kappa.append((L * S[1] - S[0] ** 2) / (L * (L - 1)))
+    kappa.append(
+        (2 * S[0] ** 3 - 3 * L * S[0] * S[1] + L ** 2 * S[2]) / (
+            L * (L - 1) * (L - 2)))
+    return kappa

+ 332 - 0
code/elephant/elephant/current_source_density.py

@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-
+"""'Current Source Density analysis (CSD) is a class of methods of analysis of
+extracellular electric potentials recorded at multiple sites leading to
+estimates of current sources generating the measured potentials. It is usually
+applied to low-frequency part of the potential (called the Local Field
+Potential, LFP) and to simultaneous recordings or to recordings taken with
+fixed time reference to the onset of specific stimulus (Evoked Potentials)'
+(Definition by Prof.Daniel K. Wójcik for Encyclopedia of Computational
+Neuroscience)
+
+CSD is also called Source Localization or Source Imaging in EEG circles.
+Here are CSD methods for different types of electrode configurations.
+
+1D - laminar probe like electrodes.
+2D - Microelectrode Array like
+3D - UtahArray or multiple laminar probes.
+
+The following methods have been implemented so far
+
+1D - StandardCSD, DeltaiCSD, SplineiCSD, StepiCSD, KCSD1D
+2D - KCSD2D, MoIKCSD (Saline layer on top of slice)
+3D - KCSD3D
+
+Each of the methods listed has some advantages. The KCSD methods, for
+instance, can handle broken or irregular electrode configurations.
+
+Keywords: LFP; CSD; Multielectrode; Laminar electrode; Barrel cortex
+
+Citation Policy: See ./current_source_density_src/README.md
+
+Contributors to this  current source density estimation module are:
+Chaitanya Chintaluri(CC), Espen Hagen(EH) and Michał Czerwinski(MC).
+EH implemented the iCSD methods and StandardCSD
+CC implemented the kCSD methods, kCSD1D(MC and CC)
+CC and EH developed the interface to elephant.
+"""
+
+from __future__ import division
+
+import neo
+import quantities as pq
+import numpy as np
+from scipy import io
+from scipy.integrate import simps
+
+from elephant.current_source_density_src import KCSD
+from elephant.current_source_density_src import icsd
+import elephant.current_source_density_src.utility_functions as utils
+
+utils.patch_quantities()
+
+available_1d = ['StandardCSD', 'DeltaiCSD', 'StepiCSD', 'SplineiCSD', 'KCSD1D']
+available_2d = ['KCSD2D', 'MoIKCSD']
+available_3d = ['KCSD3D']
+
+kernel_methods = ['KCSD1D', 'KCSD2D', 'KCSD3D', 'MoIKCSD']
+icsd_methods = ['DeltaiCSD', 'StepiCSD', 'SplineiCSD']
+
+py_iCSD_toolbox = ['StandardCSD'] + icsd_methods
+
+
+def estimate_csd(lfp, coords=None, method=None,
+                 process_estimate=True, **kwargs):
+    """
+    Function to compute the current source density (CSD) from extracellular
+    potential recordings (local field potentials, LFP) using laminar electrodes
+    or multi-contact electrodes with 2D or 3D geometries.
+
+    Parameters
+    ----------
+    lfp : neo.AnalogSignal
+        LFP signals from which to estimate the CSD. The positions of the
+        electrodes can be attached as the coordinates of the signal's channel
+        index, or passed explicitly via the `coords` argument (see below).
+    coords : [Optional] corresponding spatial coordinates of the electrodes
+        Defaults to None, in which case the coordinates are read from the
+        channel index of `lfp`.
+    method : string
+        Pick a method corresonding to the setup, in this implementation
+        For Laminar probe style (1D), use 'KCSD1D' or 'StandardCSD',
+         or 'DeltaiCSD' or 'StepiCSD' or 'SplineiCSD'
+        For MEA probe style (2D),  use 'KCSD2D', or 'MoIKCSD'
+        For array of laminar probes (3D), use 'KCSD3D'
+        Defaults to None
+    process_estimate : bool
+        In the py_iCSD_toolbox this corresponds to the filter_csd -
+        the parameters are passed as kwargs here ie., f_type and f_order
+        In the kcsd methods this corresponds to cross_validate -
+        the parameters are passed as kwargs here ie., lambdas and Rs
+        Defaults to True
+    kwargs : parameters to each method
+        The parameters corresponding to the method chosen
+        See the documentation of the individual method
+        Default is {} - picks the best parameters,
+
+    Returns
+    -------
+    Estimated CSD
+       neo.AnalogSignal Object
+       annotated with the spatial coordinates
+
+    Raises
+    ------
+    AttributeError
+        No units specified for electrode spatial coordinates
+    ValueError
+        Invalid function arguments, wrong method name, or
+        mismatching coordinates
+    TypeError
+        Invalid cv_param argument passed
+    """
+    if not isinstance(lfp, neo.AnalogSignal):
+        raise TypeError('Parameter `lfp` must be a neo.AnalogSignal object')
+    if coords is None:
+        coords = lfp.channel_index.coordinates
+        # for ii in lfp:
+        #     coords.append(ii.channel_index.coordinate.rescale(pq.mm))
+    else:
+        scaled_coords = []
+        for coord in coords:
+            try:
+                scaled_coords.append(coord.rescale(pq.mm))
+            except AttributeError:
+                raise AttributeError(
+                    'No units given for electrode spatial coordinates')
+        coords = scaled_coords
+    if method is None:
+        raise ValueError('Must specify a method of CSD implementation')
+    if len(coords) != len(lfp):
+        raise ValueError('Number of signals and coordinates do not match')
+    for ii in coords:  # CHECK for Dimensionality of electrodes
+        if len(ii) > 3:
+            raise ValueError('Invalid number of coordinate positions')
+    dim = len(coords[0])  # TODO : Generic co-ordinates!
+    if dim == 1 and (method not in available_1d):
+        raise ValueError('Invalid method, Available options are:',
+                         available_1d)
+    if dim == 2 and (method not in available_2d):
+        raise ValueError('Invalid method, Available options are:',
+                         available_2d)
+    if dim == 3 and (method not in available_3d):
+        raise ValueError('Invalid method, Available options are:',
+                         available_3d)
+    if method in kernel_methods:
+        input_array = np.zeros((len(lfp), lfp[0].magnitude.shape[0]))
+        for ii, jj in enumerate(lfp):
+            input_array[ii, :] = jj.rescale(pq.mV).magnitude
+        kernel_method = getattr(KCSD, method)  # fetch the class 'KCSD1D'
+        lambdas = kwargs.pop('lambdas', None)
+        Rs = kwargs.pop('Rs', None)
+        k = kernel_method(np.array(coords), input_array, **kwargs)
+        if process_estimate:
+            k.cross_validate(lambdas, Rs)
+        estm_csd = k.values()
+        estm_csd = np.rollaxis(estm_csd, -1, 0)
+        output = neo.AnalogSignal(estm_csd * pq.uA / pq.mm**3,
+                                  t_start=lfp.t_start,
+                                  sampling_rate=lfp.sampling_rate)
+
+        if dim == 1:
+            output.annotate(x_coords=k.estm_x)
+        elif dim == 2:
+            output.annotate(x_coords=k.estm_x, y_coords=k.estm_y)
+        elif dim == 3:
+            output.annotate(x_coords=k.estm_x, y_coords=k.estm_y,
+                            z_coords=k.estm_z)
+    elif method in py_iCSD_toolbox:
+
+        coords = np.array(coords) * coords[0].units
+
+        if method in icsd_methods:
+            try:
+                coords = coords.rescale(kwargs['diam'].units)
+            except KeyError:  # Then why specify as a default in icsd?
+                              # All iCSD methods explicitly assume a source
+                              # diameter, in contrast to the StandardCSD that
+                              # implicitly assumes an infinite source radius.
+                raise ValueError(
+                    "Parameter diam must be specified for iCSD "
+                    "methods: {}".format(", ".join(icsd_methods)))
+
+        if 'f_type' in kwargs:
+            if (kwargs['f_type'] != 'identity') and \
+               (kwargs.get('f_order') is None):
+                raise ValueError("The order of the {} filter must be "
+                                 "specified".format(kwargs['f_type']))
+
+        lfp = neo.AnalogSignal(np.asarray(lfp).T, units=lfp.units,
+                                    sampling_rate=lfp.sampling_rate)
+        csd_method = getattr(icsd, method)  # fetch class from icsd.py file
+        csd_estimator = csd_method(lfp=lfp.magnitude.T * lfp.units,
+                                   coord_electrode=coords.flatten(),
+                                   **kwargs)
+        csd_pqarr = csd_estimator.get_csd()
+
+        if process_estimate:
+            csd_pqarr_filtered = csd_estimator.filter_csd(csd_pqarr)
+            output = neo.AnalogSignal(csd_pqarr_filtered.T,
+                                           t_start=lfp.t_start,
+                                           sampling_rate=lfp.sampling_rate)
+        else:
+            output = neo.AnalogSignal(csd_pqarr.T, t_start=lfp.t_start,
+                                           sampling_rate=lfp.sampling_rate)
+        output.annotate(x_coords=coords)
+    return output
+
+
+def generate_lfp(csd_profile, ele_xx, ele_yy=None, ele_zz=None,
+                 xlims=[0., 1.], ylims=[0., 1.], zlims=[0., 1.], res=50):
+    """Forward modelling for the getting the potentials for testing CSD
+
+        Parameters
+        ----------
+        csd_profile : function that computes the true CSD profile
+            Available options are (see ./current_source_density_src/utility_functions.py)
+            1D : gauss_1d_dipole
+            2D : large_source_2D and small_source_2D
+            3D : gauss_3d_dipole
+        ele_xx : np.array
+            Positions of the x coordinates of the electrodes
+        ele_yy : np.array
+            Positions of the y coordinates of the electrodes
+            Defaults to None; used in the 2D and 3D cases only
+        ele_zz : np.array
+            Positions of the z coordinates of the electrodes
+            Defaults to None; used in the 3D case only
+        xlims : [start, end]
+            The starting and ending spatial coordinates for integration
+            Defaults to [0., 1.]
+        ylims : [start, end]
+            The starting and ending spatial coordinates for integration
+            Defaults to [0., 1.]; used only in the 2D and 3D cases
+        zlims : [start, end]
+            The starting and ending spatial coordinates for integration
+            Defaults to [0., 1.]; used only in the 3D case
+        res : int
+            The resolution of the integration
+            Defaults to 50
+
+        Returns
+        -------
+        LFP : neo.AnalogSignal
+           The potentials created by the csd profile at the electrode positions
+           The electrode positions are attached as the coordinates of a
+           neo.ChannelIndex linked to the returned signal
+    """
+    def integrate_1D(x0, csd_x, csd, h):
+        m = np.sqrt((csd_x - x0)**2 + h**2) - abs(csd_x - x0)
+        y = csd * m
+        I = simps(y, csd_x)
+        return I
+
+    def integrate_2D(x, y, xlin, ylin, csd, h, X, Y):
+        Ny = ylin.shape[0]
+        m = np.sqrt((x - X)**2 + (y - Y)**2)
+        m[m < 0.0000001] = 0.0000001
+        y = np.arcsinh(2 * h / m) * csd
+        I = np.zeros(Ny)
+        for i in range(Ny):
+            I[i] = simps(y[:, i], ylin)
+        F = simps(I, xlin)
+        return F
+
+    def integrate_3D(x, y, z, xlim, ylim, zlim, csd, xlin, ylin, zlin,
+                     X, Y, Z):
+        Nz = zlin.shape[0]
+        Ny = ylin.shape[0]
+        m = np.sqrt((x - X)**2 + (y - Y)**2 + (z - Z)**2)
+        m[m < 0.0000001] = 0.0000001
+        z = csd / m
+        Iy = np.zeros(Ny)
+        for j in range(Ny):
+            Iz = np.zeros(Nz)
+            for i in range(Nz):
+                Iz[i] = simps(z[:, j, i], zlin)
+            Iy[j] = simps(Iz, ylin)
+        F = simps(Iy, xlin)
+        return F
+    dim = 1
+    if ele_zz is not None:
+        dim = 3
+    elif ele_yy is not None:
+        dim = 2
+    x = np.linspace(xlims[0], xlims[1], res)
+    if dim >= 2:
+        y = np.linspace(ylims[0], ylims[1], res)
+    if dim == 3:
+        z = np.linspace(zlims[0], zlims[1], res)
+    sigma = 1.0
+    h = 50.
+    pots = np.zeros(len(ele_xx))
+    if dim == 1:
+        chrg_x = np.linspace(xlims[0], xlims[1], res)
+        csd = csd_profile(chrg_x)
+        for ii in range(len(ele_xx)):
+            pots[ii] = integrate_1D(ele_xx[ii], chrg_x, csd, h)
+        pots /= 2. * sigma  # eq.: 26 from Potworowski et al
+        ele_pos = ele_xx
+    elif dim == 2:
+        chrg_x, chrg_y = np.mgrid[xlims[0]:xlims[1]:np.complex(0, res),
+                                  ylims[0]:ylims[1]:np.complex(0, res)]
+        csd = csd_profile(chrg_x, chrg_y)
+        for ii in range(len(ele_xx)):
+            pots[ii] = integrate_2D(ele_xx[ii], ele_yy[ii],
+                                    x, y, csd, h, chrg_x, chrg_y)
+        pots /= 2 * np.pi * sigma
+        ele_pos = np.vstack((ele_xx, ele_yy)).T
+    elif dim == 3:
+        chrg_x, chrg_y, chrg_z = np.mgrid[xlims[0]:xlims[1]:np.complex(0, res),
+                                          ylims[0]:ylims[1]:np.complex(0, res),
+                                          zlims[0]:zlims[1]:np.complex(0, res)]
+        csd = csd_profile(chrg_x, chrg_y, chrg_z)
+        xlin = chrg_x[:, 0, 0]
+        ylin = chrg_y[0, :, 0]
+        zlin = chrg_z[0, 0, :]
+        for ii in range(len(ele_xx)):
+            pots[ii] = integrate_3D(ele_xx[ii], ele_yy[ii], ele_zz[ii],
+                                    xlims, ylims, zlims, csd,
+                                    xlin, ylin, zlin,
+                                    chrg_x, chrg_y, chrg_z)
+        pots /= 4 * np.pi * sigma
+        ele_pos = np.vstack((ele_xx, ele_yy, ele_zz)).T
+    pots = np.reshape(pots, (-1, 1)) * pq.mV
+    ele_pos = ele_pos * pq.mm
+    lfp = []
+    ch = neo.ChannelIndex(index=range(len(pots)))
+    for ii in range(len(pots)):
+        lfp.append(pots[ii])
+    # lfp = neo.AnalogSignal(lfp, sampling_rate=1000*pq.Hz, units='mV')
+    asig = neo.AnalogSignal(lfp, sampling_rate=pq.kHz, units='mV')
+    ch.coordinates = ele_pos
+    ch.analogsignals.append(asig)
+    ch.create_relationship()
+    return asig
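A minimal end-to-end sketch of the two functions above, assuming a neo version
in which ChannelIndex.create_relationship() links the generated signal back to
its coordinates; the electrode layout, method choice and cross-validation grids
below are illustrative placeholders, not prescribed values:

    import numpy as np
    import quantities as pq
    from elephant.current_source_density import generate_lfp, estimate_csd
    import elephant.current_source_density_src.utility_functions as utils

    # hypothetical laminar probe: 16 contacts between 0.1 and 0.9 (in mm)
    ele_pos = np.linspace(0.1, 0.9, 16).reshape(16, 1)

    # forward-model the LFP from a known 1D dipole profile ...
    lfp = generate_lfp(utils.gauss_1d_dipole, ele_pos)

    # ... and estimate the CSD back from it; the coordinates are read from
    # the ChannelIndex attached by generate_lfp
    csd_std = estimate_csd(lfp, method='StandardCSD')

    # kernel CSD with an explicit (hypothetical) cross-validation grid,
    # passed through as kwargs when process_estimate is True
    csd_kcsd = estimate_csd(lfp, method='KCSD1D',
                            lambdas=np.array([1E-4, 1E-3]),
                            Rs=np.array([0.2, 0.3]))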

File diff suppressed because it is too large
+ 1059 - 0
code/elephant/elephant/current_source_density_src/KCSD.py


+ 96 - 0
code/elephant/elephant/current_source_density_src/README.md

@@ -0,0 +1,96 @@
+Here are CSD methods for different electrode configurations.
+
+Keywords: Local field potentials; Current-source density; CSD;
+Multielectrode; Laminar electrode; Barrel cortex
+
+1D - laminar probe like electrodes.
+2D - microelectrode array (MEA) like electrodes.
+3D - Utah array or multiple laminar probes.
+
+The following methods have been implemented here:
+
+1D - StandardCSD, DeltaiCSD, SplineiCSD, StepiCSD, KCSD1D
+2D - KCSD2D, MoIKCSD (Saline layer on top of slice)
+3D - KCSD3D
+
+Each of the methods listed has its own advantages - except StandardCSD, which
+is not recommended. The KCSD methods can handle broken or irregular electrode
+configurations.
+
+iCSD
+----
+Python-implementation of the inverse current source density (iCSD) methods from
+http://software.incf.org/software/csdplotter
+
+The Python iCSD toolbox lives on GitHub as well:
+https://github.com/espenhgn/iCSD
+
+The methods were originally developed by Klas H. Pettersen, as described in:
+Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute
+T. Einevoll, Current-source density estimation based on inversion of
+electrostatic forward solution: Effects of finite extent of neuronal activity
+and conductivity discontinuities, Journal of Neuroscience Methods, Volume 154,
+Issues 1-2, 30 June 2006, Pages 116-133, ISSN 0165-0270,
+http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
+(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
+
+To see an example of usage of the methods, see the file demo_icsd.py
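+
+A minimal sketch of driving one of these classes directly (the random LFP
+array, electrode depths and source diameter below are placeholder values for
+illustration, not recommendations):
+
+    import numpy as np
+    import quantities as pq
+    from elephant.current_source_density_src import icsd
+
+    lfp = np.random.randn(23, 100) * 1E-6 * pq.V   # 23 contacts, 100 samples
+    z = np.linspace(100E-6, 2300E-6, 23) * pq.m    # contact depths
+    delta = icsd.DeltaiCSD(lfp=lfp, coord_electrode=z, diam=500E-6 * pq.m)
+    csd = delta.filter_csd(delta.get_csd())        # raw, then spatially filtered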
+
+KCSD 
+---- 
+This is 1.0 version of kCSD inverse method proposed in
+
+J. Potworowski, W. Jakuczun, S. Łęski, D. K. Wójcik
+"Kernel Current Source Density Method"
+Neural Computation 24 (2012), 541–575
+
+Some key advantages of the KCSD methods are
+-- irregular grid of electrodes - arbitrary electrode placement is accepted.
+-- cross-validation to ensure there is no overfitting
+-- the CSD is not limited to electrode positions - it can be obtained at any location
+
+For citation purposes:
+if you use this software in published research, please cite the following works
+- kCSD1D - [1, 2]
+- kCSD2D - [1, 3]
+- kCSD3D - [1, 4]
+- MoIkCSD - [1, 3, 5]
+
+[1] Potworowski, J., Jakuczun, W., Łęski, S. & Wójcik, D. (2012) 'Kernel
+current source density method.' Neural Comput 24(2), 541-575.
+
+[2] Pettersen, K. H., Devor, A., Ulbert, I., Dale, A. M. & Einevoll,
+G. T. (2006) 'Current-source density estimation based on inversion of
+electrostatic forward solution: effects of finite extent of neuronal activity
+and conductivity discontinuities.' J Neurosci Methods 154(1-2), 116-133.
+
+[3] Łęski, S., Pettersen, K. H., Tunstall, B., Einevoll, G. T., Gigg, J. &
+Wójcik, D. K. (2011) 'Inverse Current Source Density method in two dimensions:
+Inferring neural activation from multielectrode recordings.' Neuroinformatics
+9(4), 401-425.
+
+[4] Łęski, S., Wójcik, D. K., Tereszczuk, J., Świejkowski, D. A., Kublik, E. &
+Wróbel, A. (2007) 'Inverse current-source density method in 3D: reconstruction
+fidelity, boundary effects, and influence of distant sources.' Neuroinformatics
+5(4), 207-222.
+
+[5] Ness, T. V., Chintaluri, C., Potworowski, J., Łeski, S., Głabska, H.,
+Wójcik, D. K. & Einevoll, G. T. (2015) 'Modelling and Analysis of Electrical
+Potentials Recorded in Microelectrode Arrays (MEAs).' Neuroinformatics 13(4),
+403-426.
+
+If you are interested in kernel methods for CSD estimation, please see
+https://github.com/Neuroinflab/kCSD-python 
+
+Contact: Prof. Daniel K. Wojcik
+
+Here (https://github.com/Neuroinflab/kCSD-python/tree/master/tests) are
+scripts to compare different KCSD methods with different CSD sources. You can
+play around with the different parameters of the methods.
+
+The implementation is based on the Matlab version at INCF
+(http://software.incf.org/software/kcsd), which is now outdated. A Python
+version based on this was developed by Grzegorz Parka
+(https://github.com/INCF/pykCSD), which is also not supported at this
+point. The current version of the KCSD methods in elephant is a mirror of
+https://github.com/Neuroinflab/kCSD-python/commit/8e2ae26b00da7b96884f2192ec9ea612b195ec30

+ 3 - 0
code/elephant/elephant/current_source_density_src/__init__.py

@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+from . import icsd
+from . import KCSD

+ 201 - 0
code/elephant/elephant/current_source_density_src/basis_functions.py

@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+"""
+This script is used to generate basis sources for the
+kCSD method (Potworowski et al., 2012) for the 1D, 2D and 3D cases.
+Two source 'types' are described here, gaussian and step;
+these can be easily extended.
+These scripts are based on Grzegorz Parka's
+Google Summer of Code 2014 project, INCF/pykCSD.
+This was written by:
+Michal Czerwinski, Chaitanya Chintaluri
+Laboratory of Neuroinformatics,
+Nencki Institute of Experimental Biology, Warsaw.
+"""
+from __future__ import division
+
+import numpy as np
+
+def gauss(d, stdev, dim):
+    """Gaussian function
+    Parameters
+    ----------
+    d : floats or np.arrays
+        Distance array to the point of evaluation
+    stdev : float
+        standard deviation of the gaussian
+    dim : int
+        dimension of the gaussian function
+    Returns
+    -------
+    Z : floats or np.arrays
+        function evaluated
+    """
+    Z = np.exp(-(d**2) / (2* stdev**2) ) / (np.sqrt(2*np.pi)*stdev)**dim
+    return Z
+
+def step_1D(d, R):
+    """Returns normalized 1D step function.
+    Parameters
+    ----------
+    d : floats or np.arrays
+        Distance array to the point of evaluation
+    R : float
+        cutoff range
+    Returns
+    -------
+    s : Value of the function (d  <= R) / R
+    """
+    s = (d  <= R)
+    s = s / R #normalize with width
+    return s
+
+def gauss_1D(d, three_stdev):
+    """Returns normalized gaussian 2D scale function
+    Parameters
+    ----------
+    d : floats or np.arrays
+        Distance array to the point of evaluation
+    three_stdev : float
+        3 * standard deviation of the distribution
+    Returns
+    -------
+    Z : normalized 1D gaussian evaluated at d, with stdev = three_stdev/3
+    """
+    stdev = three_stdev/3.0
+    Z = gauss(d, stdev, 1)
+    return Z
+
+def gauss_lim_1D(d, three_stdev):
+    """Returns gausian 2D function cut off after 3 standard deviations.
+    Parameters
+    ----------
+    d : floats or np.arrays
+        Distance array to the point of evaluation
+    three_stdev : float
+        3 * standard deviation of the distribution
+    Returns
+    -------
+    Z : normalized 1D gaussian evaluated at d, with stdev = three_stdev/3,
+        cut off at d = three_stdev
+    """
+    Z = gauss_1D(d, three_stdev)
+    Z *= (d < three_stdev)
+    return Z
+
+def step_2D(d, R):
+    """Returns normalized 2D step function.
+    Parameters
+    ----------
+    d : float or np.arrays
+        Distance array to the point of evaluation
+    R : float
+        cutoff range
+
+    Returns
+    -------
+    s : step function
+    """
+    s = (d <= R) / (np.pi*(R**2))
+    return s
+
+def gauss_2D(d, three_stdev):
+    """Returns normalized gaussian 2D scale function
+    Parameters
+    ----------
+    d : floats or np.arrays
+         distance at which we need the function evaluated
+    three_stdev : float
+        3 * standard deviation of the distribution
+    Returns
+    -------
+    Z : function
+        Normalized gaussian 2D function
+    """
+    stdev = three_stdev/3.0
+    Z = gauss(d, stdev, 2)
+    return Z
+
+def gauss_lim_2D(d, three_stdev):
+    """Returns gausian 2D function cut off after 3 standard deviations.
+    Parameters
+    ----------
+    d : floats or np.arrays
+         distance at which we need the function evaluated
+    three_stdev : float
+        3 * standard deviation of the distribution
+    Returns
+    -------
+    Z : function
+        Normalized gaussian 2D function cut off after three_stdev
+    """
+    Z = (d <= three_stdev)*gauss_2D(d, three_stdev)
+    return Z
+
+def gauss_3D(d, three_stdev):
+    """Returns normalized gaussian 3D scale function
+    Parameters
+    ----------
+    d : floats or np.arrays
+        distance at which we need the function evaluated
+    three_stdev : float
+        3 * standard deviation of the distribution
+    Returns
+    -------
+    Z : function
+        Normalized gaussian 3D function
+    """
+    stdev = three_stdev/3.0
+    Z = gauss(d, stdev, 3)
+    return Z
+
+def gauss_lim_3D(d, three_stdev):
+    """Returns normalized gaussian 3D scale function cut off after 3stdev
+    Parameters
+    ----------
+    d : floats or np.arrays
+        distance at which we need the function evaluated
+    three_stdev : float
+        3 * standard deviation of the distribution
+    Returns
+    -------
+    Z : function
+        Normalized gaussian 3D function cut off at three_stdev
+    """
+    Z = gauss_3D(d, three_stdev)
+    Z = Z * (d < (three_stdev))
+    return Z
+
+def step_3D(d, R):
+    """Returns normalized 3D step function.
+    Parameters
+    ----------
+    d : floats or np.arrays
+        distance at which we need the function evaluated
+    R : float
+        cutoff range
+    Returns
+    -------
+    s : step function in 3D
+    """
+
+    s = 3/(4*np.pi*R**3)*(d <= R)
+    return s
+
+basis_1D = {
+    "step": step_1D,
+    "gauss": gauss_1D,
+    "gauss_lim": gauss_lim_1D,
+}
+
+
+basis_2D = {
+    "step": step_2D,
+    "gauss": gauss_2D,
+    "gauss_lim": gauss_lim_2D,
+}
+
+basis_3D = {
+    "step": step_3D,
+    "gauss": gauss_3D,
+    "gauss_lim": gauss_lim_3D,
+}
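A small illustrative sketch of evaluating the basis sources collected in the
dictionaries above (the distance grid and parameter values are arbitrary):

    import numpy as np
    from elephant.current_source_density_src.basis_functions import basis_1D

    d = np.linspace(0., 1., 5)   # distances from a basis source centre
    R = 0.3                      # cutoff range for the step source
    three_stdev = 0.3            # 3 * standard deviation of the gaussian source

    step_vals = basis_1D['step'](d, R)               # 1/R inside the range, 0 outside
    gauss_vals = basis_1D['gauss'](d, three_stdev)   # normalized gaussian profile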

+ 887 - 0
code/elephant/elephant/current_source_density_src/icsd.py

@@ -0,0 +1,887 @@
+# -*- coding: utf-8 -*-
+'''
+py-iCSD toolbox!
+Translation of the core functionality of the CSDplotter MATLAB package
+to python.
+
+The methods were originally developed by Klas H. Pettersen, as described in:
+Klas H. Pettersen, Anna Devor, Istvan Ulbert, Anders M. Dale, Gaute T. Einevoll,
+Current-source density estimation based on inversion of electrostatic forward
+solution: Effects of finite extent of neuronal activity and conductivity
+discontinuities, Journal of Neuroscience Methods, Volume 154, Issues 1-2,
+30 June 2006, Pages 116-133, ISSN 0165-0270,
+http://dx.doi.org/10.1016/j.jneumeth.2005.12.005.
+(http://www.sciencedirect.com/science/article/pii/S0165027005004541)
+
+The methods themselves are implemented as callable subclasses of the base
+CSD class object, which sets some common attributes,
+and a basic function for calculating the iCSD, and a generic spatial filter
+implementation.
+
+The raw- and filtered CSD estimates are returned as Quantity arrays.
+
+Requires a pylab environment to work, i.e. numpy+scipy+matplotlib, with the
+addition of quantities (http://pythonhosted.org/quantities) and
+neo (https://pythonhosted.org/neo).
+
+Original implementation from CSDplotter-0.1.1
+(http://software.incf.org/software/csdplotter) by Klas. H. Pettersen 2005.
+
+Written by:
+- Espen.Hagen@umb.no, 2010,
+- e.hagen@fz-juelich.de, 2015-2016
+
+'''
+
+import numpy as np
+import scipy.integrate as si
+import scipy.signal as ss
+import quantities as pq
+
+
+class CSD(object):
+    '''Base iCSD class'''
+    def __init__(self, lfp, f_type='gaussian', f_order=(3, 1)):
+        '''Initialize parent class iCSD
+
+        Parameters
+        ----------
+        lfp : np.ndarray * quantity.Quantity
+            LFP signal of shape (# channels, # time steps)
+        f_type : str
+            type of spatial filter, must be a scipy.signal filter design method
+        f_order : list
+            settings for spatial filter, arg passed to  filter design function
+        '''
+        self.name = 'CSD estimate parent class'
+        self.lfp = lfp
+        self.f_matrix = np.eye(lfp.shape[0]) * pq.m**3 / pq.S
+        self.f_type = f_type
+        self.f_order = f_order
+
+    def get_csd(self, ):
+        '''
+        Perform the CSD estimate from the LFP and forward matrix F, i.e as
+        CSD=F**-1*LFP
+
+        Arguments
+        ---------
+
+        Returns
+        -------
+        csd : np.ndarray * quantity.Quantity
+            Array with the csd estimate
+        '''
+        csd = np.linalg.solve(self.f_matrix, self.lfp)
+
+        return csd * (self.f_matrix.units**-1 * self.lfp.units).simplified
+
+    def filter_csd(self, csd, filterfunction='convolve'):
+        '''
+        Spatial filtering of the CSD estimate, using an N-point filter
+
+        Arguments
+        ---------
+        csd : np.ndarrray * quantity.Quantity
+            Array with the csd estimate
+        filterfunction : str
+            'filtfilt' or 'convolve'. Apply spatial filter using
+            scipy.signal.filtfilt or scipy.signal.convolve.
+        '''
+        if self.f_type == 'gaussian':
+            try:
+                assert(len(self.f_order) == 2)
+            except AssertionError:
+                raise AssertionError('filter order f_order must be a tuple '
+                                     'of length 2')
+        else:
+            try:
+                assert(self.f_order > 0 and isinstance(self.f_order, int))
+            except AssertionError:
+                raise AssertionError('Filter order must be int > 0!')
+        try:
+            assert(filterfunction in ['filtfilt', 'convolve'])
+        except AssertionError:
+            raise AssertionError("{} not equal to 'filtfilt' or "
+                                 "'convolve'".format(filterfunction))
+
+        if self.f_type == 'boxcar':
+            num = ss.boxcar(self.f_order)
+            denom = np.array([num.sum()])
+        elif self.f_type == 'hamming':
+            num = ss.hamming(self.f_order)
+            denom = np.array([num.sum()])
+        elif self.f_type == 'triangular':
+            num = ss.triang(self.f_order)
+            denom = np.array([num.sum()])
+        elif self.f_type == 'gaussian':
+            num = ss.gaussian(self.f_order[0], self.f_order[1])
+            denom = np.array([num.sum()])
+        elif self.f_type == 'identity':
+            num = np.array([1.])
+            denom = np.array([1.])
+        else:
+            raise ValueError('%s is not a valid filter type!' % self.f_type)
+
+        num_string = '[ '
+        for i in num:
+            num_string = num_string + '%.3f ' % i
+        num_string = num_string + ']'
+        denom_string = '[ '
+        for i in denom:
+            denom_string = denom_string + '%.3f ' % i
+        denom_string = denom_string + ']'
+
+        print(('discrete filter coefficients: \nb = {}, \
+               \na = {}'.format(num_string, denom_string)))
+
+        if filterfunction == 'filtfilt':
+            return ss.filtfilt(num, denom, csd, axis=0) * csd.units
+        elif filterfunction == 'convolve':
+            csdf = csd / csd.units
+            for i in range(csdf.shape[1]):
+                csdf[:, i] = ss.convolve(csdf[:, i], num / denom.sum(), 'same')
+            return csdf * csd.units
+
+
+class StandardCSD(CSD):
+    '''
+    Standard CSD method with and without Vaknin electrodes
+    '''
+
+    def __init__(self, lfp, coord_electrode, **kwargs):
+        '''
+        Initialize standard CSD method class with & without Vaknin electrodes.
+
+        Parameters
+        ----------
+        lfp : np.ndarray * quantity.Quantity
+            LFP signal of shape (# channels, # time steps) in units of V
+        coord_electrode : np.ndarray * quantity.Quantity
+            depth of evenly spaced electrode contact points of shape
+            (# contacts, ) in units of m, must be monotonically increasing
+        sigma : float * quantity.Quantity
+            conductivity of tissue in units of S/m or 1/(ohm*m)
+            Defaults to 0.3 S/m
+        vaknin_el : bool
+            flag for using method of Vaknin to endpoint electrodes
+            Defaults to True
+        f_type : str
+            type of spatial filter, must be a scipy.signal filter design method
+            Defaults to 'gaussian'
+        f_order : list
+            settings for spatial filter, arg passed to  filter design function
+            Defaults to (3,1) for the gaussian
+        '''
+        self.parameters(**kwargs)
+        CSD.__init__(self, lfp, self.f_type, self.f_order)
+
+        diff_diff_coord = np.diff(np.diff(coord_electrode)).magnitude
+        zeros_ddc = np.zeros_like(diff_diff_coord)
+        try:
+            assert(np.all(np.isclose(diff_diff_coord, zeros_ddc, atol=1e-12)))
+        except AssertionError as ae:
+            print('coord_electrode is not evenly spaced')
+            raise ae
+
+        if self.vaknin_el:
+            # extend lfps array by duplicating potential at endpoint contacts
+            if lfp.ndim == 1:
+                self.lfp = np.empty((lfp.shape[0] + 2, )) * lfp.units
+            else:
+                self.lfp = np.empty((lfp.shape[0] + 2, lfp.shape[1])) * lfp.units
+            self.lfp[0, ] = lfp[0, ]
+            self.lfp[1:-1, ] = lfp
+            self.lfp[-1, ] = lfp[-1, ]
+        else:
+            self.lfp = lfp
+
+        self.name = 'Standard CSD method'
+        self.coord_electrode = coord_electrode
+
+        self.f_inv_matrix = self.get_f_inv_matrix()
+
+    def parameters(self, **kwargs):
+        '''Defining the default values of the method passed as kwargs
+        Parameters
+        ----------
+        **kwargs
+            Same as those passed to initialize the Class
+        '''
+        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
+        self.vaknin_el = kwargs.pop('vaknin_el', True)
+        self.f_type = kwargs.pop('f_type', 'gaussian')
+        self.f_order = kwargs.pop('f_order', (3, 1))
+        if kwargs:
+            raise TypeError('Invalid keyword arguments:', kwargs.keys())
+
+    def get_f_inv_matrix(self):
+        '''Calculate the inverse F-matrix for the standard CSD method'''
+        h_val = abs(np.diff(self.coord_electrode)[0])
+        f_inv = -np.eye(self.lfp.shape[0])
+
+        # Inner matrix elements are just the discrete Laplacian coefficients
+        for j in range(1, f_inv.shape[0] - 1):
+            f_inv[j, j - 1: j + 2] = np.array([1., -2., 1.])
+        return f_inv * -self.sigma / h_val
+
+    def get_csd(self):
+        '''
+        Perform the iCSD calculation, i.e: iCSD=F_inv*LFP
+
+        Returns
+        -------
+        csd : np.ndarray * quantity.Quantity
+            Array with the csd estimate
+        '''
+        csd = np.dot(self.f_inv_matrix, self.lfp)[1:-1, ]
+        # `np.dot()` does not return correct units, so the units of `csd` must
+        # be assigned manually
+        csd_units = (self.f_inv_matrix.units * self.lfp.units).simplified
+        csd = csd.magnitude * csd_units
+
+        return csd
+
+
+class DeltaiCSD(CSD):
+    '''
+    delta-iCSD method
+    '''
+    def __init__(self, lfp, coord_electrode, **kwargs):
+        '''
+        Initialize the delta-iCSD method class object
+
+        Parameters
+        ----------
+        lfp : np.ndarray * quantity.Quantity
+            LFP signal of shape (# channels, # time steps) in units of V
+        coord_electrode : np.ndarray * quantity.Quantity
+            depth of evenly spaced electrode contact points of shape
+            (# contacts, ) in units of m
+        diam : float * quantity.Quantity
+            diameter of the assumed circular planar current sources centered
+            at each contact
+            Defaults to 500E-6 meters
+        sigma : float * quantity.Quantity
+            conductivity of tissue in units of S/m or 1/(ohm*m)
+            Defaults to 0.3 S / m
+        sigma_top : float * quantity.Quantity
+            conductivity on top of tissue in units of S/m or 1/(ohm*m)
+            Defaults to 0.3 S / m
+        f_type : str
+            type of spatial filter, must be a scipy.signal filter design method
+            Defaults to 'gaussian'
+        f_order : list
+            settings for spatial filter, arg passed to  filter design function
+            Defaults to (3,1) for gaussian
+        '''
+        self.parameters(**kwargs)
+        CSD.__init__(self, lfp, self.f_type, self.f_order)
+
+        try:  # Should the class not take care of this?!
+            assert(self.diam.units == coord_electrode.units)
+        except AssertionError as ae:
+            print('units of coord_electrode ({}) and diam ({}) differ'
+                  .format(coord_electrode.units, self.diam.units))
+            raise ae
+
+        try:
+            assert(np.all(np.diff(coord_electrode) > 0))
+        except AssertionError as ae:
+            print('values of coord_electrode not monotonically increasing')
+            raise ae
+
+        try:
+            assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
+            if self.diam.size == coord_electrode.size:
+                assert(np.all(self.diam > 0 * self.diam.units))
+            else:
+                assert(self.diam > 0 * self.diam.units)
+        except AssertionError as ae:
+            print('diam must be positive scalar or of same shape \
+                   as coord_electrode')
+            raise ae
+        if self.diam.size == 1:
+            self.diam = np.ones(coord_electrode.size) * self.diam
+
+        self.name = 'delta-iCSD method'
+        self.coord_electrode = coord_electrode
+
+        # initialize F- and iCSD-matrices
+        self.f_matrix = np.empty((self.coord_electrode.size,
+                                  self.coord_electrode.size))
+        self.f_matrix = self.get_f_matrix()
+
+    def parameters(self, **kwargs):
+        '''Defining the default values of the method passed as kwargs
+        Parameters
+        ----------
+        **kwargs
+            Same as those passed to initialize the Class
+        '''
+        self.diam = kwargs.pop('diam', 500E-6 * pq.m)
+        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
+        self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
+        self.f_type = kwargs.pop('f_type', 'gaussian')
+        self.f_order = kwargs.pop('f_order', (3, 1))
+        if kwargs:
+            raise TypeError('Invalid keyword arguments:', kwargs.keys())
+
+    def get_f_matrix(self):
+        '''Calculate the F-matrix'''
+        f_matrix = np.empty((self.coord_electrode.size,
+                             self.coord_electrode.size)) * self.coord_electrode.units
+        for j in range(self.coord_electrode.size):
+            for i in range(self.coord_electrode.size):
+                f_matrix[j, i] = ((np.sqrt((self.coord_electrode[j] -
+                                            self.coord_electrode[i])**2 +
+                    (self.diam[j] / 2)**2) - abs(self.coord_electrode[j] -
+                                                 self.coord_electrode[i])) +
+                    (self.sigma - self.sigma_top) / (self.sigma +
+                                                     self.sigma_top) *
+                    (np.sqrt((self.coord_electrode[j] +
+                              self.coord_electrode[i])**2 + (self.diam[j] / 2)**2)-
+                    abs(self.coord_electrode[j] + self.coord_electrode[i])))
+
+        f_matrix /= (2 * self.sigma)
+        return f_matrix
+
+
+class StepiCSD(CSD):
+    '''step-iCSD method'''
+    def __init__(self, lfp, coord_electrode, **kwargs):
+
+        '''
+        Initializing step-iCSD method class object
+
+        Parameters
+        ----------
+        lfp : np.ndarray * quantity.Quantity
+            LFP signal of shape (# channels, # time steps) in units of V
+        coord_electrode : np.ndarray * quantity.Quantity
+            depth of evenly spaced electrode contact points of shape
+            (# contacts, ) in units of m
+        diam : float or np.ndarray * quantity.Quantity
+            diameter(s) of the assumed circular planar current sources centered
+            at each contact
+            Defaults to 500E-6 meters
+        h : float or np.ndarray * quantity.Quantity
+            assumed thickness of the source cylinders at all or each contact
+            Defaults to np.ones(15) * 100E-6 * pq.m
+        sigma : float * quantity.Quantity
+            conductivity of tissue in units of S/m or 1/(ohm*m)
+            Defaults to 0.3 S / m
+        sigma_top : float * quantity.Quantity
+            conductivity on top of tissue in units of S/m or 1/(ohm*m)
+            Defaults to 0.3 S / m
+        tol : float
+            tolerance of numerical integration
+            Defaults 1e-6
+        f_type : str
+            type of spatial filter, must be a scipy.signal filter design method
+            Defaults to 'gaussian'
+        f_order : list
+            settings for spatial filter, arg passed to  filter design function
+            Defaults to (3,1) for the gaussian
+        '''
+        self.parameters(**kwargs)
+        CSD.__init__(self, lfp, self.f_type, self.f_order)
+
+        try:  # Should the class not take care of this?
+            assert(self.diam.units == coord_electrode.units)
+        except AssertionError as ae:
+            print('units of coord_electrode ({}) and diam ({}) differ'
+                  .format(coord_electrode.units, self.diam.units))
+            raise ae
+        try:
+            assert(np.all(np.diff(coord_electrode) > 0))
+        except AssertionError as ae:
+            print('values of coord_electrode not monotonically increasing')
+            raise ae
+
+        try:
+            assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
+            if self.diam.size == coord_electrode.size:
+                assert(np.all(self.diam > 0 * self.diam.units))
+            else:
+                assert(self.diam > 0 * self.diam.units)
+        except AssertionError as ae:
+            print('diam must be positive scalar or of same shape \
+                   as coord_electrode')
+            raise ae
+        if self.diam.size == 1:
+            self.diam = np.ones(coord_electrode.size) * self.diam
+        try:
+            assert(self.h.size == 1 or self.h.size == coord_electrode.size)
+            if self.h.size == coord_electrode.size:
+                assert(np.all(self.h > 0 * self.h.units))
+        except AssertionError as ae:
+            print('h must be scalar or of same shape as coord_electrode')
+            raise ae
+        if self.h.size == 1:
+            self.h = np.ones(coord_electrode.size) * self.h
+
+        self.name = 'step-iCSD method'
+        self.coord_electrode = coord_electrode
+
+        # compute forward-solution matrix
+        self.f_matrix = self.get_f_matrix()
+
+    def parameters(self, **kwargs):
+        '''Defining the default values of the method passed as kwargs
+        Parameters
+        ----------
+        **kwargs
+            Same as those passed to initialize the Class
+        '''
+
+        self.diam = kwargs.pop('diam', 500E-6 * pq.m)
+        self.h = kwargs.pop('h', np.ones(23) * 100E-6 * pq.m)
+        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
+        self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
+        self.tol = kwargs.pop('tol', 1e-6)
+        self.f_type = kwargs.pop('f_type', 'gaussian')
+        self.f_order = kwargs.pop('f_order', (3, 1))
+        if kwargs:
+            raise TypeError('Invalid keyword arguments:', kwargs.keys())
+
+    def get_f_matrix(self):
+        '''Calculate F-matrix for step iCSD method'''
+        el_len = self.coord_electrode.size
+        f_matrix = np.zeros((el_len, el_len))
+        for j in range(el_len):
+            for i in range(el_len):
+                lower_int = self.coord_electrode[i] - self.h[j] / 2
+                if lower_int < 0:
+                    lower_int = self.h[j].units
+                upper_int = self.coord_electrode[i] + self.h[j] / 2
+
+                # components of f_matrix object
+                f_cyl0 = si.quad(self._f_cylinder,
+                                 a=lower_int, b=upper_int,
+                                 args=(float(self.coord_electrode[j]),
+                                       float(self.diam[j]),
+                                       float(self.sigma)),
+                                 epsabs=self.tol)[0]
+                f_cyl1 = si.quad(self._f_cylinder, a=lower_int, b=upper_int,
+                                 args=(-float(self.coord_electrode[j]),
+                                       float(self.diam[j]), float(self.sigma)),
+                                 epsabs=self.tol)[0]
+
+                # method of images coefficient
+                mom = (self.sigma - self.sigma_top) / (self.sigma + self.sigma_top)
+
+                f_matrix[j, i] = f_cyl0 + mom * f_cyl1
+
+        # assume si.quad discards the units
+        return f_matrix * self.h.units**2 / self.sigma.units
+
+    def _f_cylinder(self, zeta, z_val, diam, sigma):
+        '''function used by class method'''
+        f_cyl = 1. / (2. * sigma) * \
+            (np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
+        return f_cyl
+
+
+class SplineiCSD(CSD):
+    '''spline iCSD method'''
+    def __init__(self, lfp, coord_electrode, **kwargs):
+
+        '''
+        Initializing spline-iCSD method class object
+
+        Parameters
+        ----------
+        lfp : np.ndarray * quantity.Quantity
+            LFP signal of shape (# channels, # time steps) in units of V
+        coord_electrode : np.ndarray * quantity.Quantity
+            depth of evenly spaced electrode contact points of shape
+            (# contacts, ) in units of m
+        diam : float * quantity.Quantity
+            diameter of the assumed circular planar current sources centered
+            at each contact
+            Defaults to 500E-6 meters
+        sigma : float * quantity.Quantity
+            conductivity of tissue in units of S/m or 1/(ohm*m)
+            Defaults to 0.3 S / m
+        sigma_top : float * quantity.Quantity
+            conductivity on top of tissue in units of S/m or 1/(ohm*m)
+            Defaults to 0.3 S / m
+        tol : float
+            tolerance of numerical integration
+            Defaults 1e-6
+        f_type : str
+            type of spatial filter, must be a scipy.signal filter design method
+            Defaults to 'gaussian'
+        f_order : list
+            settings for spatial filter, arg passed to  filter design function
+            Defaults to (3,1) for the gaussian
+        num_steps : int
+            number of data points for the spatially upsampled LFP/CSD data
+            Defaults to 200
+        '''
+        self.parameters(**kwargs)
+        CSD.__init__(self, lfp, self.f_type, self.f_order)
+
+        try:  # Should the class not take care of this?!
+            assert(self.diam.units == coord_electrode.units)
+        except AssertionError as ae:
+            print('units of coord_electrode ({}) and diam ({}) differ'
+                  .format(coord_electrode.units, self.diam.units))
+            raise
+        try:
+            assert(np.all(np.diff(coord_electrode) > 0))
+        except AssertionError as ae:
+            print('values of coord_electrode not monotonically increasing')
+            raise ae
+
+        try:
+            assert(self.diam.size == 1 or self.diam.size == coord_electrode.size)
+            if self.diam.size == coord_electrode.size:
+                assert(np.all(self.diam > 0 * self.diam.units))
+        except AssertionError as ae:
+            print('diam must be scalar or of same shape as coord_electrode')
+            raise ae
+        if self.diam.size == 1:
+            self.diam = np.ones(coord_electrode.size) * self.diam
+
+        self.name = 'spline-iCSD method'
+        self.coord_electrode = coord_electrode
+
+        # compute stuff
+        self.f_matrix = self.get_f_matrix()
+
+    def parameters(self, **kwargs):
+        '''Defining the default values of the method passed as kwargs
+        Parameters
+        ----------
+        **kwargs
+            Same as those passed to initialize the Class
+        '''
+        self.diam = kwargs.pop('diam', 500E-6 * pq.m)
+        self.sigma = kwargs.pop('sigma', 0.3 * pq.S / pq.m)
+        self.sigma_top = kwargs.pop('sigma_top', 0.3 * pq.S / pq.m)
+        self.tol = kwargs.pop('tol', 1e-6)
+        self.num_steps = kwargs.pop('num_steps', 200)
+        self.f_type = kwargs.pop('f_type', 'gaussian')
+        self.f_order = kwargs.pop('f_order', (3, 1))
+        if kwargs:
+            raise TypeError('Invalid keyword arguments:', kwargs.keys())
+
+    def get_f_matrix(self):
+        '''Calculate the F-matrix for cubic spline iCSD method'''
+        el_len = self.coord_electrode.size
+        z_js = np.zeros(el_len + 1)
+        z_js[:-1] = np.array(self.coord_electrode)
+        z_js[-1] = z_js[-2] + float(np.diff(self.coord_electrode).mean())
+
+        # Define integration matrixes
+        f_mat0 = np.zeros((el_len, el_len + 1))
+        f_mat1 = np.zeros((el_len, el_len + 1))
+        f_mat2 = np.zeros((el_len, el_len + 1))
+        f_mat3 = np.zeros((el_len, el_len + 1))
+
+        # Calc. elements
+        for j in range(el_len):
+            for i in range(el_len):
+                f_mat0[j, i] = si.quad(self._f_mat0, a=z_js[i], b=z_js[i + 1],
+                                       args=(z_js[j + 1],
+                                             float(self.sigma),
+                                             float(self.diam[j])),
+                                       epsabs=self.tol)[0]
+                f_mat1[j, i] = si.quad(self._f_mat1, a=z_js[i], b=z_js[i + 1],
+                                       args=(z_js[j + 1], z_js[i],
+                                             float(self.sigma),
+                                             float(self.diam[j])),
+                                       epsabs=self.tol)[0]
+                f_mat2[j, i] = si.quad(self._f_mat2, a=z_js[i], b=z_js[i + 1],
+                                       args=(z_js[j + 1], z_js[i],
+                                             float(self.sigma),
+                                             float(self.diam[j])),
+                                       epsabs=self.tol)[0]
+                f_mat3[j, i] = si.quad(self._f_mat3, a=z_js[i], b=z_js[i + 1],
+                                       args=(z_js[j + 1], z_js[i],
+                                             float(self.sigma),
+                                             float(self.diam[j])),
+                                       epsabs=self.tol)[0]
+
+                # image technique if conductivity not constant:
+                if self.sigma != self.sigma_top:
+                    f_mat0[j, i] = f_mat0[j, i] + (self.sigma-self.sigma_top) / \
+                                                (self.sigma + self.sigma_top) * \
+                            si.quad(self._f_mat0, a=z_js[i], b=z_js[i+1], \
+                                    args=(-z_js[j+1],
+                                          float(self.sigma), float(self.diam[j])), \
+                                    epsabs=self.tol)[0]
+                    f_mat1[j, i] = f_mat1[j, i] + (self.sigma-self.sigma_top) / \
+                        (self.sigma + self.sigma_top) * \
+                            si.quad(self._f_mat1, a=z_js[i], b=z_js[i+1], \
+                                args=(-z_js[j+1], z_js[i], float(self.sigma),
+                                      float(self.diam[j])), epsabs=self.tol)[0]
+                    f_mat2[j, i] = f_mat2[j, i] + (self.sigma-self.sigma_top) / \
+                        (self.sigma + self.sigma_top) * \
+                            si.quad(self._f_mat2, a=z_js[i], b=z_js[i+1], \
+                                args=(-z_js[j+1], z_js[i], float(self.sigma),
+                                      float(self.diam[j])), epsabs=self.tol)[0]
+                    f_mat3[j, i] = f_mat3[j, i] + (self.sigma-self.sigma_top) / \
+                        (self.sigma + self.sigma_top) * \
+                            si.quad(self._f_mat3, a=z_js[i], b=z_js[i+1], \
+                                args=(-z_js[j+1], z_js[i], float(self.sigma),
+                                      float(self.diam[j])), epsabs=self.tol)[0]
+
+        e_mat0, e_mat1, e_mat2, e_mat3 = self._calc_e_matrices()
+
+        # Calculate the F-matrix
+        f_matrix = np.eye(el_len + 2)
+        f_matrix[1:-1, :] = np.dot(f_mat0, e_mat0) + \
+                            np.dot(f_mat1, e_mat1) + \
+                            np.dot(f_mat2, e_mat2) + \
+                            np.dot(f_mat3, e_mat3)
+
+        return f_matrix * self.coord_electrode.units**2 / self.sigma.units
+
+    def get_csd(self):
+        '''
+        Calculate the iCSD using the spline iCSD method
+
+        Returns
+        -------
+        csd : np.ndarray * quantity.Quantity
+            Array with csd estimate
+
+
+        '''
+        e_mat = self._calc_e_matrices()
+
+        el_len = self.coord_electrode.size
+
+        # padding the lfp with zeros on top/bottom
+        if self.lfp.ndim == 1:
+            cs_lfp = np.r_[[0], np.asarray(self.lfp), [0]].reshape(1, -1).T
+            csd = np.zeros(self.num_steps)
+        else:
+            cs_lfp = np.vstack((np.zeros(self.lfp.shape[1]),
+                                np.asarray(self.lfp),
+                                np.zeros(self.lfp.shape[1])))
+            csd = np.zeros((self.num_steps, self.lfp.shape[1]))
+        cs_lfp *= self.lfp.units
+
+        # CSD coefficients
+        csd_coeff = np.linalg.solve(self.f_matrix, cs_lfp)
+
+        # The cubic spline polynomial coefficients
+        a_mat0 = np.dot(e_mat[0], csd_coeff)
+        a_mat1 = np.dot(e_mat[1], csd_coeff)
+        a_mat2 = np.dot(e_mat[2], csd_coeff)
+        a_mat3 = np.dot(e_mat[3], csd_coeff)
+
+        # Extend electrode coordinates in both end by min contact interdistance
+        h = np.diff(self.coord_electrode).min()
+        z_js = np.zeros(el_len + 2)
+        z_js[0] = self.coord_electrode[0] - h
+        z_js[1: -1] = self.coord_electrode
+        z_js[-1] = self.coord_electrode[-1] + h
+
+        # create high res spatial grid
+        out_zs = np.linspace(z_js[1], z_js[-2], self.num_steps)
+
+        # Calculate iCSD estimate on grid from polynomial coefficients.
+        i = 0
+        for j in range(self.num_steps):
+            if out_zs[j] >= z_js[i + 1]:
+                i += 1
+            csd[j, ] = a_mat0[i, :] + a_mat1[i, :] * \
+                             (out_zs[j] - z_js[i]) + \
+                a_mat2[i, :] * (out_zs[j] - z_js[i])**2 + \
+                a_mat3[i, :] * (out_zs[j] - z_js[i])**3
+
+        csd_unit = (self.f_matrix.units**-1 * self.lfp.units).simplified
+
+        return csd * csd_unit
+
+    def _f_mat0(self, zeta, z_val, sigma, diam):
+        '''0th order potential function'''
+        return 1. / (2. * sigma) * \
+            (np.sqrt((diam / 2)**2 + ((z_val - zeta))**2) - abs(z_val - zeta))
+
+    def _f_mat1(self, zeta, z_val, zi_val, sigma, diam):
+        '''1st order potential function'''
+        return (zeta - zi_val) * self._f_mat0(zeta, z_val, sigma, diam)
+
+    def _f_mat2(self, zeta, z_val, zi_val, sigma, diam):
+        '''2nd order potential function'''
+        return (zeta - zi_val)**2 * self._f_mat0(zeta, z_val, sigma, diam)
+
+    def _f_mat3(self, zeta, z_val, zi_val, sigma, diam):
+        '''3rd order potential function'''
+        return (zeta - zi_val)**3 * self._f_mat0(zeta, z_val, sigma, diam)
+
+    def _calc_k_matrix(self):
+        '''Calculate the K-matrix used to calculate the E-matrices'''
+        el_len = self.coord_electrode.size
+        h = float(np.diff(self.coord_electrode).min())
+
+        c_jm1 = np.eye(el_len + 2, k=0) / h
+        c_jm1[0, 0] = 0
+
+        c_j0 = np.eye(el_len + 2) / h
+        c_j0[-1, -1] = 0
+
+        c_jall = c_j0
+        c_jall[0, 0] = 1
+        c_jall[-1, -1] = 1
+
+        tjp1 = np.eye(el_len + 2, k=1)
+        tjm1 = np.eye(el_len + 2, k=-1)
+
+        tj0 = np.eye(el_len + 2)
+        tj0[0, 0] = 0
+        tj0[-1, -1] = 0
+
+        # Defining K-matrix used to calculate e_mat1-3
+        return np.dot(np.linalg.inv(np.dot(c_jm1, tjm1) +
+                                    2 * np.dot(c_jm1, tj0) +
+                                    2 * c_jall +
+                                    np.dot(c_j0, tjp1)),
+                      3 * (np.dot(np.dot(c_jm1, c_jm1), tj0) -
+                           np.dot(np.dot(c_jm1, c_jm1), tjm1) +
+                           np.dot(np.dot(c_j0, c_j0), tjp1) -
+                           np.dot(np.dot(c_j0, c_j0), tj0)))
+
+    def _calc_e_matrices(self):
+        '''Calculate the E-matrices used by cubic spline iCSD method'''
+        el_len = self.coord_electrode.size
+        # expanding electrode grid
+        h = float(np.diff(self.coord_electrode).min())
+
+        # Define transformation matrices
+        c_mat3 = np.eye(el_len + 1) / h
+
+        # Get K-matrix
+        k_matrix = self._calc_k_matrix()
+
+        # Define matrixes for C to A transformation:
+        tja = np.eye(el_len + 2)[:-1, ]
+        tjp1a = np.eye(el_len + 2, k=1)[:-1, ]
+
+        # Define spline coefficients
+        e_mat0 = tja
+        e_mat1 = np.dot(tja, k_matrix)
+        e_mat2 = 3 * np.dot(c_mat3**2, (tjp1a - tja)) - \
+                            np.dot(np.dot(c_mat3, (tjp1a + 2 * tja)), k_matrix)
+        e_mat3 = 2 * np.dot(c_mat3**3, (tja - tjp1a)) + \
+                            np.dot(np.dot(c_mat3**2, (tjp1a + tja)), k_matrix)
+
+        return e_mat0, e_mat1, e_mat2, e_mat3
+
+
+if __name__ == '__main__':
+    from scipy.io import loadmat
+    import matplotlib.pyplot as plt
+
+    
+    #loading test data
+    test_data = loadmat('test_data.mat')
+    
+    #prepare lfp data for use, by changing the units to SI and appending quantities,
+    #along with electrode geometry, conductivities and assumed source geometry
+    lfp_data = test_data['pot1'] * 1E-6 * pq.V        # [uV] -> [V]
+    z_data = np.linspace(100E-6, 2300E-6, 23) * pq.m  # [m]
+    diam = 500E-6 * pq.m                              # [m]
+    h = 100E-6 * pq.m                                 # [m]
+    sigma = 0.3 * pq.S / pq.m                         # [S/m] or [1/(ohm*m)]
+    sigma_top = 0.3 * pq.S / pq.m                     # [S/m] or [1/(ohm*m)]
+    
+    # Input dictionaries for each method
+    delta_input = {
+        'lfp' : lfp_data,
+        'coord_electrode' : z_data,
+        'diam' : diam,          # source diameter
+        'sigma' : sigma,        # extracellular conductivity
+        'sigma_top' : sigma,    # conductivity on top of cortex
+        'f_type' : 'gaussian',  # gaussian filter
+        'f_order' : (3, 1),     # 3-point filter, sigma = 1.
+    }
+    step_input = {
+        'lfp' : lfp_data,
+        'coord_electrode' : z_data,
+        'diam' : diam,
+        'h' : h,                # source thickness
+        'sigma' : sigma,
+        'sigma_top' : sigma,
+        'tol' : 1E-12,          # Tolerance in numerical integration
+        'f_type' : 'gaussian',
+        'f_order' : (3, 1),
+    }
+    spline_input = {
+        'lfp' : lfp_data,
+        'coord_electrode' : z_data,
+        'diam' : diam,
+        'sigma' : sigma,
+        'sigma_top' : sigma,
+        'num_steps' : 201,      # Spatial CSD upsampling to N steps
+        'tol' : 1E-12,
+        'f_type' : 'gaussian',
+        'f_order' : (20, 5),
+    }
+    std_input = {
+        'lfp' : lfp_data,
+        'coord_electrode' : z_data,
+        'sigma' : sigma,
+        'f_type' : 'gaussian',
+        'f_order' : (3, 1),
+    }
+    
+    
+    #Create the different CSD-method class instances. We use the class methods
+    #get_csd() and filter_csd() below to get the raw and spatially filtered
+    #versions of the current-source density estimates.
+    csd_dict = dict(
+        delta_icsd = DeltaiCSD(**delta_input),
+        step_icsd = StepiCSD(**step_input),
+        spline_icsd = SplineiCSD(**spline_input),
+        std_csd = StandardCSD(**std_input),
+    )
+    
+    #plot
+    for method, csd_obj in list(csd_dict.items()):
+        fig, axes = plt.subplots(3,1, figsize=(8,8))
+    
+        #plot LFP signal
+        ax = axes[0]
+        im = ax.imshow(np.array(lfp_data), origin='upper', vmin=-abs(lfp_data).max(), \
+                  vmax=abs(lfp_data).max(), cmap='jet_r', interpolation='nearest')
+        ax.axis(ax.axis('tight'))
+        cb = plt.colorbar(im, ax=ax)
+        cb.set_label('LFP (%s)' % lfp_data.dimensionality.string)
+        ax.set_xticklabels([])
+        ax.set_title('LFP')
+        ax.set_ylabel('ch #')
+    
+        #plot raw csd estimate
+        csd = csd_obj.get_csd()
+        ax = axes[1]
+        im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
+              vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
+        ax.axis(ax.axis('tight'))
+        ax.set_title(csd_obj.name)
+        cb = plt.colorbar(im, ax=ax)
+        cb.set_label('CSD (%s)' % csd.dimensionality.string)
+        ax.set_xticklabels([])
+        ax.set_ylabel('ch #')
+    
+        #plot spatially filtered csd estimate
+        ax = axes[2]
+        csd = csd_obj.filter_csd(csd)
+        im = ax.imshow(np.array(csd), origin='upper', vmin=-abs(csd).max(), \
+              vmax=abs(csd).max(), cmap='jet_r', interpolation='nearest')
+        ax.axis(ax.axis('tight'))
+        ax.set_title(csd_obj.name + ', filtered')
+        cb = plt.colorbar(im, ax=ax)
+        cb.set_label('CSD (%s)' % csd.dimensionality.string)
+        ax.set_ylabel('ch #')
+        ax.set_xlabel('timestep')
+    
+    
+    plt.show()
+

BIN
code/elephant/elephant/current_source_density_src/test_data.mat


+ 362 - 0
code/elephant/elephant/current_source_density_src/utility_functions.py

@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+"""
+These are some useful functions used in CSD methods.
+They include CSD source profiles to be used as ground truths,
+placement of electrodes in 1D, 2D and 3D, etc.
+These scripts are based on Grzegorz Parka's
+Google Summer of Code 2014 project, INCF/pykCSD.
+This was written by:
+Michal Czerwinski, Chaitanya Chintaluri
+Laboratory of Neuroinformatics,
+Nencki Institute of Experimental Biology, Warsaw.
+"""
+from __future__ import division
+
+import numpy as np
+from numpy import exp
+import quantities as pq
+
+
+def patch_quantities():
+    """patch quantities with the SI unit Siemens if it does not exist"""
+    for symbol, prefix, definition, u_symbol in zip(
+        ['siemens', 'S', 'mS', 'uS', 'nS', 'pS'],
+        ['', '', 'milli', 'micro', 'nano', 'pico'],
+        [pq.A / pq.V, pq.A / pq.V, 'S', 'mS', 'uS', 'nS'],
+        [None, None, None, None, u'µS', None]):
+        if type(definition) is str:
+            definition = lastdefinition / 1000
+        if not hasattr(pq, symbol):
+            setattr(pq, symbol, pq.UnitQuantity(
+                prefix + 'siemens',
+                definition,
+                symbol=symbol,
+                u_symbol=u_symbol))
+        lastdefinition = definition
+    return
+
+def check_for_duplicated_electrodes(elec_pos):
+    """Checks for duplicate electrodes
+    Parameters
+    ----------
+    elec_pos : np.array
+    Returns
+    -------
+    has_duplicated_elec : Boolean
+        True if all electrode positions are unique (no duplicates were found)
+    """
+    unique_elec_pos = np.vstack({tuple(row) for row in elec_pos})
+    has_duplicated_elec = unique_elec_pos.shape == elec_pos.shape
+    return has_duplicated_elec
+
+
+def distribute_srcs_1D(X, n_src, ext_x, R_init):
+    """Distribute sources in 1D equally spaced
+    Parameters
+    ----------
+    X : np.array
+        points at which CSD will be estimated
+    n_src : int
+        number of sources to be included in the model
+    ext_x : float
+        how far the sources should extend beyond the area spanned by X
+    R_init : float
+        same as R in the 1D case
+    Returns
+    -------
+    X_src : np.array
+        positions of the sources
+    R : float
+        effective radius of the basis element
+    """
+    X_src = np.mgrid[(np.min(X) - ext_x):(np.max(X) + ext_x):
+                     np.complex(0, n_src)]
+    R = R_init
+    return X_src, R
+
+
+def distribute_srcs_2D(X, Y, n_src, ext_x, ext_y, R_init):
+    """Distribute n_src's in the given area evenly
+    Parameters
+    ----------
+    X, Y : np.arrays
+        points at which CSD will be estimated
+    n_src : int
+        demanded number of sources to be included in the model
+    ext_x, ext_y : floats
+        how far the sources should extend beyond the area spanned by X, Y
+    R_init : float
+        demanded radius of the basis element
+    Returns
+    -------
+    X_src, Y_src : np.arrays
+        positions of the sources
+    nx, ny : ints
+        number of sources in directions x,y
+        new n_src = nx * ny may not be equal to the demanded number of sources
+    R : float
+        effective radius of the basis element
+    """
+    Lx = np.max(X) - np.min(X)
+    Ly = np.max(Y) - np.min(Y)
+    Lx_n = Lx + (2 * ext_x)
+    Ly_n = Ly + (2 * ext_y)
+    [nx, ny, Lx_nn, Ly_nn, ds] = get_src_params_2D(Lx_n, Ly_n, n_src)
+    ext_x_n = (Lx_nn - Lx) / 2
+    ext_y_n = (Ly_nn - Ly) / 2
+    X_src, Y_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n):
+                            np.complex(0, nx),
+                            (np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n):
+                            np.complex(0, ny)]
+    # d = round(R_init / ds)
+    R = R_init  # R = d * ds
+    return X_src, Y_src, R
+
+
+def get_src_params_2D(Lx, Ly, n_src):
+    """Distribute n_src sources evenly in a rectangle of size Lx * Ly
+    Parameters
+    ----------
+    Lx, Ly : floats
+        lengths in the x and y directions of the area in which
+        the sources should be placed
+    n_src : int
+        demanded number of sources
+
+    Returns
+    -------
+    nx, ny : ints
+        number of sources in directions x, y
+        new n_src = nx * ny may not be equal to the demanded number of sources
+    Lx_n, Ly_n : floats
+        updated lengths in the directions x, y
+    ds : float
+        spacing between the sources
+    """
+    coeff = [Ly, Lx - Ly, -Lx * n_src]
+    rts = np.roots(coeff)
+    r = [r for r in rts if type(r) is not complex and r > 0]
+    nx = r[0]
+    ny = n_src / nx
+    ds = Lx / (nx - 1)
+    nx = np.floor(nx) + 1
+    ny = np.floor(ny) + 1
+    Lx_n = (nx - 1) * ds
+    Ly_n = (ny - 1) * ds
+    return (nx, ny, Lx_n, Ly_n, ds)
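A small numerical check (hypothetical numbers, not from the dataset) of the quadratic solved above: the grid returned by get_src_params_2D has the same spacing in x and y.

nx, ny, Lx_n, Ly_n, ds = get_src_params_2D(Lx=1.0, Ly=0.5, n_src=50)
print(nx, ny, ds)                                      # -> 10.0 6.0 ~0.117
assert abs(Lx_n / (nx - 1) - Ly_n / (ny - 1)) < 1e-9   # equal spacing by construction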
+
+
+def distribute_srcs_3D(X, Y, Z, n_src, ext_x, ext_y, ext_z, R_init):
+    """Distribute n_src sources evenly in a rectangle of size Lx * Ly * Lz
+    Parameters
+    ----------
+    X, Y, Z : np.arrays
+        points at which CSD will be estimated
+    n_src : int
+        desired number of sources we want to include in the model
+    ext_x, ext_y, ext_z : floats
+        how far the sources should extend beyond the area spanned by X, Y, Z
+    R_init : float
+        demanded radius of the basis element
+
+    Returns
+    -------
+    X_src, Y_src, Z_src : np.arrays
+        positions of the sources in 3D space
+    nx, ny, nz : ints
+        number of sources in directions x,y,z
+        new n_src = nx * ny * nz may not be equal to the demanded number of
+        sources
+
+    R : float
+        updated radius of the basis element
+    """
+    Lx = np.max(X) - np.min(X)
+    Ly = np.max(Y) - np.min(Y)
+    Lz = np.max(Z) - np.min(Z)
+    Lx_n = Lx + 2 * ext_x
+    Ly_n = Ly + 2 * ext_y
+    Lz_n = Lz + 2 * ext_z
+    (nx, ny, nz, Lx_nn, Ly_nn, Lz_nn, ds) = get_src_params_3D(Lx_n,
+                                                              Ly_n,
+                                                              Lz_n,
+                                                              n_src)
+    ext_x_n = (Lx_nn - Lx) / 2
+    ext_y_n = (Ly_nn - Ly) / 2
+    ext_z_n = (Lz_nn - Lz) / 2
+    X_src, Y_src, Z_src = np.mgrid[(np.min(X) - ext_x_n):(np.max(X) + ext_x_n):
+                                   np.complex(0, nx),
+                                   (np.min(Y) - ext_y_n):(np.max(Y) + ext_y_n):
+                                   np.complex(0, ny),
+                                   (np.min(Z) - ext_z_n):(np.max(Z) + ext_z_n):
+                                   np.complex(0, nz)]
+    # d = np.round(R_init / ds)
+    R = R_init
+    return (X_src, Y_src, Z_src, R)
+
+
+def get_src_params_3D(Lx, Ly, Lz, n_src):
+    """Helps to evenly distribute n_src sources in a cuboid of size Lx * Ly * Lz
+    Parameters
+    ----------
+    Lx, Ly, Lz : floats
+        lengths in the x, y and z directions of the area in which
+        the sources should be placed
+    n_src : int
+        demanded number of sources to be included in the model
+    Returns
+    -------
+    nx, ny, nz : ints
+        number of sources in directions x, y, z
+        new n_src = nx * ny * nz may not be equal to the demanded number of
+        sources
+    Lx_n, Ly_n, Lz_n : floats
+        updated lengths in the directions x, y, z
+    ds : float
+        spacing between the sources (grid nodes)
+    """
+    V = Lx * Ly * Lz
+    V_unit = V / n_src
+    L_unit = V_unit**(1. / 3.)
+    nx = np.ceil(Lx / L_unit)
+    ny = np.ceil(Ly / L_unit)
+    nz = np.ceil(Lz / L_unit)
+    ds = Lx / (nx - 1)
+    Lx_n = (nx - 1) * ds
+    Ly_n = (ny - 1) * ds
+    Lz_n = (nz - 1) * ds
+    return (nx, ny, nz, Lx_n, Ly_n, Lz_n, ds)
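The 3D variant can be sketched the same way (illustrative values only): the spacing follows from the cube root of the volume available per source.

nx, ny, nz, Lx_n, Ly_n, Lz_n, ds = get_src_params_3D(1.0, 1.0, 1.0, 1000)
print(nx, ny, nz, ds)   # roughly 10 sources per axis, spacing close to 0.1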
+
+
+def generate_electrodes(dim, xlims=[0.1, 0.9], ylims=[0.1, 0.9],
+                        zlims=[0.1, 0.9], res=5):
+    """Generates electrodes, helpful for FWD funtion.
+        Parameters
+        ----------
+        dim : int
+            Dimensionality of the electrodes: 1, 2 or 3
+        xlims : [start, end]
+            Spatial limits of the electrodes
+        ylims : [start, end]
+            Spatial limits of the electrodes
+        zlims : [start, end]
+            Spatial limits of the electrodes
+        res : int
+            How many electrodes in each dimension
+        Returns
+        -------
+        ele_x, ele_y, ele_z : flattened np.array of the electrode pos
+
+    """
+    if dim == 1:
+        ele_x = np.mgrid[xlims[0]: xlims[1]: np.complex(0, res)]
+        ele_x = ele_x.flatten()
+        return ele_x
+    elif dim == 2:
+        ele_x, ele_y = np.mgrid[xlims[0]: xlims[1]: np.complex(0, res),
+                                ylims[0]: ylims[1]: np.complex(0, res)]
+        ele_x = ele_x.flatten()
+        ele_y = ele_y.flatten()
+        return ele_x, ele_y
+    elif dim == 3:
+        ele_x, ele_y, ele_z = np.mgrid[xlims[0]: xlims[1]: np.complex(0, res),
+                                       ylims[0]: ylims[1]: np.complex(0, res),
+                                       zlims[0]: zlims[1]: np.complex(0, res)]
+        ele_x = ele_x.flatten()
+        ele_y = ele_y.flatten()
+        ele_z = ele_z.flatten()
+        return ele_x, ele_y, ele_z
+
+
+def gauss_1d_dipole(x):
+    """1D Gaussian dipole source is placed between 0 and 1
+       to be used to test the CSD
+
+       Parameters
+       ----------
+       x : np.array
+           Spatial pts. at which the true csd is evaluated
+
+       Returns
+       -------
+       f : np.array
+           The value of the csd at the requested points
+    """
+    src = 0.5*exp(-((x-0.7)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
+    snk = -0.5*exp(-((x-0.3)**2)/(2.*0.3))*(2*np.pi*0.3)**-0.5
+    f = src+snk
+    return f
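The two helpers above combine into a quick ground-truth check (a sketch only; assumes matplotlib is installed): place virtual contacts along a laminar probe and evaluate the dipole profile at those depths.

import matplotlib.pyplot as plt

ele_x = generate_electrodes(dim=1, xlims=[0.05, 0.95], res=23)
true_csd = gauss_1d_dipole(ele_x)

plt.plot(ele_x, true_csd, 'o-')
plt.xlabel('electrode position (a.u.)')
plt.ylabel('true CSD (a.u.)')
plt.show()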
+
+def large_source_2D(x, y):
+    """2D Gaussian large source profile - to use to test csd
+       Parameters
+       ----------
+       x : np.array
+           Spatial x pts. at which the true csd is evaluated
+       y : np.array
+           Spatial y pts. at which the true csd is evaluated
+       Returns
+       -------
+       f : np.array
+           The value of the csd at the requested points
+    """
+    zz = [0.4, -0.3, -0.1, 0.6]
+    zs = [0.2, 0.3, 0.4, 0.2]
+    f1 = 0.5965*exp( (-1*(x-0.1350)**2 - (y-0.8628)**2) /0.4464)* exp(-(-zz[0])**2 / zs[0]) /exp(-(zz[0])**2/zs[0])
+    f2 = -0.9269*exp( (-2*(x-0.1848)**2 - (y-0.0897)**2) /0.2046)* exp(-(-zz[1])**2 / zs[1]) /exp(-(zz[1])**2/zs[1]);
+    f3 = 0.5910*exp( (-3*(x-1.3189)**2 - (y-0.3522)**2) /0.2129)* exp(-(-zz[2])**2 / zs[2]) /exp(-(zz[2])**2/zs[2]);
+    f4 = -0.1963*exp( (-4*(x-1.3386)**2 - (y-0.5297)**2) /0.2507)* exp(-(-zz[3])**2 / zs[3]) /exp(-(zz[3])**2/zs[3]);
+    f = f1+f2+f3+f4
+    return f
+
+def small_source_2D(x, y):
+    """2D Gaussian small source profile - to be used to test csd
+       Parameters
+       ----------
+       x : np.array
+           Spatial x pts. at which the true csd is evaluated
+       y : np.array
+           Spatial y pts. at which the true csd is evaluated
+       Returns
+       -------
+       f : np.array
+           The value of the csd at the requested points
+    """
+    def gauss2d(x,y,p):
+        rcen_x = p[0] * np.cos(p[5]) - p[1] * np.sin(p[5])
+        rcen_y = p[0] * np.sin(p[5]) + p[1] * np.cos(p[5])
+        xp = x * np.cos(p[5]) - y * np.sin(p[5])
+        yp = x * np.sin(p[5]) + y * np.cos(p[5])
+
+        g = p[4]*exp(-(((rcen_x-xp)/p[2])**2+
+                          ((rcen_y-yp)/p[3])**2)/2.)
+        return g
+    f1 = gauss2d(x,y,[0.3,0.7,0.038,0.058,0.5,0.])
+    f2 = gauss2d(x,y,[0.3,0.6,0.038,0.058,-0.5,0.])
+    f3 = gauss2d(x,y,[0.45,0.7,0.038,0.058,0.5,0.])
+    f4 = gauss2d(x,y,[0.45,0.6,0.038,0.058,-0.5,0.])
+    f = f1+f2+f3+f4
+    return f
+
+def gauss_3d_dipole(x, y, z):
+    """3D Gaussian dipole profile - to be used to test csd.
+       Parameters
+       ----------
+       x : np.array
+           Spatial x pts. at which the true csd is evaluated
+       y : np.array
+           Spatial y pts. at which the true csd is evaluated
+       z : np.array
+           Spatial z pts. at which the true csd is evaluated
+       Returns
+       -------
+       f : np.array
+           The value of the csd at the requested points
+    """
+    x0, y0, z0 = 0.3, 0.7, 0.3
+    x1, y1, z1 = 0.6, 0.5, 0.7
+    sig_2 = 0.023
+    A = (2*np.pi*sig_2)**-1
+    f1 = A*exp( (-(x-x0)**2 -(y-y0)**2 -(z-z0)**2) / (2*sig_2) )
+    f2 = -1*A*exp( (-(x-x1)**2 -(y-y1)**2 -(z-z1)**2) / (2*sig_2) )
+    f = f1+f2
+    return f

+ 525 - 0
code/elephant/elephant/kernels.py

@@ -0,0 +1,525 @@
+# -*- coding: utf-8 -*-
+"""
+Definition of a hierarchy of classes for kernel functions to be used
+in convolution, e.g., for data smoothing (low pass filtering) or
+firing rate estimation.
+
+Examples of usage:
+    >>> kernel1 = kernels.GaussianKernel(sigma=100*ms)
+    >>> kernel2 = kernels.ExponentialKernel(sigma=8*mm, invert=True)
+    
+:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import quantities as pq
+import numpy as np
+import scipy.special
+
+
+def inherit_docstring(fromfunc, sep=""):
+    """
+    Decorator: Copy the docstring of `fromfunc`
+
+    based on:
+    http://stackoverflow.com/questions/13741998/
+    is-there-a-way-to-let-classes-inherit-the-documentation-of-their-superclass-with
+    """
+    def _decorator(func):
+        parent_doc = fromfunc.__doc__
+        if func.__doc__ is None:
+            func.__doc__ = parent_doc
+        else:
+            func.__doc__ = sep.join([parent_doc, func.__doc__])
+        return func
+    return _decorator
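An illustration of the decorator with hypothetical classes (not part of the module): an override that has no docstring of its own inherits the parent's.

class Base(object):
    def run(self):
        """Run the computation."""

class Child(Base):
    @inherit_docstring(Base.run)
    def run(self):
        pass

print(Child.run.__doc__)   # -> Run the computation.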
+
+
+class Kernel(object):
+    """
+    This is the base class for commonly used kernels.
+
+    General definition of kernel:
+    A function :math:`K(x, y)` is called a kernel function if
+    :math:`\\int K(x, y) g(x) g(y)\\ \\textrm{d}x\\ \\textrm{d}y
+    \\ \\geq 0\\ \\ \\ \\forall\\ g \\in L_2`
+
+    Currently implemented kernels are:
+        - rectangular
+        - triangular
+        - epanechnikovlike
+        - gaussian
+        - laplacian
+        - exponential (asymmetric)
+        - alpha function (asymmetric)
+
+    In neuroscience a popular application of kernels is in performing smoothing
+    operations via convolution. In this case, the kernel has the properties of
+    a probability density, i.e., it is positive and normalized to one. Popular
+    choices are the rectangular or Gaussian kernels.
+
+    Exponential and alpha kernels may also be used to represent the
+    postsynaptic currents / potentials in a linear (current-based) model.
+
+    Parameters
+    ----------
+    sigma : Quantity scalar
+        Standard deviation of the kernel.
+    invert: bool, optional
+        If true, asymmetric kernels (e.g., exponential
+        or alpha kernels) are inverted along the time axis.
+        Default: False
+    """
+
+    def __init__(self, sigma, invert=False):
+
+        if not (isinstance(sigma, pq.Quantity)):
+            raise TypeError("sigma must be a quantity!")
+
+        if sigma.magnitude < 0:
+            raise ValueError("sigma cannot be negative!")
+
+        if not isinstance(invert, bool):
+            raise ValueError("invert must be bool!")
+
+        self.sigma = sigma
+        self.invert = invert
+
+    def __call__(self, t):
+        """
+        Evaluates the kernel at all points in the array `t`.
+
+        Parameter
+        ---------
+        t : Quantity 1D
+            Interval on which the kernel is evaluated, not necessarily
+            a time interval.
+
+        Returns
+        -------
+            Quantity 1D
+            The result of the kernel evaluations.
+        """
+        if not (isinstance(t, pq.Quantity)):
+            raise TypeError("The argument of the kernel callable must be "
+                            "of type quantity!")
+
+        if t.dimensionality.simplified != self.sigma.dimensionality.simplified:
+            raise TypeError("The dimensionality of sigma and the input array "
+                            "to the callable kernel object must be the same. "
+                            "Otherwise a normalization to 1 of the kernel "
+                            "cannot be performed.")
+
+        self._sigma_scaled = self.sigma.rescale(t.units)
+        # A hidden variable _sigma_scaled is introduced here in order to avoid
+        # accumulation of floating point errors of sigma upon multiple
+        # usages of the __call__ - function for the same Kernel instance.
+
+        return self._evaluate(t)
+
+    def _evaluate(self, t):
+        """
+        Evaluates the kernel.
+
+        Parameter
+        ---------
+        t : Quantity 1D
+            Interval on which the kernel is evaluated, not necessarily
+            a time interval.
+
+        Returns
+        -------
+            Quantity 1D
+            The result of the kernel evaluation.
+        """
+        raise NotImplementedError("The Kernel class should not be used "
+                                  "directly; use one of the kernel "
+                                  "subclasses instead.")
+
+    def boundary_enclosing_area_fraction(self, fraction):
+        """
+        Calculates the boundary :math:`b` so that the integral from
+        :math:`-b` to :math:`b` encloses a certain fraction of the
+        integral over the complete kernel. By definition the returned value
+        of the method boundary_enclosing_area_fraction is hence non-negative,
+        even if the whole probability mass of the kernel is concentrated over
+        negative support for inverted kernels.
+
+        Parameter
+        ---------
+        fraction : float
+            Fraction of the whole area which has to be enclosed.
+
+        Returns
+        -------
+            Quantity scalar
+            Boundary of the kernel containing area `fraction` under the
+            kernel density.
+        """
+        self._check_fraction(fraction)
+        sigma_division = 500            # arbitrary choice
+        interval = self.sigma / sigma_division
+        self._sigma_scaled = self.sigma
+        area = 0
+        counter = 0
+        while area < fraction:
+            area += (self._evaluate((counter + 1) * interval) +
+                     self._evaluate(counter * interval)) * interval / 2
+            area += (self._evaluate(-1 * (counter + 1) * interval) +
+                     self._evaluate(-1 * counter * interval)) * interval / 2
+            counter += 1
+            if counter > 250000:
+                raise ValueError("fraction was chosen too close to one such "
+                                 "that in combination with integral "
+                                 "approximation errors the calculation of a "
+                                 "boundary was not possible.")
+        return counter * interval
+
+    def _check_fraction(self, fraction):
+        """
+        Checks the input variable of the method boundary_enclosing_area_fraction
+        for validity of type and value.
+
+        Parameter
+        ---------
+        fraction : float or int
+            Fraction of the area under the kernel function.
+        """
+        if not isinstance(fraction, (float, int)):
+            raise TypeError("`fraction` must be float or integer!")
+        if not 0 <= fraction < 1:
+            raise ValueError("`fraction` must be in the interval [0, 1)!")
+
+    def median_index(self, t):
+        """
+        Estimates the index of the Median of the kernel.
+        This parameter is not mandatory for symmetrical kernels but it is
+        required when asymmetrical kernels have to be aligned at their median.
+
+        Parameter
+        ---------
+        t : Quantity 1D
+            Interval on which the kernel is evaluated,
+
+        Returns
+        -------
+            int
+            Index of the estimated value of the kernel median.
+
+        Remarks
+        -------
+        The formula in this method using retrieval of the sampling interval
+        from t only works for t with equidistant intervals!
+        The formula calculates the Median slightly wrong by the potentially
+        ignored probability in the distribution corresponding to lower values
+        than the minimum in the array t.
+        """
+        return np.nonzero(self(t).cumsum() *
+                          (t[len(t) - 1] - t[0]) / (len(t) - 1) >= 0.5)[0].min()
+
+    def is_symmetric(self):
+        """
+        This method is overwritten in the class SymmetricKernel, where it
+        returns 'True'; the value 'False' returned here therefore applies
+        only to the asymmetric kernels.
+        """
+        return False
+
+
+class SymmetricKernel(Kernel):
+    """
+    Base class for symmetric kernels.
+
+    Derived from:
+    """
+    __doc__ += Kernel.__doc__
+
+    def is_symmetric(self):
+        return True
+
+
+class RectangularKernel(SymmetricKernel):
+    """
+    Class for rectangular kernels
+
+    .. math::
+        K(t) = \\left\\{\\begin{array}{ll} \\frac{1}{2 \\tau}, & |t| < \\tau \\\\
+        0, & |t| \\geq \\tau \\end{array} \\right.
+
+    with :math:`\\tau = \\sqrt{3} \\sigma` corresponding to the half width
+    of the kernel.
+
+    Besides the standard deviation `sigma`, for consistency of interfaces the
+    parameter `invert` needed for asymmetric kernels also exists without
+    having any effect in the case of symmetric kernels.
+
+    Derived from:
+    """
+    __doc__ += SymmetricKernel.__doc__
+
+    @property
+    def min_cutoff(self):
+        min_cutoff = np.sqrt(3.0)
+        return min_cutoff
+
+    @inherit_docstring(Kernel._evaluate)
+    def _evaluate(self, t):
+        return (0.5 / (np.sqrt(3.0) * self._sigma_scaled)) * \
+               (np.absolute(t) < np.sqrt(3.0) * self._sigma_scaled)
+
+    @inherit_docstring(Kernel.boundary_enclosing_area_fraction)
+    def boundary_enclosing_area_fraction(self, fraction):
+        self._check_fraction(fraction)
+        return np.sqrt(3.0) * self.sigma * fraction
+
+
+class TriangularKernel(SymmetricKernel):
+    """
+    Class for triangular kernels
+
+    .. math::
+        K(t) = \\left\\{ \\begin{array}{ll} \\frac{1}{\\tau} (1
+        - \\frac{|t|}{\\tau}), & |t| < \\tau \\\\
+         0, & |t| \\geq \\tau \\end{array} \\right.
+
+    with :math:`\\tau = \\sqrt{6} \\sigma` corresponding to the half width of 
+    the kernel.
+
+    Besides the standard deviation `sigma`, for consistency of interfaces the
+    parameter `invert` needed for asymmetric kernels also exists without
+    having any effect in the case of symmetric kernels.
+
+    Derived from:
+    """
+    __doc__ += SymmetricKernel.__doc__
+
+    @property
+    def min_cutoff(self):
+        min_cutoff = np.sqrt(6.0)
+        return min_cutoff
+
+    @inherit_docstring(Kernel._evaluate)
+    def _evaluate(self, t):
+        return (1.0 / (np.sqrt(6.0) * self._sigma_scaled)) * np.maximum(
+            0.0,
+            (1.0 - (np.absolute(t) /
+                    (np.sqrt(6.0) * self._sigma_scaled)).magnitude))
+
+    @inherit_docstring(Kernel.boundary_enclosing_area_fraction)
+    def boundary_enclosing_area_fraction(self, fraction):
+        self._check_fraction(fraction)
+        return np.sqrt(6.0) * self.sigma * (1 - np.sqrt(1 - fraction))
+
+
+class EpanechnikovLikeKernel(SymmetricKernel):
+    """
+    Class for epanechnikov-like kernels
+
+    .. math::
+        K(t) = \\left\\{\\begin{array}{ll} (3 /(4 d)) (1 - (t / d)^2),
+        & |t| < d \\\\
+        0, & |t| \\geq d \\end{array} \\right.
+
+    with :math:`d = \\sqrt{5} \\sigma` being the half width of the kernel.
+
+    Under full consideration of its axioms, the Epanechnikov kernel has a
+    half width of :math:`\\sqrt{5}`. Ignoring one axiom, the corresponding
+    kernel with half width = 1 may also be called an Epanechnikov kernel
+    ( https://de.wikipedia.org/wiki/Epanechnikov-Kern ).
+    Kernels of this type with arbitrary width are therefore referred to
+    here as 'Epanechnikov-like' kernels.
+
+    Besides the standard deviation `sigma`, for consistency of interfaces the
+    parameter `invert` needed for asymmetric kernels also exists without
+    having any effect in the case of symmetric kernels.
+
+    Derived from:
+    """
+    __doc__ += SymmetricKernel.__doc__
+
+    @property
+    def min_cutoff(self):
+        min_cutoff = np.sqrt(5.0)
+        return min_cutoff
+
+    @inherit_docstring(Kernel._evaluate)
+    def _evaluate(self, t):
+        return (3.0 / (4.0 * np.sqrt(5.0) * self._sigma_scaled)) * np.maximum(
+            0.0,
+            1 - (t / (np.sqrt(5.0) * self._sigma_scaled)).magnitude ** 2)
+
+    @inherit_docstring(Kernel.boundary_enclosing_area_fraction)
+    def boundary_enclosing_area_fraction(self, fraction):
+        """
+        For Epanechnikov-like kernels, integration of its density within
+        the boundaries 0 and :math:`b`, and then solving for :math:`b` leads
+        to the problem of finding the roots of a polynomial of third order.
+        The implemented formulas are based on the solution of this problem
+        given in https://en.wikipedia.org/wiki/Cubic_function,
+        where the following 3 solutions are given:
+            - :math:`u_1 = 1`: Solution on negative side
+            - :math:`u_2 = \\frac{-1 + i\\sqrt{3}}{2}`: Solution for larger
+              values than zero crossing of the density
+            - :math:`u_3 = \\frac{-1 - i\\sqrt{3}}{2}`: Solution for smaller
+              values than zero crossing of the density
+        The solution :math:`u_3` is the relevant one for the problem at hand,
+        since it involves only positive area contributions.
+        """
+        self._check_fraction(fraction)
+        # Python's complex-operator cannot handle quantities, hence the
+        # following construction on quantities is necessary:
+        Delta_0 = complex(1.0 / (5.0 * self.sigma.magnitude**2), 0) / \
+                  self.sigma.units**2
+        Delta_1 = complex(2.0 * np.sqrt(5.0) * fraction /
+                          (25.0 * self.sigma.magnitude**3), 0) / \
+                  self.sigma.units**3
+        C = ((Delta_1 + (Delta_1**2.0 - 4.0 * Delta_0**3.0)**(1.0 / 2.0)) /
+             2.0)**(1.0 / 3.0)
+        u_3 = complex(-1.0 / 2.0, -np.sqrt(3.0) / 2.0)
+        b = -5.0 * self.sigma**2 * (u_3 * C + Delta_0 / (u_3 * C))
+        return b.real
+
+
+class GaussianKernel(SymmetricKernel):
+    """
+    Class for gaussian kernels
+
+    .. math::
+        K(t) = (\\frac{1}{\\sigma \\sqrt{2 \\pi}})
+        \\exp(-\\frac{t^2}{2 \\sigma^2})
+
+    with :math:`\\sigma` being the standard deviation.
+
+    Besides the standard deviation `sigma`, for consistency of interfaces the
+    parameter `invert` needed for asymmetric kernels also exists without
+    having any effect in the case of symmetric kernels.
+
+    Derived from:
+    """
+    __doc__ += SymmetricKernel.__doc__
+
+    @property
+    def min_cutoff(self):
+        min_cutoff = 3.0
+        return min_cutoff
+
+    @inherit_docstring(Kernel._evaluate)
+    def _evaluate(self, t):
+        return (1.0 / (np.sqrt(2.0 * np.pi) * self._sigma_scaled)) * np.exp(
+            -0.5 * (t / self._sigma_scaled).magnitude ** 2)
+
+    @inherit_docstring(Kernel.boundary_enclosing_area_fraction)
+    def boundary_enclosing_area_fraction(self, fraction):
+        self._check_fraction(fraction)
+        return self.sigma * np.sqrt(2.0) * scipy.special.erfinv(fraction)
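A usage sketch for the symmetric kernels (import path as used elsewhere in this commit; numbers are approximate): evaluating a Gaussian kernel on an equidistant grid and summing recovers nearly the full unit area.

import numpy as np
import quantities as pq
from elephant import kernels

kernel = kernels.GaussianKernel(sigma=100 * pq.ms)
t = np.linspace(-300, 300, 2001) * pq.ms    # +/- 3 sigma around zero
vals = kernel(t)                            # carries units of 1 / ms
dt = 0.3 * pq.ms                            # grid spacing
print((vals.sum() * dt).simplified)         # ~0.997, the mass within 3 sigma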
+
+
+class LaplacianKernel(SymmetricKernel):
+    """
+    Class for laplacian kernels
+
+    .. math::
+        K(t) = \\frac{1}{2 \\tau} \\exp(-|\\frac{t}{\\tau}|)
+
+    with :math:`\\tau = \\sigma / \\sqrt{2}`.
+
+    Besides the standard deviation `sigma`, for consistency of interfaces the
+    parameter `invert` needed for asymmetric kernels also exists without
+    having any effect in the case of symmetric kernels.
+
+    Derived from:
+    """
+    __doc__ += SymmetricKernel.__doc__
+
+    @property
+    def min_cutoff(self):
+        min_cutoff = 3.0
+        return min_cutoff
+
+    @inherit_docstring(Kernel._evaluate)
+    def _evaluate(self, t):
+        return (1 / (np.sqrt(2.0) * self._sigma_scaled)) * np.exp(
+            -(np.absolute(t) * np.sqrt(2.0) / self._sigma_scaled).magnitude)
+
+    @inherit_docstring(Kernel.boundary_enclosing_area_fraction)
+    def boundary_enclosing_area_fraction(self, fraction):
+        self._check_fraction(fraction)
+        return -self.sigma * np.log(1.0 - fraction) / np.sqrt(2.0)
+
+
+# Potential further symmetric kernels from Wiki Kernels (statistics):
+# Quartic (biweight), Triweight, Tricube, Cosine, Logistics, Silverman
+
+
+class ExponentialKernel(Kernel):
+    """
+    Class for exponential kernels
+
+    .. math::
+        K(t) = \\left\\{\\begin{array}{ll} (1 / \\tau) \\exp{(-t / \\tau)},
+        & t > 0 \\\\
+        0, & t \\leq 0 \\end{array} \\right.
+
+    with :math:`\\tau = \\sigma`.
+
+    Derived from:
+    """
+    __doc__ += Kernel.__doc__
+
+    @property
+    def min_cutoff(self):
+        min_cutoff = 3.0
+        return min_cutoff
+
+    @inherit_docstring(Kernel._evaluate)
+    def _evaluate(self, t):
+        if not self.invert:
+            kernel = (t >= 0) * (1. / self._sigma_scaled.magnitude) *\
+                np.exp((-t / self._sigma_scaled).magnitude) / t.units
+        elif self.invert:
+            kernel = (t <= 0) * (1. / self._sigma_scaled.magnitude) *\
+                np.exp((t / self._sigma_scaled).magnitude) / t.units
+        return kernel
+
+    @inherit_docstring(Kernel.boundary_enclosing_area_fraction)
+    def boundary_enclosing_area_fraction(self, fraction):
+        self._check_fraction(fraction)
+        return -self.sigma * np.log(1.0 - fraction)
+
+
+class AlphaKernel(Kernel):
+    """
+    Class for alpha kernels
+
+    .. math::
+        K(t) = \\left\\{\\begin{array}{ll} (1 / \\tau^2)
+        \\ t\\ \\exp{(-t / \\tau)}, & t > 0 \\\\
+        0, & t \\leq 0 \\end{array} \\right.
+
+    with :math:`\\tau = \\sigma / \\sqrt{2}`.
+
+    For the alpha kernel an analytical expression for the boundary of the
+    integral as a function of the area under the alpha kernel function
+    cannot be given. Hence in this case the value of the boundary is
+    determined by kernel-approximating numerical integration, inherited
+    from the Kernel class.
+
+    Derived from:
+    """
+    __doc__ += Kernel.__doc__
+
+    @property
+    def min_cutoff(self):
+        min_cutoff = 3.0
+        return min_cutoff
+
+    @inherit_docstring(Kernel._evaluate)
+    def _evaluate(self, t):
+        if not self.invert:
+            kernel = (t >= 0) * 2. * (t / self._sigma_scaled**2).magnitude *\
+                np.exp((
+                    -t * np.sqrt(2.) / self._sigma_scaled).magnitude) / t.units
+        elif self.invert:
+            kernel = (t <= 0) * -2. * (t / self._sigma_scaled**2).magnitude *\
+                np.exp((
+                    t * np.sqrt(2.) / self._sigma_scaled).magnitude) / t.units
+        return kernel
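To illustrate the `invert` flag of the two asymmetric kernels above (a sketch with hypothetical numbers): an inverted exponential kernel carries all of its mass at non-positive times, so its median index lies left of the centre of a symmetric time grid.

import numpy as np
import quantities as pq
from elephant import kernels

t = np.linspace(-500, 500, 1001) * pq.ms
k = kernels.ExponentialKernel(sigma=100 * pq.ms, invert=True)
vals = k(t)
print(float(vals[t > 0 * pq.ms].sum()))   # 0.0 -- no mass on the positive side
print(k.median_index(t), len(t) // 2)     # median index well below the centre (500)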

+ 199 - 0
code/elephant/elephant/neo_tools.py

@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+"""
+Tools to manipulate Neo objects.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+from __future__ import division, print_function
+
+from itertools import chain
+
+from neo.core.container import unique_objs
+
+
+def extract_neo_attrs(obj, parents=True, child_first=True,
+                      skip_array=False, skip_none=False):
+    """Given a neo object, return a dictionary of attributes and annotations.
+
+    Parameters
+    ----------
+
+    obj : neo object
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+    skip_array : bool, optional
+                 If True (default False), skip attributes that store non-scalar
+                 array values.
+    skip_none : bool, optional
+                If True (default False), skip annotations and attributes that
+                have a value of `None`.
+
+    Returns
+    -------
+
+    dict
+        A dictionary where the keys are annotations or attribute names and
+        the values are the corresponding annotation or attribute value.
+
+    """
+    attrs = obj.annotations.copy()
+    for attr in obj._necessary_attrs + obj._recommended_attrs:
+        if skip_array and len(attr) >= 3 and attr[2]:
+            continue
+        attr = attr[0]
+        if attr == getattr(obj, '_quantity_attr', None):
+            continue
+        attrs[attr] = getattr(obj, attr, None)
+
+    if skip_none:
+        for attr, value in attrs.copy().items():
+            if value is None:
+                del attrs[attr]
+
+    if not parents:
+        return attrs
+
+    for parent in getattr(obj, 'parents', []):
+        if parent is None:
+            continue
+        newattr = extract_neo_attrs(parent, parents=True,
+                                    child_first=child_first,
+                                    skip_array=skip_array,
+                                    skip_none=skip_none)
+        if child_first:
+            newattr.update(attrs)
+            attrs = newattr
+        else:
+            attrs.update(newattr)
+
+    return attrs
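A small sketch (hypothetical spike train, not taken from the dataset): collect the scalar metadata of a SpikeTrain while skipping array-valued attributes such as the spike times themselves.

import quantities as pq
from neo.core import SpikeTrain

st = SpikeTrain([0.1, 0.5, 1.2] * pq.s, t_stop=2.0 * pq.s,
                name='unit-1', trial=7)
attrs = extract_neo_attrs(st, parents=False, skip_array=True, skip_none=True)
print(sorted(attrs))   # ['name', 't_start', 't_stop', 'trial'] with this neo version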
+
+
+def _get_all_objs(container, classname):
+    """Get all `neo` objects of a given type from a container.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    neo objects of a particular class, as well as any neo object that can hold
+    the object.
+    Objects are searched recursively, so the objects can be nested (such as a
+    list of blocks).
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict, neo container
+                The container for the neo objects.
+    classname : str
+                The name of the class, with proper capitalization
+                (so `SpikeTrain`, not `Spiketrain` or `spiketrain`)
+
+    Returns
+    -------
+
+    list
+        A list of unique `neo` objects
+
+    """
+    if container.__class__.__name__ == classname:
+        return [container]
+    classholder = classname.lower() + 's'
+    if hasattr(container, classholder):
+        vals = getattr(container, classholder)
+    elif hasattr(container, 'list_children_by_class'):
+        vals = container.list_children_by_class(classname)
+    elif hasattr(container, 'values') and not hasattr(container, 'ndim'):
+        vals = container.values()
+    elif hasattr(container, '__iter__') and not hasattr(container, 'ndim'):
+        vals = container
+    else:
+        raise ValueError('Cannot handle object of type %s' % type(container))
+    res = list(chain.from_iterable(_get_all_objs(obj, classname)
+                                   for obj in vals))
+    return unique_objs(res)
+
+
+def get_all_spiketrains(container):
+    """Get all `neo.Spiketrain` objects from a container.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    spiketrains, as well as any neo object that can hold spiketrains:
+    `neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
+
+    Containers are searched recursively, so the objects can be nested
+    (such as a list of blocks).
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict,
+                neo Block, neo Segment, neo Unit, neo ChannelIndex
+                The container for the spiketrains.
+
+    Returns
+    -------
+
+    list
+        A list of the unique `neo.SpikeTrain` objects in `container`.
+
+    """
+    return _get_all_objs(container, 'SpikeTrain')
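For example (hypothetical segments; a sketch only), the helper walks a nested container and returns the unique spiketrains it finds:

import quantities as pq
from neo.core import Segment, SpikeTrain

seg1, seg2 = Segment(), Segment()
seg1.spiketrains.append(SpikeTrain([0.1, 0.4] * pq.s, t_stop=1 * pq.s))
seg2.spiketrains.append(SpikeTrain([0.2, 0.9] * pq.s, t_stop=1 * pq.s))
print(len(get_all_spiketrains([seg1, seg2])))   # -> 2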
+
+
+def get_all_events(container):
+    """Get all `neo.Event` objects from a container.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    events, as well as any neo object that can hold events:
+    `neo.Block` and `neo.Segment`.
+
+    Containers are searched recursively, so the objects can be nested
+    (such as a list of blocks).
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict, neo Block, neo Segment
+                The container for the events.
+
+    Returns
+    -------
+
+    list
+        A list of the unique `neo.Event` objects in `container`.
+
+    """
+    return _get_all_objs(container, 'Event')
+
+
+def get_all_epochs(container):
+    """Get all `neo.Epoch` objects from a container.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    epochs, as well as any neo object that can hold epochs:
+    `neo.Block` and `neo.Segment`.
+
+    Containers are searched recursively, so the objects can be nested
+    (such as a list of blocks).
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict, neo Block, neo Segment
+                The container for the epochs.
+
+    Returns
+    -------
+
+    list
+        A list of the unique `neo.Epoch` objects in `container`.
+
+    """
+    return _get_all_objs(container, 'Epoch')

+ 612 - 0
code/elephant/elephant/pandas_bridge.py

@@ -0,0 +1,612 @@
+# -*- coding: utf-8 -*-
+"""
+Bridge to the pandas library.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+from __future__ import division, print_function, unicode_literals
+
+import numpy as np
+import pandas as pd
+import quantities as pq
+
+from elephant.neo_tools import (extract_neo_attrs, get_all_epochs,
+                                get_all_events, get_all_spiketrains)
+
+
+def _multiindex_from_dict(inds):
+    """Given a dictionary, return a `pandas.MultiIndex`.
+
+    Parameters
+    ----------
+    inds : dict
+           A dictionary where the keys are annotations or attribute names and
+           the values are the corresponding annotation or attribute value.
+
+    Returns
+    -------
+    pandas MultiIndex
+    """
+    names, indexes = zip(*sorted(inds.items()))
+    return pd.MultiIndex.from_tuples([indexes], names=names)
+
+
+def _sort_inds(obj, axis=0):
+    """Put the indexes and index levels of a pandas object in sorted order.
+
+    Parameters
+    ----------
+    obj : pandas Series, DataFrame, Panel, or Panel4D
+          The object whose indexes should be sorted.
+    axis : int, list, optional, 'all'
+           The axis whose indexes should be sorted.  Default is 0.
+           Can also be a list of indexes, in which case all of those axes
+           are sorted.  If 'all', sort all indexes.
+
+    Returns
+    -------
+    pandas Series, DataFrame, Panel, or Panel4D
+        A copy of the object with indexes sorted.
+        Indexes are sorted in-place.
+    """
+    if axis == 'all':
+        return _sort_inds(obj, axis=range(obj.ndim))
+
+    if hasattr(axis, '__iter__'):
+        for iax in axis:
+            obj = _sort_inds(obj, iax)
+        return obj
+
+    obj = obj.reorder_levels(sorted(obj.axes[axis].names), axis=axis)
+    return obj.sortlevel(0, axis=axis, sort_remaining=True)
+
+
+def _extract_neo_attrs_safe(obj, parents=True, child_first=True):
+    """Given a neo object, return a dictionary of attributes and annotations.
+
+    This is done in a manner that is safe for `pandas` indexes.
+
+    Parameters
+    ----------
+
+    obj : neo object
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+
+    Returns
+    -------
+
+    dict
+        A dictionary where the keys are annotations or attribute names and
+        the values are the corresponding annotation or attribute value.
+
+    """
+    res = extract_neo_attrs(obj, skip_array=True, skip_none=True,
+                            parents=parents, child_first=child_first)
+    for key, value in res.items():
+        res[key] = _convert_value_safe(value)
+        key2 = _convert_value_safe(key)
+        if key2 is not key:
+            res[key2] = res.pop(key)
+
+    return res
+
+
+def _convert_value_safe(value):
+    """Convert `neo` values to a value compatible with `pandas`.
+
+    Some types and dtypes used with neo are not safe to use with pandas in some
+    or all situations.
+
+    `quantities.Quantity` objects don't follow the normal Python rule that
+    values which compare equal should have the same hash, making them
+    fundamentally incompatible with `pandas`.
+
+    On python 3, `pandas` coerces `S` dtypes to bytes, which are not always
+    safe to use.
+
+    Parameters
+    ----------
+
+    value : any
+            Value to convert (if it has any known issues).
+
+    Returns
+    -------
+
+    any
+        `value` or a version of value with potential problems fixed.
+
+    """
+    if hasattr(value, 'dimensionality'):
+        return (value.magnitude.tolist(), str(value.dimensionality))
+    if hasattr(value, 'dtype') and value.dtype.kind == 'S':
+        return value.astype('U').tolist()
+    if hasattr(value, 'tolist'):
+        return value.tolist()
+    if hasattr(value, 'decode') and not hasattr(value, 'encode'):
+        return value.decode('UTF8')
+    return value
+
+
+def spiketrain_to_dataframe(spiketrain, parents=True, child_first=True):
+    """Convert a `neo.SpikeTrain` to a `pandas.DataFrame`.
+
+    The `pandas.DataFrame` object has a single column, with each element
+    being the spike time converted to a `float` value in seconds.
+
+    The column heading is a `pandas.MultiIndex` with one index
+    for each of the scalar attributes and annotations.  The `index`
+    is the spike number.
+
+    Parameters
+    ----------
+
+    spiketrain : neo SpikeTrain
+                 The SpikeTrain to convert.
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+
+    Returns
+    -------
+
+    pandas DataFrame
+        A DataFrame containing the spike times from `spiketrain`.
+
+    Notes
+    -----
+
+    The index name is `spike_number`.
+
+    Attributes that contain non-scalar values are skipped.  So are
+    annotations or attributes containing a value of `None`.
+
+    `quantity.Quantities` types are incompatible with `pandas`, so attributes
+    and annotations of that type are converted to a tuple where the first
+    element is the scalar value and the second is the string representation of
+    the units.
+
+    """
+    attrs = _extract_neo_attrs_safe(spiketrain,
+                                    parents=parents, child_first=child_first)
+    columns = _multiindex_from_dict(attrs)
+
+    times = spiketrain.magnitude
+    times = pq.Quantity(times, spiketrain.units).rescale('s').magnitude
+    times = times[np.newaxis].T
+
+    index = pd.Index(np.arange(len(spiketrain)), name='spike_number')
+
+    pdobj = pd.DataFrame(times, index=index, columns=columns)
+    return _sort_inds(pdobj, axis=1)
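A usage sketch (hypothetical spike train; assumes the pandas version this commit targets): the frame has one column whose MultiIndex header carries the scalar metadata, and the spike times appear as floats in seconds.

import quantities as pq
from neo.core import SpikeTrain

st = SpikeTrain([0.1, 0.35, 0.72] * pq.s, t_stop=1 * pq.s, name='unit-1')
df = spiketrain_to_dataframe(st, parents=False)
print(df.shape)        # (3, 1) -- three spikes, one column
print(df.index.name)   # spike_number
print(df.iloc[0, 0])   # 0.1 -- first spike time in seconds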
+
+
+def event_to_dataframe(event, parents=True, child_first=True):
+    """Convert a `neo.core.Event` to a `pandas.DataFrame`.
+
+    The `pandas.DataFrame` object has a single column, with each element
+    being the event label from the `event.label` attribute.
+
+    The column heading is a `pandas.MultiIndex` with one index
+    for each of the scalar attributes and annotations.  The `index`
+    is the time stamp from the `event.times` attribute.
+
+    Parameters
+    ----------
+
+    event : neo Event
+            The Event to convert.
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+
+    Returns
+    -------
+
+    pandas DataFrame
+        A DataFrame containing the labels from `event`.
+
+    Notes
+    -----
+
+    If the length of event.times and event.labels are not the same,
+    the longer will be truncated to the length of the shorter.
+
+    The index name is `times`.
+
+    Attributes that contain non-scalar values are skipped.  So are
+    annotations or attributes containing a value of `None`.
+
+    `quantity.Quantities` types are incompatible with `pandas`, so attributes
+    and annotations of that type are converted to a tuple where the first
+    element is the scalar value and the second is the string representation of
+    the units.
+
+    """
+    attrs = _extract_neo_attrs_safe(event,
+                                    parents=parents, child_first=child_first)
+    columns = _multiindex_from_dict(attrs)
+
+    times = event.times.rescale('s').magnitude
+    labels = event.labels.astype('U')
+
+    times = times[:len(labels)]
+    labels = labels[:len(times)]
+
+    index = pd.Index(times, name='times')
+
+    pdobj = pd.DataFrame(labels[np.newaxis].T, index=index, columns=columns)
+    return _sort_inds(pdobj, axis=1)
+
+
+def epoch_to_dataframe(epoch, parents=True, child_first=True):
+    """Convert a `neo.core.Epoch` to a `pandas.DataFrame`.
+
+    The `pandas.DataFrame` object has a single column, with each element
+    being the epoch label from the `epoch.label` attribute.
+
+    The column heading is a `pandas.MultiIndex` with one index
+    for each of the scalar attributes and annotations.  The `index`
+    is a `pandas.MultiIndex`, with the first index being the time stamp from
+    the `epoch.times` attribute and the second being the duration from
+    the `epoch.durations` attribute.
+
+    Parameters
+    ----------
+
+    epoch : neo Epoch
+            The Epoch to convert.
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+
+    Returns
+    -------
+
+    pandas DataFrame
+        A DataFrame containing the labels from `epoch`.
+
+    Notes
+    -----
+
+    If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels` are
+    not the same, the longer ones will be truncated to the length of the
+    shortest.
+
+    The index names for `epoch.times` and `epoch.durations` are `times` and
+    `durations`, respectively.
+
+    Attributes that contain non-scalar values are skipped.  So are
+    annotations or attributes containing a value of `None`.
+
+    `quantity.Quantities` types are incompatible with `pandas`, so attributes
+    and annotations of that type are converted to a tuple where the first
+    element is the scalar value and the second is the string representation of
+    the units.
+
+    """
+    attrs = _extract_neo_attrs_safe(epoch,
+                                    parents=parents, child_first=child_first)
+    columns = _multiindex_from_dict(attrs)
+
+    times = epoch.times.rescale('s').magnitude
+    durs = epoch.durations.rescale('s').magnitude
+    labels = epoch.labels.astype('U')
+
+    minlen = min([len(durs), len(times), len(labels)])
+    index = pd.MultiIndex.from_arrays([times[:minlen], durs[:minlen]],
+                                      names=['times', 'durations'])
+
+    pdobj = pd.DataFrame(labels[:minlen][np.newaxis].T,
+                         index=index, columns=columns)
+    return _sort_inds(pdobj, axis='all')
+
+
+def _multi_objs_to_dataframe(container, conv_func, get_func,
+                             parents=True, child_first=True):
+    """Convert one or more of a given `neo` object to a `pandas.DataFrame`.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    the object, as well as any neo object that can hold the object.
+    Objects are searched recursively, so the objects can be nested (such as a
+    list of blocks).
+
+    The column heading is a `pandas.MultiIndex` with one index
+    for each of the scalar attributes and annotations of the respective
+    object.
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict, neo container object
+                The container for the objects to convert.
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+
+    Returns
+    -------
+
+    pandas DataFrame
+        A DataFrame containing the converted objects.
+
+    Attributes that contain non-scalar values are skipped.  So are
+    annotations or attributes containing a value of `None`.
+
+    `quantity.Quantities` types are incompatible with `pandas`, so attributes
+    and annotations of that type are converted to a tuple where the first
+    element is the scalar value and the second is the string representation of
+    the units.
+
+    """
+    res = pd.concat([conv_func(obj, parents=parents, child_first=child_first)
+                     for obj in get_func(container)], axis=1)
+    return _sort_inds(res, axis=1)
+
+
+def multi_spiketrains_to_dataframe(container,
+                                   parents=True, child_first=True):
+    """Convert one or more `neo.SpikeTrain` objects to a `pandas.DataFrame`.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    spiketrains, as well as any neo object that can hold spiketrains:
+    `neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
+    Objects are searched recursively, so the objects can be nested (such as a
+    list of blocks).
+
+    The `pandas.DataFrame` object has one column for each spiketrain, with each
+    element being the spike time converted to a `float` value in seconds.
+    Columns are padded to the same length with `NaN` values.
+
+    The column heading is a `pandas.MultiIndex` with one index
+    for each of the scalar attributes and annotations of the respective
+    spiketrain.  The `index` is the spike number.
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict,
+                neo Block, neo Segment, neo Unit, neo ChannelIndex
+                The container for the spiketrains to convert.
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+
+    Returns
+    -------
+
+    pandas DataFrame
+        A DataFrame containing the spike times from `container`.
+
+    Notes
+    -----
+
+    The index name is `spike_number`.
+
+    Attributes that contain non-scalar values are skipped.  So are
+    annotations or attributes containing a value of `None`.
+
+    `quantity.Quantities` types are incompatible with `pandas`, so attributes
+    and annotations of that type are converted to a tuple where the first
+    element is the scalar value and the second is the string representation of
+    the units.
+
+    """
+    return _multi_objs_to_dataframe(container,
+                                    spiketrain_to_dataframe,
+                                    get_all_spiketrains,
+                                    parents=parents, child_first=child_first)
+
+
+def multi_events_to_dataframe(container, parents=True, child_first=True):
+    """Convert one or more `neo.Event` objects to a `pandas.DataFrame`.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    events, as well as any neo object that can hold events:
+    `neo.Block` and `neo.Segment`.  Objects are searched recursively, so the
+    objects can be nested (such as a list of blocks).
+
+    The `pandas.DataFrame` object has one column for each event, with each
+    element being the event label. Columns are padded to the same length
+    with `NaN` values.
+
+    The column heading is a `pandas.MultiIndex` with one index
+    for each of the scalar attributes and annotations of the respective
+    event.  The `index` is the time stamp from the `event.times` attribute.
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict, neo Block, neo Segment
+                The container for the events to convert.
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+
+    Returns
+    -------
+
+    pandas DataFrame
+        A DataFrame containing the labels from `container`.
+
+    Notes
+    -----
+
+    If the length of event.times and event.labels are not the same for any
+    individual event, the longer will be truncated to the length of the
+    shorter for that event.  Between events, lengths can differ.
+
+    The index name is `times`.
+
+    Attributes that contain non-scalar values are skipped.  So are
+    annotations or attributes containing a value of `None`.
+
+    `quantity.Quantities` types are incompatible with `pandas`, so attributes
+    and annotations of that type are converted to a tuple where the first
+    element is the scalar value and the second is the string representation of
+    the units.
+
+    """
+    return _multi_objs_to_dataframe(container,
+                                    event_to_dataframe, get_all_events,
+                                    parents=parents, child_first=child_first)
+
+
+def multi_epochs_to_dataframe(container, parents=True, child_first=True):
+    """Convert one or more `neo.Epoch` objects to a `pandas.DataFrame`.
+
+    The objects can be any list, dict, or other iterable or mapping containing
+    epochs, as well as any neo object that can hold epochs:
+    `neo.Block` and `neo.Segment`.  Objects are searched recursively, so the
+    objects can be nested (such as a list of blocks).
+
+    The `pandas.DataFrame` object has one column for each epoch, with each
+    element being the epoch label. Columns are padded to the same length
+    with `NaN` values.
+
+    The column heading is a `pandas.MultiIndex` with one index
+    for each of the scalar attributes and annotations of the respective
+    epoch.  The `index` is a `pandas.MultiIndex`, with the first index being
+    the time stamp from the `epoch.times` attribute and the second being the
+    duration from the `epoch.durations` attribute.
+
+    Parameters
+    ----------
+
+    container : list, tuple, iterable, dict, neo Block, neo Segment
+                The container for the epochs to convert.
+    parents : bool, optional
+              Also include attributes and annotations from parent neo
+              objects (if any).
+    child_first : bool, optional
+                  If True (default True), values of child attributes are used
+                  over parent attributes in the event of a name conflict.
+                  If False, parent attributes are used.
+                  This parameter does nothing if `parents` is False.
+
+    Returns
+    -------
+
+    pandas DataFrame
+        A DataFrame containing the labels from `container`.
+
+    Notes
+    -----
+
+    If the lengths of `epoch.times`, `epoch.durations`, and `epoch.labels` are
+    not the same for any individual epoch, the longer ones will be truncated
+    to the length of the shortest for that epoch.  Between epochs, lengths can
+    differ.
+
+    The index level names for `epoch.times` and `epoch.durations` are
+    `times` and `durations`, respectively.
+
+    Attributes that contain non-scalar values are skipped.  So are
+    annotations or attributes containing a value of `None`.
+
+    `quantity.Quantities` types are incompatible with `pandas`, so attributes
+    and annotations of that type are converted to a tuple where the first
+    element is the scalar value and the second is the string representation of
+    the units.
+
+    """
+    return _multi_objs_to_dataframe(container,
+                                    epoch_to_dataframe, get_all_epochs,
+                                    parents=parents, child_first=child_first)
+
+
+def slice_spiketrain(pdobj, t_start=None, t_stop=None):
+    """Slice a `pandas.DataFrame`, changing indices appropriately.
+
+    Values outside the sliced range are converted to `NaN` values.
+
+    Slicing happens over columns.
+
+    This sets the `t_start` and `t_stop` column indexes to be the new values.
+    Otherwise it is the same as setting values outside the range to `NaN`.
+
+    Parameters
+    ----------
+    pdobj : pandas DataFrame
+            The DataFrame to slice.
+    t_start : float, optional.
+              If specified, values of the returned DataFrame less than this
+              are set to `NaN`.
+              Default is `None` (do not use this argument).
+    t_stop : float, optional.
+             If specified, values of the returned DataFrame greater than this
+             are set to `NaN`.
+             Default is `None` (do not use this argument).
+
+    Returns
+    -------
+
+    pdobj : scalar, pandas Series, DataFrame, or Panel
+            The returned data type is the same as the type of `pdobj`
+
+    Notes
+    -----
+
+    The order of the index and/or column levels of the returned object may
+    differ from the order of the original.
+
+    If `t_start` or `t_stop` is specified, all column indexes will be changed
+    to the respective values, including those already within the new range.
+    If `t_start` or `t_stop` is not specified, those column indexes will not
+    be changed.
+
+    Returns a copy, even if `t_start` and `t_stop` are both `None`.
+
+    """
+    if t_start is None and t_stop is None:
+        return pdobj.copy()
+
+    if t_stop is not None:
+        pdobj[pdobj > t_stop] = np.nan
+
+        pdobj = pdobj.T.reset_index(level='t_stop')
+        pdobj['t_stop'] = t_stop
+        pdobj = pdobj.set_index('t_stop', append=True).T
+        pdobj = _sort_inds(pdobj, axis=1)
+
+    if t_start is not None:
+        pdobj[pdobj < t_start] = np.nan
+
+        pdobj = pdobj.T.reset_index(level='t_start')
+        pdobj['t_start'] = t_start
+        pdobj = pdobj.set_index('t_start', append=True).T
+        pdobj = _sort_inds(pdobj, axis=1)
+
+    return pdobj
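# Illustrative sketch (not part of this commit): slicing a toy DataFrame shaped
# like the output of multi_spiketrains_to_dataframe(), i.e. one column per
# spike train with 't_start'/'t_stop' levels in the column MultiIndex. All
# names and values below are invented for illustration only.
import numpy as np
import pandas as pd
from elephant.pandas_bridge import slice_spiketrain

cols = pd.MultiIndex.from_tuples(
    [('unit_a', 0.0, 10.0), ('unit_b', 0.0, 10.0)],
    names=['name', 't_start', 't_stop'])
df = pd.DataFrame(np.array([[1.0, 2.0],
                            [4.0, 5.0],
                            [9.5, np.nan]]), columns=cols)
sliced = slice_spiketrain(df, t_start=2.0, t_stop=9.0)
# Spike times outside [2.0, 9.0] are now NaN, and the 't_start'/'t_stop'
# column-index levels have been rewritten to the new bounds.
print(sliced)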

+ 334 - 0
code/elephant/elephant/signal_processing.py

@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+'''
+Basic processing procedures for analog signals (e.g., performing a z-score of a
+signal, or filtering a signal).
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+'''
+
+from __future__ import division, print_function
+import numpy as np
+import scipy.signal
+import quantities as pq
+import neo
+
+
+def zscore(signal, inplace=True):
+    '''
+    Apply a z-score operation to one or several AnalogSignal objects.
+
+    The z-score operation subtracts the mean :math:`\\mu` of the signal, and
+    divides by its standard deviation :math:`\\sigma`:
+
+    .. math::
+         Z(x(t))= \\frac{x(t)-\\mu}{\\sigma}
+
+    If an AnalogSignal containing multiple signals is provided, the
+    z-transform is always calculated for each signal individually.
+
+    If a list of AnalogSignal objects is supplied, the mean and standard
+    deviation are calculated across all objects of the list. Thus, all list
+    elements are z-transformed by the same values of :math:`\\mu` and
+    :math:`\\sigma`. For AnalogSignal objects containing several signals, each
+    signal is treated separately across list elements. Therefore, all
+    AnalogSignal objects in the list must contain the same number of signals.
+
+    Parameters
+    ----------
+    signal : neo.AnalogSignal or list of neo.AnalogSignal
+        Signals for which to calculate the z-score.
+    inplace : bool
+        If True, the contents of the input signal(s) are replaced by the
+        z-transformed signal. Otherwise, a copy of the original
+        AnalogSignal(s) is returned. Default: True
+
+    Returns
+    -------
+    neo.AnalogSignal or list of neo.AnalogSignal
+        The output format matches the input format: for each supplied
+        AnalogSignal object a corresponding object is returned containing
+        the z-transformed signal with the unit dimensionless.
+
+    Use Case
+    --------
+    You may supply a list of AnalogSignal objects, where each object in
+    the list contains the data of one trial of the experiment, and each signal
+    of the AnalogSignal corresponds to the recordings from one specific
+    electrode in a particular trial. In this scenario, you will z-transform the
+    signal of each electrode separately, but transform all trials of a given
+    electrode in the same way.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import neo
+    >>> from quantities import mV, s, Hz
+    >>> a = neo.AnalogSignal(
+    ...       np.array([1, 2, 3, 4, 5, 6]).reshape(-1,1)*mV,
+    ...       t_start=0*s, sampling_rate=1000*Hz)
+
+    >>> b = neo.AnalogSignal(
+    ...       np.transpose([[1, 2, 3, 4, 5, 6], [11, 12, 13, 14, 15, 16]])*mV,
+    ...       t_start=0*s, sampling_rate=1000*Hz)
+
+    >>> c = neo.AnalogSignal(
+    ...       np.transpose([[21, 22, 23, 24, 25, 26], [31, 32, 33, 34, 35, 36]])*mV,
+    ...       t_start=0*s, sampling_rate=1000*Hz)
+
+    >>> print(zscore(a))
+    [[-1.46385011]
+     [-0.87831007]
+     [-0.29277002]
+     [ 0.29277002]
+     [ 0.87831007]
+     [ 1.46385011]] dimensionless
+
+    >>> print(zscore(b))
+    [[-1.46385011 -1.46385011]
+     [-0.87831007 -0.87831007]
+     [-0.29277002 -0.29277002]
+     [ 0.29277002  0.29277002]
+     [ 0.87831007  0.87831007]
+     [ 1.46385011  1.46385011]] dimensionless
+
+    >>> print(zscore([b,c]))
+    [<AnalogSignal(array([[-1.11669108, -1.08361877],
+       [-1.0672076 , -1.04878252],
+       [-1.01772411, -1.01394628],
+       [-0.96824063, -0.97911003],
+       [-0.91875714, -0.94427378],
+       [-0.86927366, -0.90943753]]) * dimensionless, [0.0 s, 0.006 s],
+       sampling rate: 1000.0 Hz)>,
+       <AnalogSignal(array([[ 0.78170952,  0.84779261],
+       [ 0.86621866,  0.90728682],
+       [ 0.9507278 ,  0.96678104],
+       [ 1.03523694,  1.02627526],
+       [ 1.11974608,  1.08576948],
+       [ 1.20425521,  1.1452637 ]]) * dimensionless, [0.0 s, 0.006 s],
+       sampling rate: 1000.0 Hz)>]
+    '''
+    # Transform input to a list
+    if type(signal) is not list:
+        signal = [signal]
+
+    # Calculate mean and standard deviation
+    m = np.mean(np.concatenate(signal), axis=0)
+    s = np.std(np.concatenate(signal), axis=0)
+
+    if not inplace:
+        # Create new signal instance
+        result = []
+        for sig in signal:
+            sig_dimless = sig.duplicate_with_new_array(
+                (sig.magnitude - m.magnitude) / s.magnitude) / sig.units
+            result.append(sig_dimless)
+    else:
+        result = []
+        # Overwrite signal
+        for sig in signal:
+            sig[:] = pq.Quantity(
+                (sig.magnitude - m.magnitude) / s.magnitude,
+                units=sig.units)
+            sig_dimless = sig / sig.units
+            result.append(sig_dimless)
+    # Return single object, or list of objects
+    if len(result) == 1:
+        return result[0]
+    else:
+        return result
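# Illustrative sketch (not part of this commit): a self-contained check of
# zscore() with inplace=False; the test signal below is invented for
# illustration only.
import numpy as np
import quantities as pq
import neo
from elephant.signal_processing import zscore

sig = neo.AnalogSignal(np.arange(1, 7).reshape(-1, 1) * pq.mV,
                       t_start=0 * pq.s, sampling_rate=1000 * pq.Hz)
z = zscore(sig, inplace=False)
# The result is dimensionless with zero mean and unit (population) standard
# deviation, while the original signal `sig` is left untouched.
print(np.allclose(np.asarray(z).mean(), 0.0),
      np.allclose(np.asarray(z).std(), 1.0))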
+
+
+def butter(signal, highpass_freq=None, lowpass_freq=None, order=4,
+           filter_function='filtfilt', fs=1.0, axis=-1):
+    """
+    Butterworth filtering function for neo.AnalogSignal. Filter type is
+    determined according to how values of `highpass_freq` and `lowpass_freq`
+    are given (see Parameters section for details).
+
+    Parameters
+    ----------
+    signal : AnalogSignal or Quantity array or NumPy ndarray
+        Time series data to be filtered. When given as Quantity array or NumPy
+        ndarray, the sampling frequency should be given through the keyword
+        argument `fs`.
+    highpass_freq, lowpass_freq : Quantity or float
+        High-pass and low-pass cut-off frequencies, respectively. When given as
+        float, the given value is taken as frequency in Hz.
+        Filter type is determined depending on values of these arguments:
+            * highpass_freq only (lowpass_freq = None):    highpass filter
+            * lowpass_freq only (highpass_freq = None):    lowpass filter
+            * highpass_freq < lowpass_freq:    bandpass filter
+            * highpass_freq > lowpass_freq:    bandstop filter
+    order : int
+        Order of Butterworth filter. Default is 4.
+    filter_function : string
+        Filtering function to be used. Either 'filtfilt'
+        (`scipy.signal.filtfilt()`) or 'lfilter' (`scipy.signal.lfilter()`). In
+        most applications 'filtfilt' should be used, because it introduces no
+        phase shift due to filtering. Default is 'filtfilt'.
+    fs : Quantity or float
+        The sampling frequency of the input time series. When given as float,
+        its value is taken as frequency in Hz. When the input is given as a neo
+        AnalogSignal, its `sampling_rate` attribute is used to specify the
+        sampling frequency and this parameter is ignored. Default is 1.0.
+    axis : int
+        Axis along which filter is applied. Default is -1.
+
+    Returns
+    -------
+    filtered_signal : AnalogSignal or Quantity array or NumPy ndarray
+        Filtered input data. The shape and type are identical to those of the
+        input.
+
+    """
+
+    def _design_butterworth_filter(Fs, hpfreq=None, lpfreq=None, order=4):
+        # set parameters for filter design
+        Fn = Fs / 2.
+        # - filter type is determined according to the values of cut-off
+        # frequencies
+        if lpfreq and hpfreq:
+            if hpfreq < lpfreq:
+                Wn = (hpfreq / Fn, lpfreq / Fn)
+                btype = 'bandpass'
+            else:
+                Wn = (lpfreq / Fn, hpfreq / Fn)
+                btype = 'bandstop'
+        elif lpfreq:
+            Wn = lpfreq / Fn
+            btype = 'lowpass'
+        elif hpfreq:
+            Wn = hpfreq / Fn
+            btype = 'highpass'
+        else:
+            raise ValueError(
+                "Either highpass_freq or lowpass_freq must be given"
+            )
+
+        # return filter coefficients
+        return scipy.signal.butter(order, Wn, btype=btype)
+
+    # design filter
+    Fs = signal.sampling_rate.rescale(pq.Hz).magnitude \
+        if hasattr(signal, 'sampling_rate') else fs
+    Fh = highpass_freq.rescale(pq.Hz).magnitude \
+        if isinstance(highpass_freq, pq.quantity.Quantity) else highpass_freq
+    Fl = lowpass_freq.rescale(pq.Hz).magnitude \
+        if isinstance(lowpass_freq, pq.quantity.Quantity) else lowpass_freq
+    b, a = _design_butterworth_filter(Fs, Fh, Fl, order)
+
+    # When the input is AnalogSignal, the axis for time index (i.e. the
+    # first axis) needs to be rolled to the last
+    data = np.asarray(signal)
+    if isinstance(signal, neo.AnalogSignal):
+        data = np.rollaxis(data, 0, len(data.shape))
+
+    # apply filter
+    if filter_function == 'lfilter':
+        filtered_data = scipy.signal.lfilter(b, a, data, axis=axis)
+    elif filter_function == 'filtfilt':
+        filtered_data = scipy.signal.filtfilt(b, a, data, axis=axis)
+    else:
+        raise ValueError(
+            "filter_function must be either 'filtfilt' or 'lfilter'"
+        )
+
+    if isinstance(signal, neo.AnalogSignal):
+        return signal.duplicate_with_new_array(np.rollaxis(filtered_data, -1, 0))
+    elif isinstance(signal, pq.quantity.Quantity):
+        return filtered_data * signal.units
+    else:
+        return filtered_data
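# Illustrative sketch (not part of this commit): band-pass filtering a noisy
# AnalogSignal with butter(); the cut-off values below are invented for
# illustration only.
import numpy as np
import quantities as pq
import neo
from elephant.signal_processing import butter

rng = np.random.RandomState(0)
noise = neo.AnalogSignal(rng.randn(10000, 1) * pq.mV,
                         t_start=0 * pq.s, sampling_rate=1000 * pq.Hz)
# highpass_freq < lowpass_freq selects a band-pass filter (here 8-30 Hz).
band = butter(noise, highpass_freq=8.0 * pq.Hz, lowpass_freq=30.0 * pq.Hz)
print(type(band).__name__, band.shape)   # same type and shape as the input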
+
+
+def hilbert(signal, N='nextpow'):
+    '''
+    Apply a Hilbert transform to an AnalogSignal object in order to obtain its
+    (complex) analytic signal.
+
+    The time series of the instantaneous angle and amplitude can be obtained as
+    the angle (np.angle) and absolute value (np.abs) of the complex analytic
+    signal, respectively.
+
+    By default, the function will zero-pad the signal to a length corresponding
+    to the next higher power of 2. This will provide higher computational
+    efficiency at the expense of memory. In addition, this circumvents a
+    situation where for some specific choices of the length of the input,
+    scipy.signal.hilbert() will not terminate.
+
+    Parameters
+    -----------
+    signal : neo.AnalogSignal
+        Signal(s) to transform
+    N : string or int
+        Defines whether the signal is zero-padded.
+            'none': no padding
+            'nextpow':  zero-pad to the next length that is a power of 2
+            int: directly specify the length to zero-pad to (indicates the
+                number of Fourier components, see parameter N of
+                scipy.signal.hilbert()).
+        Default: 'nextpow'.
+
+    Returns
+    -------
+    neo.AnalogSignal
+        Contains the complex analytic signal(s) corresponding to the input
+        signals. The unit of the analytic signal is dimensionless.
+
+    Example
+    -------
+    Create a sine signal at 5 Hz with increasing amplitude and calculate the
+    instantaneous phases
+
+    >>> import numpy as np
+    >>> import neo
+    >>> import matplotlib.pyplot as plt
+    >>> from quantities import ms, s, mV, Hz
+    >>> t = np.arange(0, 5000) * ms
+    >>> f = 5. * Hz
+    >>> a = neo.AnalogSignal(
+    ...       np.array(
+    ...           (1 + t.magnitude / t[-1].magnitude) * np.sin(
+    ...               2. * np.pi * f * t.rescale(s))).reshape((-1,1))*mV,
+    ...       t_start=0*s, sampling_rate=1000*Hz)
+
+    >>> analytic_signal = hilbert(a, N='nextpow')
+    >>> angles = np.angle(analytic_signal)
+    >>> amplitudes = np.abs(analytic_signal)
+    >>> print(angles)
+            [[-1.57079633]
+             [-1.51334228]
+             [-1.46047675]
+             ...,
+             [-1.73112977]
+             [-1.68211683]
+             [-1.62879501]]
+    >>> plt.plot(t,angles)
+    '''
+    # Length of input signals
+    n_org = signal.shape[0]
+
+    # Right-pad signal to desired length using the signal itself
+    if type(N) == int:
+        # User defined padding
+        n = N
+    elif N == 'nextpow':
+        # To speed up calculation of the Hilbert transform, make sure we change
+        # the signal to be of a length that is a power of two. Failure to do so
+        # results in computations for certain signal lengths not finishing
+        # (or finishing only after an absurdly long time). This might be a
+        # bug in scipy (0.16), e.g.,
+        # the following code will not terminate for this value of k:
+        #
+        # import numpy
+        # import scipy.signal
+        # k=679346
+        # t = np.arange(0, k) / 1000.
+        # a = (1 + t / t[-1]) * np.sin(2 * np.pi * 5 * t)
+        # analytic_signal = scipy.signal.hilbert(a)
+        #
+        # For this reason, nextpow is the default setting for now.
+
+        n = 2 ** (int(np.log2(n_org - 1)) + 1)
+    elif N == 'none':
+        # No padding
+        n = n_org
+    else:
+        raise ValueError("'{}' is an unknown N.".format(N))
+
+    output = signal.duplicate_with_new_array(
+        scipy.signal.hilbert(signal.magnitude, N=n, axis=0)[:n_org])
+    return output / output.units
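# Illustrative sketch (not part of this commit): instantaneous amplitude
# (envelope) of a 5 Hz test signal obtained via hilbert(); the signal is
# invented for illustration only.
import numpy as np
import quantities as pq
import neo
from elephant.signal_processing import hilbert

t = np.arange(0, 2000) / 1000.0                      # 2 s sampled at 1 kHz
wave = (1 + t / t[-1]) * np.sin(2 * np.pi * 5.0 * t)
sig = neo.AnalogSignal(wave.reshape(-1, 1) * pq.mV,
                       t_start=0 * pq.s, sampling_rate=1000 * pq.Hz)
analytic = hilbert(sig, N='nextpow')
envelope = np.abs(np.asarray(analytic))              # instantaneous amplitude
print(envelope.shape)                                # (2000, 1), as the input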

+ 467 - 0
code/elephant/elephant/spectral.py

@@ -0,0 +1,467 @@
+# -*- coding: utf-8 -*-
+"""
+Identification of spectral properties in analog signals (e.g., the power
+spectrum).
+
+:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import warnings
+
+import numpy as np
+import scipy.signal
+import scipy.fftpack as fftpack
+import scipy.signal.signaltools as signaltools
+from scipy.signal.windows import get_window
+from six import string_types
+import quantities as pq
+import neo
+
+
+def _welch(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
+          nfft=None, detrend='constant', scaling='density', axis=-1):
+    """
+    A helper function to estimate cross spectral density using Welch's method.
+    This function is a slightly modified version of `scipy.signal.welch()` with
+    modifications based on `matplotlib.mlab._spectral_helper()`.
+
+    Welch's method [1]_ computes an estimate of the cross spectral density
+    by dividing the data into overlapping segments, computing a modified
+    periodogram for each segment and averaging the cross-periodograms.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Time series of measurement values
+    fs : float, optional
+        Sampling frequency of the `x` and `y` time series in units of Hz.
+        Defaults to 1.0.
+    window : str or tuple or array_like, optional
+        Desired window to use. See `get_window` for a list of windows and
+        required parameters. If `window` is array_like it will be used
+        directly as the window and its length will be used for nperseg.
+        Defaults to 'hanning'.
+    nperseg : int, optional
+        Length of each segment.  Defaults to 256.
+    noverlap: int, optional
+        Number of points to overlap between segments. If None,
+        ``noverlap = nperseg / 2``.  Defaults to None.
+    nfft : int, optional
+        Length of the FFT used, if a zero padded FFT is desired.  If None,
+        the FFT length is `nperseg`. Defaults to None.
+    detrend : str or function, optional
+        Specifies how to detrend each segment. If `detrend` is a string,
+        it is passed as the ``type`` argument to `detrend`. If it is a
+        function, it takes a segment and returns a detrended segment.
+        Defaults to 'constant'.
+    scaling : { 'density', 'spectrum' }, optional
+        Selects between computing the power spectral density ('density')
+        where Pxx has units of V**2/Hz if x is measured in V and computing
+        the power spectrum ('spectrum') where Pxx has units of V**2 if x is
+        measured in V. Defaults to 'density'.
+    axis : int, optional
+        Axis along which the periodogram is computed; the default is over
+        the last axis (i.e. ``axis=-1``).
+
+    Returns
+    -------
+    f : ndarray
+        Array of sample frequencies.
+    Pxy : ndarray
+        Cross spectral density or cross spectrum of x and y.
+
+    Notes
+    -----
+    An appropriate amount of overlap will depend on the choice of window
+    and on your requirements.  For the default 'hanning' window an
+    overlap of 50% is a reasonable trade off between accurately estimating
+    the signal power, while not over counting any of the data.  Narrower
+    windows may require a larger overlap.
+
+    If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
+
+    References
+    ----------
+    .. [1] P. Welch, "The use of the fast Fourier transform for the
+           estimation of power spectra: A method based on time averaging
+           over short, modified periodograms", IEEE Trans. Audio
+           Electroacoust. vol. 15, pp. 70-73, 1967.
+    .. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
+           Biometrika, vol. 37, pp. 1-16, 1950.
+    """
+    # TODO: This function should be replaced by `scipy.signal.csd()`, which
+    # will appear in SciPy 0.16.0.
+
+    # The checks for if y is x are so that we can use the same function to
+    # obtain both power spectrum and cross spectrum without doing extra
+    # calculations.
+    same_data = y is x
+    # Make sure we're dealing with a numpy array. If y and x were the same
+    # object to start with, keep them that way
+    x = np.asarray(x)
+    if same_data:
+        y = x
+    else:
+        if x.shape != y.shape:
+            raise ValueError("x and y must be of the same shape.")
+        y = np.asarray(y)
+
+    if x.size == 0:
+        return np.empty(x.shape), np.empty(x.shape)
+
+    if axis != -1:
+        x = np.rollaxis(x, axis, len(x.shape))
+        if not same_data:
+            y = np.rollaxis(y, axis, len(y.shape))
+
+    if x.shape[-1] < nperseg:
+        warnings.warn('nperseg = %d, is greater than x.shape[%d] = %d, using '
+                      'nperseg = x.shape[%d]'
+                      % (nperseg, axis, x.shape[axis], axis))
+        nperseg = x.shape[-1]
+
+    if isinstance(window, string_types) or type(window) is tuple:
+        win = get_window(window, nperseg)
+    else:
+        win = np.asarray(window)
+        if len(win.shape) != 1:
+            raise ValueError('window must be 1-D')
+        if win.shape[0] > x.shape[-1]:
+            raise ValueError('window is longer than x.')
+        nperseg = win.shape[0]
+
+    if scaling == 'density':
+        scale = 1.0 / (fs * (win * win).sum())
+    elif scaling == 'spectrum':
+        scale = 1.0 / win.sum()**2
+    else:
+        raise ValueError('Unknown scaling: %r' % scaling)
+
+    if noverlap is None:
+        noverlap = nperseg // 2
+    elif noverlap >= nperseg:
+        raise ValueError('noverlap must be less than nperseg.')
+
+    if nfft is None:
+        nfft = nperseg
+    elif nfft < nperseg:
+        raise ValueError('nfft must be greater than or equal to nperseg.')
+
+    if not hasattr(detrend, '__call__'):
+        detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
+    elif axis != -1:
+        # Wrap this function so that it receives a shape that it could
+        # reasonably expect to receive.
+        def detrend_func(seg):
+            seg = np.rollaxis(seg, -1, axis)
+            seg = detrend(seg)
+            return np.rollaxis(seg, axis, len(seg.shape))
+    else:
+        detrend_func = detrend
+
+    step = nperseg - noverlap
+    indices = np.arange(0, x.shape[-1] - nperseg + 1, step)
+
+    for k, ind in enumerate(indices):
+        x_dt = detrend_func(x[..., ind:ind + nperseg])
+        xft = fftpack.fft(x_dt * win, nfft)
+        if same_data:
+            yft = xft
+        else:
+            y_dt = detrend_func(y[..., ind:ind + nperseg])
+            yft = fftpack.fft(y_dt * win, nfft)
+        if k == 0:
+            Pxy = (xft * yft.conj())
+        else:
+            Pxy *= k / (k + 1.0)
+            Pxy += (xft * yft.conj()) / (k + 1.0)
+    Pxy *= scale
+    f = fftpack.fftfreq(nfft, 1.0 / fs)
+
+    if axis != -1:
+        Pxy = np.rollaxis(Pxy, -1, axis)
+
+    return f, Pxy
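# Illustrative sketch (not part of this commit): cross-spectrum of two noisy
# recordings of the same 20 Hz sine computed with the private helper _welch();
# all signal parameters are invented for illustration. Note that _welch()
# returns a two-sided spectrum.
import numpy as np
from elephant.spectral import _welch

fs = 1000.0
t = np.arange(0, 4.0, 1.0 / fs)
rng = np.random.RandomState(1)
x = np.sin(2 * np.pi * 20.0 * t) + 0.5 * rng.randn(t.size)
y = np.sin(2 * np.pi * 20.0 * t) + 0.5 * rng.randn(t.size)
freqs, Pxy = _welch(x, y, fs=fs, nperseg=500)
# The magnitude of the cross-spectrum peaks at the shared 20 Hz component.
print(abs(freqs[np.argmax(np.abs(Pxy))]))            # expected: 20.0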
+
+
+def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5,
+              fs=1.0, window='hanning', nfft=None, detrend='constant',
+              return_onesided=True, scaling='density', axis=-1):
+    """
+    Estimates power spectrum density (PSD) of a given AnalogSignal using
+    Welch's method, which works in the following steps:
+        1. cut the given data into several overlapping segments. The degree of
+            overlap can be specified by parameter *overlap* (default is 0.5,
+            i.e. segments are overlapped by the half of their length).
+            The number and the length of the segments are determined according
+            to parameter *num_seg*, *len_seg* or *freq_res*. By default, the
+            data is cut into 8 segments.
+        2. apply a window function to each segment. Hanning window is used by
+            default. This can be changed by giving a window function or an
+            array as parameter *window* (for details, see the docstring of
+            `scipy.signal.welch()`)
+        3. compute the periodogram of each segment
+        4. average the obtained periodograms to yield PSD estimate
+    These steps are implemented in `scipy.signal`, and this function is a
+    wrapper which provides a proper set of parameters to
+    `scipy.signal.welch()`. Some parameters for scipy.signal.welch(), such as
+    `nfft`, `detrend`, `window`, `return_onesided` and `scaling`, also work
+    for this function.
+
+    Parameters
+    ----------
+    signal: Neo AnalogSignal or Quantity array or Numpy ndarray
+        Time series data, of which PSD is estimated. When a Quantity array or
+        Numpy ndarray is given, sampling frequency should be given through the
+        keyword argument `fs`, otherwise the default value (`fs=1.0`) is used.
+    num_seg: int, optional
+        Number of segments. The length of segments is adjusted so that
+        overlapping segments cover the entire stretch of the given data. This
+        parameter is ignored if *len_seg* or *freq_res* is given. Default is 8.
+    len_seg: int, optional
+        Length of segments. This parameter is ignored if *freq_res* is given.
+        Default is None (determined from other parameters).
+    freq_res: Quantity or float, optional
+        Desired frequency resolution of the obtained PSD estimate in terms of
+        the interval between adjacent frequency bins. When given as a float, it
+        is taken as frequency in Hz. Default is None (determined from other
+        parameters).
+    overlap: float, optional
+        Overlap between segments represented as a float number between 0 (no
+        overlap) and 1 (complete overlap). Default is 0.5 (half-overlapped).
+    fs: Quantity array or float, optional
+        Specifies the sampling frequency of the input time series. When the
+        input is given as an AnalogSignal, the sampling frequency is taken
+        from its attribute and this parameter is ignored. Default is 1.0.
+    window, nfft, detrend, return_onesided, scaling, axis: optional
+        These arguments are directly passed on to scipy.signal.welch(). See the
+        respective descriptions in the docstring of `scipy.signal.welch()` for
+        usage.
+
+    Returns
+    -------
+    freqs: Quantity array or Numpy ndarray
+        Frequencies associated with the power estimates in `psd`. `freqs` is
+        always a 1-dimensional array irrespective of the shape of the input
+        data. Quantity array is returned if `signal` is AnalogSignal or
+        Quantity array. Otherwise Numpy ndarray containing frequency in Hz is
+        returned.
+    psd: Quantity array or Numpy ndarray
+        PSD estimates of the time series in `signal`. Quantity array is
+        returned if `signal` is AnalogSignal or Quantity array. Otherwise
+        Numpy ndarray is returned.
+    """
+
+    # initialize a parameter dict (to be given to scipy.signal.welch()) with
+    # the parameters directly passed on to scipy.signal.welch()
+    params = {'window': window, 'nfft': nfft,
+              'detrend': detrend, 'return_onesided': return_onesided,
+              'scaling': scaling, 'axis': axis}
+
+    # add the input data to params. When the input is AnalogSignal, the
+    # data is added after rolling the axis for time index to the last
+    data = np.asarray(signal)
+    if isinstance(signal, neo.AnalogSignal):
+        data = np.rollaxis(data, 0, len(data.shape))
+    params['x'] = data
+
+    # if the data is given as AnalogSignal, use its attribute to specify
+    # the sampling frequency
+    if hasattr(signal, 'sampling_rate'):
+        params['fs'] = signal.sampling_rate.rescale('Hz').magnitude
+    else:
+        params['fs'] = fs
+
+    if overlap < 0:
+        raise ValueError("overlap must be greater than or equal to 0")
+    elif 1 <= overlap:
+        raise ValueError("overlap must be less then 1")
+
+    # determine the length of segments (i.e. *nperseg*) according to given
+    # parameters
+    if freq_res is not None:
+        if freq_res <= 0:
+            raise ValueError("freq_res must be positive")
+        dF = freq_res.rescale('Hz').magnitude \
+            if isinstance(freq_res, pq.quantity.Quantity) else freq_res
+        nperseg = int(params['fs'] / dF)
+        if nperseg > data.shape[axis]:
+            raise ValueError("freq_res is too high for the given data size")
+    elif len_seg is not None:
+        if len_seg <= 0:
+            raise ValueError("len_seg must be a positive number")
+        elif data.shape[axis] < len_seg:
+            raise ValueError("len_seg must be shorter than the data length")
+        nperseg = len_seg
+    else:
+        if num_seg <= 0:
+            raise ValueError("num_seg must be a positive number")
+        elif data.shape[axis] < num_seg:
+            raise ValueError("num_seg must be smaller than the data length")
+        # when only *num_seg* is given, *nperseg* is determined by solving the
+        # following equation:
+        #  num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1]
+        #  -----------------   ===============================   ^^^^^^^^^^^
+        # summed segment lengths        total overlap            data length
+        nperseg = int(data.shape[axis] / (num_seg - overlap * (num_seg - 1)))
+    params['nperseg'] = nperseg
+    params['noverlap'] = int(nperseg * overlap)
+
+    freqs, psd = scipy.signal.welch(**params)
+
+    # attach proper units to return values
+    if isinstance(signal, pq.quantity.Quantity):
+        if 'scaling' in params and params['scaling'] == 'spectrum':
+            psd = psd * signal.units * signal.units
+        else:
+            psd = psd * signal.units * signal.units / pq.Hz
+        freqs = freqs * pq.Hz
+
+    return freqs, psd
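# Illustrative sketch (not part of this commit): PSD of a 30 Hz sine embedded
# in noise, using the freq_res argument to request 1 Hz frequency bins; all
# values are invented for illustration only.
import numpy as np
import quantities as pq
import neo
from elephant.spectral import welch_psd

rng = np.random.RandomState(2)
t = np.arange(0, 10.0, 0.001)
wave = np.sin(2 * np.pi * 30.0 * t) + rng.randn(t.size)
sig = neo.AnalogSignal(wave.reshape(-1, 1) * pq.mV,
                       t_start=0 * pq.s, sampling_rate=1000 * pq.Hz)
freqs, psd = welch_psd(sig, freq_res=1.0 * pq.Hz)
# For an AnalogSignal input, psd has shape (num_signals, num_freqs); here the
# single channel peaks at the 30 Hz bin.
print(freqs[np.argmax(psd)])                          # expected: 30.0 Hz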
+
+
+def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5,
+           fs=1.0, window='hanning', nfft=None, detrend='constant',
+           scaling='density', axis=-1):
+    """
+    Estimates coherence between a given pair of analog signals. The estimation
+    is performed with Welch's method: the given pair of data are cut into short
+    segments, cross-spectra are calculated for each pair of segments, and the
+    cross-spectra are averaged and normalized by respective auto_spectra. By
+    default the data are cut into 8 segments with 50% overlap between
+    neighboring segments. These numbers can be changed through respective
+    parameters.
+
+    Parameters
+    ----------
+    x, y: Neo AnalogSignal or Quantity array or Numpy ndarray
+        A pair of time series data, between which coherence is computed. The
+        shapes and the sampling frequencies of `x` and `y` must be identical.
+        When `x` and `y` are not of AnalogSignal, sampling frequency
+        should be specified through the keyword argument `fs`, otherwise the
+        default value (`fs=1.0`) is used.
+    num_seg: int, optional
+        Number of segments. The length of segments is adjusted so that
+        overlapping segments cover the entire stretch of the given data. This
+        parameter is ignored if *len_seg* or *freq_res* is given. Default is 8.
+    len_seg: int, optional
+        Length of segments. This parameter is ignored if *freq_res* is given.
+        Default is None (determined from other parameters).
+    freq_res: Quantity or float, optional
+        Desired frequency resolution of the obtained coherence estimate in
+        terms of the interval between adjacent frequency bins. When given as a
+        float, it is taken as frequency in Hz. Default is None (determined from
+        other parameters).
+    overlap: float, optional
+        Overlap between segments represented as a float number between 0 (no
+        overlap) and 1 (complete overlap). Default is 0.5 (half-overlapped).
+    fs: Quantity array or float, optional
+        Specifies the sampling frequency of the input time series. When the
+        input time series are given as AnalogSignal, the sampling
+        frequency is taken from their attribute and this parameter is ignored.
+        Default is 1.0.
+    window, nfft, detrend, scaling, axis: optional
+        These arguments are directly passed on to a helper function
+        `elephant.spectral._welch()`. See the respective descriptions in the
+        docstring of `elephant.spectral._welch()` for usage.
+
+    Returns
+    -------
+    freqs: Quantity array or Numpy ndarray
+        Frequencies associated with the estimates of coherency and phase lag.
+        `freqs` is always a 1-dimensional array irrespective of the shape of
+        the input data. Quantity array is returned if `x` and `y` are of
+        AnalogSignal or Quantity array. Otherwise Numpy ndarray containing
+        frequency in Hz is returned.
+    coherency: Numpy ndarray
+        Estimate of coherency between the input time series. For each frequency
+        coherency takes a value between 0 and 1, with 0 or 1 representing no or
+        perfect coherence, respectively. When the input arrays `x` and `y` are
+        multi-dimensional, `coherency` is of the same shape as the inputs and
+        frequency is indexed along either the first or the last axis depending
+        on the type of the input: when the input is AnalogSignal, the
+        first axis indexes frequency, otherwise the last axis does.
+    phase_lag: Quantity array or Numpy ndarray
+        Estimate of phase lag in radian between the input time series. For each
+        frequency phase lag takes a value between -PI and PI, positive values
+        meaning phase precession of `x` ahead of `y` and vice versa. Quantity
+        array is returned if `x` and `y` are of AnalogSignal or Quantity
+        array. Otherwise Numpy ndarray containing phase lag in radian is
+        returned. The axis for frequency index is determined in the same way as
+        for `coherency`.
+    """
+
+    # initialize a parameter dict (to be given to _welch()) with
+    # the parameters directly passed on to _welch()
+    params = {'window': window, 'nfft': nfft,
+              'detrend': detrend, 'scaling': scaling, 'axis': axis}
+
+    # When the input is AnalogSignal, the axis for time index is rolled to
+    # the last
+    xdata = np.asarray(x)
+    ydata = np.asarray(y)
+    if isinstance(x, neo.AnalogSignal):
+        xdata = np.rollaxis(xdata, 0, len(xdata.shape))
+        ydata = np.rollaxis(ydata, 0, len(ydata.shape))
+
+    # if the data is given as AnalogSignal, use its attribute to specify
+    # the sampling frequency
+    if hasattr(x, 'sampling_rate'):
+        params['fs'] = x.sampling_rate.rescale('Hz').magnitude
+    else:
+        params['fs'] = fs
+
+    if overlap < 0:
+        raise ValueError("overlap must be greater than or equal to 0")
+    elif 1 <= overlap:
+        raise ValueError("overlap must be less then 1")
+
+    # determine the length of segments (i.e. *nperseg*) according to given
+    # parameters
+    if freq_res is not None:
+        if freq_res <= 0:
+            raise ValueError("freq_res must be positive")
+        dF = freq_res.rescale('Hz').magnitude \
+            if isinstance(freq_res, pq.quantity.Quantity) else freq_res
+        nperseg = int(params['fs'] / dF)
+        if nperseg > xdata.shape[axis]:
+            raise ValueError("freq_res is too high for the given data size")
+    elif len_seg is not None:
+        if len_seg <= 0:
+            raise ValueError("len_seg must be a positive number")
+        elif xdata.shape[axis] < len_seg:
+            raise ValueError("len_seg must be shorter than the data length")
+        nperseg = len_seg
+    else:
+        if num_seg <= 0:
+            raise ValueError("num_seg must be a positive number")
+        elif xdata.shape[axis] < num_seg:
+            raise ValueError("num_seg must be smaller than the data length")
+        # when only *num_seg* is given, *nperseg* is determined by solving the
+        # following equation:
+        #  num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1]
+        #  -----------------   ===============================   ^^^^^^^^^^^
+        # summed segment lengths        total overlap            data length
+        nperseg = int(xdata.shape[axis] / (num_seg - overlap * (num_seg - 1)))
+    params['nperseg'] = nperseg
+    params['noverlap'] = int(nperseg * overlap)
+
+    freqs, Pxy = _welch(xdata, ydata, **params)
+    freqs, Pxx = _welch(xdata, xdata, **params)
+    freqs, Pyy = _welch(ydata, ydata, **params)
+    coherency = np.abs(Pxy)**2 / (np.abs(Pxx) * np.abs(Pyy))
+    phase_lag = np.angle(Pxy)
+
+    # attach proper units to return values
+    if isinstance(x, pq.quantity.Quantity):
+        freqs = freqs * pq.Hz
+        phase_lag = phase_lag * pq.rad
+
+    # When the input is AnalogSignal, the axis for frequency index is
+    # rolled to the first to comply with the Neo convention about time axis
+    if isinstance(x, neo.AnalogSignal):
+        coherency = np.rollaxis(coherency, -1)
+        phase_lag = np.rollaxis(phase_lag, -1)
+
+    return freqs, coherency, phase_lag
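# Illustrative sketch (not part of this commit): coherence between two channels
# sharing a common 15 Hz oscillation with independent noise; all values are
# invented for illustration only.
import numpy as np
import quantities as pq
import neo
from elephant.spectral import welch_cohere

rng = np.random.RandomState(3)
t = np.arange(0, 10.0, 0.001)
common = np.sin(2 * np.pi * 15.0 * t)
x = neo.AnalogSignal((common + rng.randn(t.size)).reshape(-1, 1) * pq.mV,
                     t_start=0 * pq.s, sampling_rate=1000 * pq.Hz)
y = neo.AnalogSignal((common + rng.randn(t.size)).reshape(-1, 1) * pq.mV,
                     t_start=0 * pq.s, sampling_rate=1000 * pq.Hz)
freqs, coherency, phase_lag = welch_cohere(x, y, freq_res=1.0 * pq.Hz)
# Coherence is close to 1 at the shared 15 Hz component and small elsewhere
# (the spectrum returned here is two-sided, hence abs()).
print(abs(freqs[np.argmax(coherency)]))               # expected: 15.0 Hz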

+ 601 - 0
code/elephant/elephant/spike_train_correlation.py

@@ -0,0 +1,601 @@
+# -*- coding: utf-8 -*-
+"""
+This module provides functions to calculate correlations between spike trains.
+
+:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+from __future__ import division
+import numpy as np
+import neo
+import quantities as pq
+
+
+def covariance(binned_sts, binary=False):
+    '''
+    Calculate the NxN matrix of pairwise covariances between all combinations
+    of N binned spike trains.
+
+    For each pair of spike trains :math:`(i,j)`, the covariance :math:`C[i,j]`
+    is obtained by binning :math:`i` and :math:`j` at the desired bin size. Let
+    :math:`b_i` and :math:`b_j` denote the binary vectors and :math:`m_i` and
+    :math:`m_j` their respective averages. Then
+
+    .. math::
+         C[i,j] = <b_i-m_i, b_j-m_j> / (l-1)
+
+    where <..,.> is the scalar product of two vectors and :math:`l` is the
+    number of bins.
+
+    For an input of n spike trains, an n x n matrix is returned containing the
+    covariances for each combination of input spike trains.
+
+    If binary is True, the binned spike trains are clipped to 0 or 1 before
+    computing the covariance, so that the binned vectors :math:`b_i` and
+    :math:`b_j` are binary.
+
+    Parameters
+    ----------
+    binned_sts : elephant.conversion.BinnedSpikeTrain
+        A binned spike train containing the spike trains to be evaluated.
+    binary : bool, optional
+        If True, two spikes of a particular spike train falling in the same bin
+        are counted as 1, resulting in binary binned vectors :math:`b_i`. If
+        False, the binned vectors :math:`b_i` contain the spike counts per bin.
+        Default: False
+
+    Returns
+    -------
+    C : ndarray
+        The square matrix of covariances. The element :math:`C[i,j]=C[j,i]` is
+        the covariance between binned_sts[i] and binned_sts[j].
+
+    Examples
+    --------
+    Generate two Poisson spike trains
+
+    >>> from elephant.spike_train_generation import homogeneous_poisson_process
+    >>> st1 = homogeneous_poisson_process(
+            rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s)
+    >>> st2 = homogeneous_poisson_process(
+            rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s)
+
+    Calculate the covariance matrix.
+
+    >>> from elephant.conversion import BinnedSpikeTrain
+    >>> cov_matrix = covariance(BinnedSpikeTrain([st1, st2], binsize=5*ms))
+
+    The covariance between the spike trains is stored in cov_matrix[0,1] (or
+    cov_matrix[1,0]).
+
+    Notes
+    -----
+    * The spike trains in the binned structure are assumed to all cover the
+      complete time span of binned_sts [t_start,t_stop).
+    '''
+    return __calculate_correlation_or_covariance(
+        binned_sts, binary, corrcoef_norm=False)
+
+
+def corrcoef(binned_sts, binary=False):
+    '''
+    Calculate the NxN matrix of pairwise Pearson's correlation coefficients
+    between all combinations of N binned spike trains.
+
+    For each pair of spike trains :math:`(i,j)`, the correlation coefficient
+    :math:`C[i,j]` is obtained by binning :math:`i` and :math:`j` at the
+    desired bin size. Let :math:`b_i` and :math:`b_j` denote the binary vectors
+    and :math:`m_i` and :math:`m_j` their respective averages. Then
+
+    .. math::
+         C[i,j] = <b_i-m_i, b_j-m_j> /
+                      \sqrt{<b_i-m_i, b_i-m_i>*<b_j-m_j,b_j-m_j>}
+
+    where <..,.> is the scalar product of two vectors.
+
+    For an input of n spike trains, an n x n matrix is returned.
+    Each entry in the matrix is a real number ranging between -1 (perfectly
+    anti-correlated spike trains) and +1 (perfectly correlated spike trains).
+
+    If binary is True, the binned spike trains are clipped to 0 or 1 before
+    computing the correlation coefficients, so that the binned vectors
+    :math:`b_i` and :math:`b_j` are binary.
+
+    Parameters
+    ----------
+    binned_sts : elephant.conversion.BinnedSpikeTrain
+        A binned spike train containing the spike trains to be evaluated.
+    binary : bool, optional
+        If True, two spikes of a particular spike train falling in the same bin
+        are counted as 1, resulting in binary binned vectors :math:`b_i`. If
+        False, the binned vectors :math:`b_i` contain the spike counts per bin.
+        Default: False
+
+    Returns
+    -------
+    C : ndarray
+        The square matrix of correlation coefficients. The element
+        :math:`C[i,j]=C[j,i]` is the Pearson's correlation coefficient between
+        binned_sts[i] and binned_sts[j]. If binned_sts contains only one
+        SpikeTrain, C=1.0.
+
+    Examples
+    --------
+    Generate two Poisson spike trains
+
+    >>> from elephant.spike_train_generation import homogeneous_poisson_process
+    >>> st1 = homogeneous_poisson_process(
+            rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s)
+    >>> st2 = homogeneous_poisson_process(
+            rate=10.0*Hz, t_start=0.0*s, t_stop=10.0*s)
+
+    Calculate the correlation matrix.
+
+    >>> from elephant.conversion import BinnedSpikeTrain
+    >>> cc_matrix = corrcoef(BinnedSpikeTrain([st1, st2], binsize=5*ms))
+
+    The correlation coefficient between the spike trains is stored in
+    cc_matrix[0,1] (or cc_matrix[1,0]).
+
+    Notes
+    -----
+    * The spike trains in the binned structure are assumed to all cover the
+      complete time span of binned_sts [t_start,t_stop).
+    '''
+
+    return __calculate_correlation_or_covariance(
+        binned_sts, binary, corrcoef_norm=True)
+
+
+def __calculate_correlation_or_covariance(binned_sts, binary, corrcoef_norm):
+    '''
+    Helper function for covariance() and corrcoef() that performs the complete
+    calculation for either the covariance (corrcoef_norm=False) or correlation
+    coefficient (corrcoef_norm=True). Both calculations differ only by the
+    denominator.
+
+    Parameters
+    ----------
+    binned_sts : elephant.conversion.BinnedSpikeTrain
+        See covariance() or corrcoef(), respectively.
+    binary : bool
+        See covariance() or corrcoef(), respectively.
+    corrcoef_norm : bool
+        Use normalization factor for the correlation coefficient rather than
+        for the covariance.
+    '''
+    num_neurons = binned_sts.matrix_rows
+
+    # Pre-allocate correlation matrix
+    C = np.zeros((num_neurons, num_neurons))
+
+    # Retrieve unclipped matrix
+    spmat = binned_sts.to_sparse_array()
+
+    # For each row, extract the nonzero column indices and the corresponding
+    # data in the matrix (for performance reasons)
+    bin_idx_unique = []
+    bin_counts_unique = []
+    if binary:
+        for s in spmat:
+            bin_idx_unique.append(s.nonzero()[1])
+    else:
+        for s in spmat:
+            bin_counts_unique.append(s.data)
+
+    # All combinations of spike trains
+    for i in range(num_neurons):
+        for j in range(i, num_neurons):
+            # Numerator:
+            # $$ <b_i-m_i, b_j-m_j>
+            #      = <b_i, b_j> + l*m_i*m_j - <b_i, M_j> - <b_j, M_i>
+            #      =:    ij     + l*m_i*m_j - n_i * m_j  - n_j * m_i
+            #      =     ij     - n_i*n_j/l                         $$
+            # where $n_i$ is the spike count of spike train $i$,
+            # $l$ is the number of bins used (i.e., length of $b_i$ or $b_j$),
+            # and $M_i$ is a vector [m_i, m_i,..., m_i].
+            if binary:
+                # Intersect indices to identify number of coincident spikes in
+                # i and j (more efficient than directly using the dot product)
+                ij = len(np.intersect1d(
+                    bin_idx_unique[i], bin_idx_unique[j], assume_unique=True))
+
+                # Number of spikes in i and j
+                n_i = len(bin_idx_unique[i])
+                n_j = len(bin_idx_unique[j])
+            else:
+                # Calculate dot product b_i*b_j between unclipped matrices
+                ij = spmat[i].dot(spmat[j].transpose()).toarray()[0][0]
+
+                # Number of spikes in i and j
+                n_i = np.sum(bin_counts_unique[i])
+                n_j = np.sum(bin_counts_unique[j])
+
+            numerator = ij - n_i * n_j / binned_sts.num_bins
+
+            # Denominator:
+            if corrcoef_norm:
+                # Correlation coefficient
+
+                # Note:
+                # $$ <b_i-m_i, b_i-m_i>
+                #      = <b_i, b_i> + l*m_i^2 - 2 <b_i, M_i>
+                #      =:    ii     + l*m_i^2 - 2 n_i * m_i
+                #      =     ii     - n_i^2 / l             $$
+                if binary:
+                    # Here, b_i*b_i is just the number of filled bins (since
+                    # each filled bin of a clipped spike train has value equal
+                    # to 1)
+                    ii = len(bin_idx_unique[i])
+                    jj = len(bin_idx_unique[j])
+                else:
+                    # directly calculate the dot product based on the counts of
+                    # all filled entries (more efficient than using the dot
+                    # product of the rows of the sparse matrix)
+                    ii = np.dot(bin_counts_unique[i], bin_counts_unique[i])
+                    jj = np.dot(bin_counts_unique[j], bin_counts_unique[j])
+
+                denominator = np.sqrt(
+                    (ii - (n_i ** 2) / binned_sts.num_bins) *
+                    (jj - (n_j ** 2) / binned_sts.num_bins))
+            else:
+                # Covariance
+
+                # $$ l-1 $$
+                denominator = (binned_sts.num_bins - 1)
+
+            # Fill entry of correlation matrix
+            C[i, j] = C[j, i] = numerator / denominator
+    return np.squeeze(C)
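# Illustrative sketch (not part of this commit): cross-checking covariance()
# and corrcoef() against numpy on two regular toy spike trains; all values are
# invented for illustration only.
import numpy as np
import quantities as pq
import neo
from elephant.conversion import BinnedSpikeTrain
from elephant.spike_train_correlation import corrcoef, covariance

st1 = neo.SpikeTrain(np.arange(0.0, 10.0, 0.3) * pq.s, t_stop=10 * pq.s)
st2 = neo.SpikeTrain(np.arange(0.1, 10.0, 0.7) * pq.s, t_stop=10 * pq.s)
binned = BinnedSpikeTrain([st1, st2], binsize=0.5 * pq.s)
counts = binned.to_array()         # dense (2, num_bins) matrix of spike counts
print(np.allclose(corrcoef(binned), np.corrcoef(counts)))    # expected: True
print(np.allclose(covariance(binned), np.cov(counts)))       # expected: True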
+
+
+def cross_correlation_histogram(
+        binned_st1, binned_st2, window='full', border_correction=False, binary=False,
+        kernel=None, method='speed'):
+    """
+    Computes the cross-correlation histogram (CCH) between two binned spike
+    trains binned_st1 and binned_st2.
+
+    Parameters
+    ----------
+    binned_st1, binned_st2 : BinnedSpikeTrain
+        Binned spike trains to cross-correlate. The two spike trains must have
+        same t_start and t_stop
+    window : string or list (optional)
+        'full': This returns the cross-correlation at each point of overlap,
+        with an output shape of (N+M-1,). At the end-points of the
+        cross-correlogram, the signals do not overlap completely, and
+        boundary effects may be seen.
+        'valid': Mode 'valid' returns output of length
+        max(M, N) - min(M, N) + 1. The cross-correlation product is only
+        given for points where the signals overlap completely. Values
+        outside the signal boundary have no effect.
+        list of integers or quantities (window[0]=minimum lag,
+        window[1]=maximum lag): The entries of window can be integers
+        (number of bins) or quantities (time units of the lag); in the
+        latter case they have to be a multiple of the binsize.
+        Default: 'full'
+    border_correction : bool (optional)
+        whether to correct for the border effect. If True, the value of the
+        CCH at bin b (for b=-H,-H+1, ...,H, where H is the CCH half-length)
+        is multiplied by the correction factor:
+                            (H+1)/(H+1-|b|),
+        which linearly corrects for loss of bins at the edges.
+        Default: False
+    binary : bool (optional)
+        whether to binarize spikes from the same spike train falling in the
+        same bin. If True, such spikes are considered as a single spike;
+        otherwise they are considered as different spikes.
+        Default: False.
+    kernel : array or None (optional)
+        A one dimensional array containing an optional smoothing kernel applied
+        to the resulting CCH. The length N of the kernel indicates the
+        smoothing window. The smoothing window cannot be larger than the
+        maximum lag of the CCH. The kernel is normalized to unit area before
+        being applied to the resulting CCH. Popular choices for the kernel are
+          * normalized boxcar kernel: numpy.ones(N)
+          * hamming: numpy.hamming(N)
+          * hanning: numpy.hanning(N)
+          * bartlett: numpy.bartlett(N)
+        If None is specified, the CCH is not smoothed.
+        Default: None
+    method : string (optional)
+        Defines the algorithm to use. "speed" uses numpy.correlate to calculate
+        the correlation between two binned spike trains using a non-sparse data
+        representation. Due to various optimizations, it is the fastest
+        realization. In contrast, the option "memory" uses its own
+        implementation to calculate the correlation based on sparse matrices,
+        which is more memory efficient but slower than the "speed" option.
+        Default: "speed"
+
+    Returns
+    -------
+    cch : AnalogSignal
+        Containing the cross-correlation histogram between binned_st1 and binned_st2.
+
+        The central bin of the histogram represents correlation at zero
+        delay. Offset bins correspond to correlations at delays equal to the
+        difference between the spike times of binned_st2 and those of
+        binned_st1: an entry at a positive lag corresponds to a spike in
+        binned_st2 following a spike in binned_st1 by that many bins, and an
+        entry at a negative lag corresponds to a spike in binned_st1
+        following a spike in binned_st2.
+
+        To illustrate this definition, consider the two spike trains:
+        binned_st1: 0 0 0 0 1 0 0 0 0 0 0
+        binned_st2: 0 0 0 0 0 0 0 1 0 0 0
+        Here, the CCH will have an entry of 1 at lag h=+3.
+
+        Consistent with the definition of AnalogSignals, the time axis
+        represents the left bin borders of each histogram bin. For example,
+        the time axis might be:
+        np.array([-2.5, -1.5, -0.5, 0.5, 1.5]) * ms
+    bin_ids : ndarray of int
+        Contains the IDs of the individual histogram bins, where the central
+        bin has ID 0, bins to the left have negative IDs and bins to the right
+        have positive IDs, e.g.,:
+        np.array([-3, -2, -1, 0, 1, 2, 3])
+
+    Example
+    -------
+        Plot the cross-correlation histogram between two Poisson spike trains
+        >>> import elephant
+        >>> import matplotlib.pyplot as plt
+        >>> import quantities as pq
+
+        >>> binned_st1 = elephant.conversion.BinnedSpikeTrain(
+                elephant.spike_train_generation.homogeneous_poisson_process(
+                    10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
+                binsize=5. * pq.ms)
+        >>> binned_st2 = elephant.conversion.BinnedSpikeTrain(
+                elephant.spike_train_generation.homogeneous_poisson_process(
+                    10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
+                binsize=5. * pq.ms)
+
+        >>> cc_hist = elephant.spike_train_correlation.cross_correlation_histogram(
+                binned_st1, binned_st2, window=[-30,30],
+                border_correction=False,
+                binary=False, kernel=None, method='memory')
+
+        >>> plt.bar(
+                left=cc_hist[0].times.magnitude,
+                height=cc_hist[0][:, 0].magnitude,
+                width=cc_hist[0].sampling_period.magnitude)
+        >>> plt.xlabel('time (' + str(cc_hist[0].times.units) + ')')
+        >>> plt.ylabel('cross-correlation histogram')
+        >>> plt.axis('tight')
+        >>> plt.show()
+
+    Alias
+    -----
+    cch
+    """
+    def _border_correction(counts, max_num_bins, l, r):
+        # Correct the values taking into account the missing contributions
+        # at the edges
+        correction = float(max_num_bins + 1) / np.array(
+            max_num_bins + 1 - abs(
+                np.arange(l, r + 1)), float)
+        return counts * correction
+
+    def _kernel_smoothing(counts, kern, l, r):
+        # Define the kern for smoothing as an ndarray
+        if hasattr(kern, '__iter__'):
+            if len(kern) > np.abs(l) + np.abs(r) + 1:
+                raise ValueError(
+                    'The length of the kernel cannot be larger than the '
+                    'length %d of the resulting CCH.' % (
+                        np.abs(l) + np.abs(r) + 1))
+            kern = np.array(kern, dtype=float)
+            kern = 1. * kern / sum(kern)
+        # Check kern parameter
+        else:
+            raise ValueError('Invalid smoothing kernel.')
+
+        # Smooth the cross-correlation histogram with the kern
+        return np.convolve(counts, kern, mode='same')
+
+    def _cch_memory(binned_st1, binned_st2, win, border_corr, binary, kern):
+
+        # Retrieve unclipped matrix
+        st1_spmat = binned_st1.to_sparse_array()
+        st2_spmat = binned_st2.to_sparse_array()
+        binsize = binned_st1.binsize
+        max_num_bins = max(binned_st1.num_bins, binned_st2.num_bins)
+
+        # Set the time window in which is computed the cch
+        if not isinstance(win, str):
+            # Window parameter given in number of bins (integer)
+            if isinstance(win[0], int) and isinstance(win[1], int):
+                # Check the window parameter values
+                if win[0] >= win[1] or win[0] <= -max_num_bins \
+                        or win[1] >= max_num_bins:
+                    raise ValueError(
+                        "The window exceeds the length of the spike trains")
+                # Assign left and right edges of the cch
+                l, r = win[0], win[1]
+            # Window parameter given in time units
+            else:
+                # Check the window parameter values
+                if win[0].rescale(binsize.units).magnitude % \
+                    binsize.magnitude != 0 or win[1].rescale(
+                        binsize.units).magnitude % binsize.magnitude != 0:
+                    raise ValueError(
+                        "The window has to be a multiple of the binsize")
+                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
+                        or win[1] >= max_num_bins * binsize:
+                    raise ValueError("The window exceeds the length of the"
+                                     " spike trains")
+                # Assign left and right edges of the cch
+                l, r = int(win[0].rescale(binsize.units) / binsize), int(
+                    win[1].rescale(binsize.units) / binsize)
+        # Case without explicit window parameter
+        elif win == 'full':
+            # cch computed for all the possible entries
+            # Assign left and right edges of the cch
+            r = binned_st2.num_bins - 1
+            l = - binned_st1.num_bins + 1
+        elif win == 'valid':
+            # cch computed only for the entries where the spike trains
+            # overlap completely
+            # Assign left and right edges of the cch
+            r = max(binned_st2.num_bins - binned_st1.num_bins, 0)
+            l = min(binned_st2.num_bins - binned_st1.num_bins, 0)
+        # Check the mode parameter
+        else:
+            raise KeyError("Invalid window parameter")
+
+        # For each row, extract the nonzero column indices
+        # and the corresponding data in the matrix (for performance reasons)
+        st1_bin_idx_unique = st1_spmat.nonzero()[1]
+        st2_bin_idx_unique = st2_spmat.nonzero()[1]
+
+        # Case with binary entries
+        if binary:
+            st1_bin_counts_unique = np.array(st1_spmat.data > 0, dtype=int)
+            st2_bin_counts_unique = np.array(st2_spmat.data > 0, dtype=int)
+        # Case with all values
+        else:
+            st1_bin_counts_unique = st1_spmat.data
+            st2_bin_counts_unique = st2_spmat.data
+
+        # Initialize the counts to an array of zeroes,
+        # and the bin IDs to integers
+        # spanning the time axis
+        counts = np.zeros(np.abs(l) + np.abs(r) + 1)
+        bin_ids = np.arange(l, r + 1)
+        # Compute the CCH at lags in l,...,r only
+        for idx, i in enumerate(st1_bin_idx_unique):
+            il = np.searchsorted(st2_bin_idx_unique, l + i)
+            ir = np.searchsorted(st2_bin_idx_unique, r + i, side='right')
+            timediff = st2_bin_idx_unique[il:ir] - i
+            assert ((timediff >= l) & (timediff <= r)).all(), (
+                'Not all the entries of cch lie in the window')
+            counts[timediff + np.abs(l)] += (st1_bin_counts_unique[idx] *
+                                             st2_bin_counts_unique[il:ir])
+            st2_bin_idx_unique = st2_bin_idx_unique[il:]
+            st2_bin_counts_unique = st2_bin_counts_unique[il:]
+        # Border correction
+        if border_corr is True:
+            counts = _border_correction(counts, max_num_bins, l, r)
+        if kern is not None:
+            # Smoothing
+            counts = _kernel_smoothing(counts, kern, l, r)
+        # Transform the array count into an AnalogSignal
+        cch_result = neo.AnalogSignal(
+            signal=counts.reshape(counts.size, 1),
+            units=pq.dimensionless,
+            t_start=(bin_ids[0] - 0.5) * binned_st1.binsize,
+            sampling_period=binned_st1.binsize)
+        # Return only the hist_bins bins and counts before and after the
+        # central one
+        return cch_result, bin_ids
+
+    def _cch_speed(binned_st1, binned_st2, win, border_corr, binary, kern):
+
+        # Retrieve the arrays of the binned spike trains
+        st1_arr = binned_st1.to_array()[0, :]
+        st2_arr = binned_st2.to_array()[0, :]
+        binsize = binned_st1.binsize
+
+        # Convert to the binary representation if requested
+        if binary:
+            st1_arr = np.array(st1_arr > 0, dtype=int)
+            st2_arr = np.array(st2_arr > 0, dtype=int)
+        max_num_bins = max(len(st1_arr), len(st2_arr))
+
+        # Cross correlate the spiketrains
+
+        # Case explicit temporal window
+        if not isinstance(win, str):
+            # Window parameter given in number of bins (integer)
+            if isinstance(win[0], int) and isinstance(win[1], int):
+                # Check the window parameter values
+                if win[0] >= win[1] or win[0] <= -max_num_bins \
+                        or win[1] >= max_num_bins:
+                    raise ValueError(
+                        "The window exceeds the length of the spike trains")
+                # Assign left and right edges of the cch
+                l, r = win
+            # Window parameter given in time units
+            else:
+                # Check the window parameter values
+                if win[0].rescale(binsize.units).magnitude % \
+                    binsize.magnitude != 0 or win[1].rescale(
+                        binsize.units).magnitude % binsize.magnitude != 0:
+                    raise ValueError(
+                        "The window has to be a multiple of the binsize")
+                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
+                        or win[1] >= max_num_bins * binsize:
+                    raise ValueError("The window exceeds the length of the"
+                                     " spike trains")
+                # Assign left and right edges of the cch
+                l, r = int(win[0].rescale(binsize.units) / binsize), int(
+                    win[1].rescale(binsize.units) / binsize)
+
+            # Zero padding
+            st1_arr = np.pad(
+                st1_arr, (int(np.abs(np.min([l, 0]))), np.max([r, 0])),
+                mode='constant')
+            cch_mode = 'valid'
+        else:
+            # Assign the edges of the cch for the different mode parameters
+            if win == 'full':
+                # Assign left and right edges of the cch
+                r = binned_st2.num_bins - 1
+                l = - binned_st1.num_bins + 1
+            # cch computed only for the entries that completely overlap
+            elif win == 'valid':
+                # Assign left and right edges of the cch
+                r = max(binned_st2.num_bins - binned_st1.num_bins, 0)
+                l = min(binned_st2.num_bins - binned_st1.num_bins, 0)
+            cch_mode = win
+
+        # Cross correlate the spike trains
+        counts = np.correlate(st2_arr, st1_arr, mode=cch_mode)
+        bin_ids = np.r_[l:r + 1]
+        # Border correction
+        if border_corr is True:
+            counts = _border_correction(counts, max_num_bins, l, r)
+        if kern is not None:
+            # Smoothing
+            counts = _kernel_smoothing(counts, kern, l, r)
+        # Transform the array count into an AnalogSignal
+        cch_result = neo.AnalogSignal(
+            signal=counts.reshape(counts.size, 1),
+            units=pq.dimensionless,
+            t_start=(bin_ids[0] - 0.5) * binned_st1.binsize,
+            sampling_period=binned_st1.binsize)
+        # Return the cross-correlation histogram and the bin IDs of its lags
+        return cch_result, bin_ids
+
+    # Check that each spike train is binned as a single (one-dimensional)
+    # row and that both are binned with the same temporal resolution
+    if not binned_st1.matrix_rows == 1:
+        raise AssertionError("Spike trains must be one-dimensional")
+    if not binned_st2.matrix_rows == 1:
+        raise AssertionError("Spike trains must be one-dimensional")
+    if not binned_st1.binsize == binned_st2.binsize:
+        raise AssertionError("Bin sizes must be equal")
+
+    # Check that t_start and t_stop are identical (to be dropped once the
+    # pad functionality is available in the BinnedSpikeTrain class)
+    if not binned_st1.t_start == binned_st2.t_start:
+        raise AssertionError("Spike trains must have the same t_start")
+    if not binned_st1.t_stop == binned_st2.t_stop:
+        raise AssertionError("Spike trains must have the same t_stop")
+
+    if method == "memory":
+        cch_result, bin_ids = _cch_memory(
+            binned_st1, binned_st2, window, border_correction, binary,
+            kernel)
+    elif method == "speed":
+        cch_result, bin_ids = _cch_speed(
+            binned_st1, binned_st2, window, border_correction, binary,
+            kernel)
+
+    return cch_result, bin_ids
+
+# Alias for common abbreviation
+cch = cross_correlation_histogram
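Not part of the committed file: a minimal usage sketch of the cross-correlation histogram, assuming the function is importable from elephant.spike_train_correlation and that st1 and st2 are placeholder neo.SpikeTrain objects binned with elephant.conversion.BinnedSpikeTrain at the same binsize, t_start and t_stop; the keyword values simply restate the parameters handled above:

    >>> import quantities as pq
    >>> import elephant.conversion as conv
    >>> import elephant.spike_train_correlation as stc
    >>> binned_st1 = conv.BinnedSpikeTrain(st1, binsize=1 * pq.ms)
    >>> binned_st2 = conv.BinnedSpikeTrain(st2, binsize=1 * pq.ms)
    >>> cch_result, lags = stc.cch(
    ...     binned_st1, binned_st2, window='full', border_correction=False,
    ...     binary=False, kernel=None, method='speed')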

+ 412 - 0
code/elephant/elephant/spike_train_dissimilarity.py

@@ -0,0 +1,412 @@
+# -*- coding: utf-8 -*-
+"""
+In neuroscience one often wants to evaluate how similar or dissimilar pairs
+or even large sets of spike trains are. For this purpose various spike train
+dissimilarity measures have been introduced in the literature. They differ,
+e.g., in whether they possess the mathematical properties of a metric or in
+whether they are time-scale dependent. Well-known representatives of spike
+train dissimilarity measures are the Victor-Purpura distance and the van
+Rossum distance implemented in this module, both of which are metrics in the
+mathematical sense and time-scale dependent.
+
+:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import quantities as pq
+import numpy as np
+import scipy as sp
+import elephant.kernels as kernels
+from neo.core import SpikeTrain
+
+# Problem of conversion from Python 2 to Python 3:
+# 'xrange' in Python 2 is 'range' in Python 3.
+try:
+    xrange
+except NameError:
+    xrange = range
+
+
+def _create_matrix_from_indexed_function(
+        shape, func, symmetric_2d=False, **func_params):
+    mat = np.empty(shape)
+    if symmetric_2d:
+        for i in xrange(shape[0]):
+            for j in xrange(i, shape[1]):
+                mat[i, j] = mat[j, i] = func(i, j, **func_params)
+    else:
+        for idx in np.ndindex(*shape):
+            mat[idx] = func(*idx, **func_params)
+    return mat
+
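As a quick illustration (not part of the committed file), the helper above fills a matrix by evaluating the supplied function at every index pair, mirroring the upper triangle when symmetric_2d is set:

    >>> mat = _create_matrix_from_indexed_function(
    ...     (3, 3), lambda i, j: float(abs(i - j)), symmetric_2d=True)
    >>> # mat == [[0., 1., 2.], [1., 0., 1.], [2., 1., 0.]]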
+
+def victor_purpura_dist(
+        trains, q=1.0 * pq.Hz, kernel=None, sort=True, algorithm='fast'):
+    """
+    Calculates the Victor-Purpura's (VP) distance. It is often denoted as
+    :math:`D^{\\text{spike}}[q]`.
+
+    It is defined as the minimal cost of transforming spike train `a` into
+    spike train `b` by using the following operations:
+
+        * Inserting or deleting a spike (cost 1.0).
+        * Shifting a spike from :math:`t` to :math:`t'` (cost :math:`q
+          \\cdot |t - t'|`).
+
+    A detailed description can be found in
+    *Victor, J. D., & Purpura, K. P. (1996). Nature and precision of
+    temporal coding in visual cortex: a metric-space analysis. Journal of
+    Neurophysiology.*
+
+    Given the average number of spikes :math:`n` in a spike train and
+    :math:`N` spike trains the run-time complexity of this function is
+    :math:`O(N^2 n^2)` and :math:`O(N^2 + n^2)` memory will be needed.
+
+    Parameters
+    ----------
+    trains : Sequence of :class:`neo.core.SpikeTrain` objects of
+        which the distance will be calculated pairwise.
+    q: Quantity scalar
+        Cost factor for spike shifts as inverse time scalar.
+        Extreme values :math:`q=0` meaning no cost for any shift of
+        spikes, or :math:`q=np.inf` meaning infinite cost for any
+        spike shift and hence exclusion of spike shifts, are explicitly
+        allowed. If `kernel` is not `None`, :math:`q` will be ignored.
+        Default: 1.0 * pq.Hz
+    kernel: :class:`.kernels.Kernel`
+        Kernel to use in the calculation of the distance. If `kernel` is
+        `None`, an unnormalized triangular kernel with standard deviation
+        of :math:`2.0/(q * sqrt(6.0))` corresponding to a half width of
+        :math:`2.0/q` will be used. Usage of the default value calculates
+        the Victor-Purpura distance correctly with a triangular kernel of
+        the suitable width. The choice of another kernel is enabled, but
+        this leaves the framework of Victor-Purpura distances.
+        Default: None
+    sort: bool
+        Spike trains with sorted spike times will be needed for the
+        calculation. You can set `sort` to `False` if you know that your
+        spike trains are already sorted to decrease calculation time.
+        Default: True
+    algorithm: string
+        Allowed values are 'fast' or 'intuitive', each selecting an
+        algorithm with which to calculate the pairwise Victor-Purpura distance.
+        Typically 'fast' should be used: it always gives the same result as
+        'intuitive', but is faster with the current implementation in plain
+        Python and NumPy.
+        Default: 'fast'
+
+    Returns
+    -------
+        2-D array
+        Matrix containing the VP distance of all pairs of spike trains.
+
+    Example
+    -------
+        import elephant.spike_train_dissimilarity as stdm
+        q   = 1.0 / (10.0 * pq.ms)
+        st_a = SpikeTrain([10, 20, 30], units='ms', t_stop= 1000.0)
+        st_b = SpikeTrain([12, 24, 30], units='ms', t_stop= 1000.0)
+        vp_f = stdm.victor_purpura_dist([st_a, st_b], q)[0, 1]
+        vp_i = stdm.victor_purpura_dist(
+                   [st_a, st_b], q, algorithm='intuitive')[0, 1]
+    """
+    for train in trains:
+        if not (isinstance(train, (pq.quantity.Quantity, SpikeTrain)) and
+                train.dimensionality.simplified ==
+                pq.Quantity(1, "s").dimensionality.simplified):
+            raise TypeError("Spike trains must have a time unit.")
+
+    if not (isinstance(q, pq.quantity.Quantity) and
+            q.dimensionality.simplified ==
+            pq.Quantity(1, "Hz").dimensionality.simplified):
+        raise TypeError("q must be a rate quantity.")
+
+    if kernel is None:
+        if q == 0.0:
+            num_spikes = np.atleast_2d([st.size for st in trains])
+            return np.absolute(num_spikes.T - num_spikes)
+        elif q == np.inf:
+            num_spikes = np.atleast_2d([st.size for st in trains])
+            return num_spikes.T + num_spikes
+        else:
+            kernel = kernels.TriangularKernel(2.0 / (np.sqrt(6.0) * q))
+
+    if sort:
+        trains = [np.sort(st.view(type=pq.Quantity)) for st in trains]
+
+    def compute(i, j):
+        if i == j:
+            return 0.0
+        else:
+            if algorithm == 'fast':
+                return _victor_purpura_dist_for_st_pair_fast(
+                    trains[i], trains[j], kernel)
+            elif algorithm == 'intuitive':
+                return _victor_purpura_dist_for_st_pair_intuitive(
+                    trains[i], trains[j], q)
+            else:
+                raise NameError("algorithm must be either 'fast' "
+                                "or 'intuitive'.")
+
+    return _create_matrix_from_indexed_function(
+        (len(trains), len(trains)), compute, kernel.is_symmetric())
+
+
+def _victor_purpura_dist_for_st_pair_fast(train_a, train_b, kernel):
+    """
+    The algorithm used is based on the one given in
+
+    J. D. Victor and K. P. Purpura, Nature and precision of temporal
+    coding in visual cortex: a metric-space analysis, Journal of
+    Neurophysiology, 1996.
+
+    It constructs a matrix G[i, j] containing the minimal cost when only
+    considering the first i and j spikes of the spike trains. However, one
+    never needs to store more than one row and one column at the same time
+    for calculating the VP distance.
+    cost[0, :cost.shape[1] - i] corresponds to G[i:, i]. In the same way
+    cost[1, :cost.shape[1] - i] corresponds to G[i, i:].
+
+    Moreover, the minimum operation on the costs of the three kind of actions
+    (delete, insert or move spike) can be split up in two operations. One
+    operation depends only on the already calculated costs and kernel
+    evaluation (insertion of spike vs moving a spike). The other minimum
+    depends on that result and the cost of deleting a spike. This operation
+    always depends on the last calculated element in the cost array and
+    corresponds to a recursive application of
+    f(accumulated_min[i]) = min(f(accumulated_min[i-1]), accumulated_min[i])
+    + 1. That '+1' can be excluded from this function if the summed value for
+    all recursive applications is added upfront to accumulated_min.
+    Afterwards it has to be removed again except one for the currently
+    processed spike to get the real costs up to the evaluation of i.
+
+    All currently calculated costs are kept reduced by one compared to their
+    true value because this saves a number of additions, as in most cases the
+    cost would be increased by exactly one (the only exception is shifting,
+    but that calculation already involves the addition of a constant, thus
+    leaving the number of operations the same). The increase by one is added
+    back after calculating all minima, by shifting decreasing_sequence by one
+    when removing it from accumulated_min.
+
+    Parameters
+    ----------
+    train_a, train_b : :class:`neo.core.SpikeTrain` objects of
+        which the Victor-Purpura distance will be calculated pairwise.
+    kernel: :class:`.kernels.Kernel`
+        Kernel to use in the calculation of the distance.
+
+    Returns
+    -------
+        float
+        The Victor-Purpura distance of train_a and train_b
+    """
+
+    if train_a.size <= 0 or train_b.size <= 0:
+        return max(train_a.size, train_b.size)
+
+    if train_a.size < train_b.size:
+        train_a, train_b = train_b, train_a
+
+    min_dim, max_dim = train_b.size, train_a.size + 1
+    cost = np.asfortranarray(np.tile(np.arange(float(max_dim)), (2, 1)))
+    decreasing_sequence = np.asfortranarray(cost[:, ::-1])
+    kern = kernel((np.atleast_2d(train_a).T.view(type=pq.Quantity) -
+                   train_b.view(type=pq.Quantity)))
+    as_fortran = np.asfortranarray(
+        ((np.sqrt(6.0) * kernel.sigma) * kern).simplified)
+    k = 1 - 2 * as_fortran
+
+    for i in xrange(min_dim):
+        # determine G[i, i] == accumulated_min[:, 0]
+        accumulated_min = cost[:, :-i - 1] + k[i:, i]
+        accumulated_min[1, :train_b.size - i] = \
+            cost[1, :train_b.size - i] + k[i, i:]
+        accumulated_min = np.minimum(
+            accumulated_min,  # shift
+            cost[:, 1:max_dim - i])  # insert
+        acc_dim = accumulated_min.shape[1]
+        # delete vs min(insert, shift)
+        accumulated_min[:, 0] = min(cost[1, 1], accumulated_min[0, 0])
+        # determine G[i, :] and G[:, i] by propagating minima.
+        accumulated_min += decreasing_sequence[:, -acc_dim - 1:-1]
+        accumulated_min = np.minimum.accumulate(accumulated_min, axis=1)
+        cost[:, :acc_dim] = accumulated_min - decreasing_sequence[:, -acc_dim:]
+    return cost[0, -min_dim - 1]
+
+
+def _victor_purpura_dist_for_st_pair_intuitive(
+                                             train_a, train_b, q=1.0 * pq.Hz):
+    """
+    Function to calculate the Victor-Purpura distance between two spike trains
+    described in *J. D. Victor and K. P. Purpura, Nature and precision of
+    temporal coding in visual cortex: a metric-space analysis,
+    J Neurophysiol,76(2):1310-1326, 1996*
+
+    This function originates from the spikes module in the signals folder
+    of the software package NeuroTools. It represents the 'intuitive'
+    implementation of the Victor-Purpura distance. In terms of calculation
+    time this code is currently uncompetitive with the implementation in
+    _victor_purpura_dist_for_st_pair_fast. However, the discrepancy in
+    calculation time of the two algorithms is expected to decrease
+    drastically once the speed difference between plain Python and NumPy
+    routines is removed, e.g. when languages like Cython take over. The
+    decision would then be between an intuitive and probably slightly slower
+    algorithm and a less transparent solution of an optimization problem
+    under boundary conditions. Hence this algorithm is also kept here.
+
+    Parameters
+    ----------
+    train_a, train_b : :class:`neo.core.SpikeTrain` objects of
+        which the Victor-Purpura distance will be calculated pairwise.
+    q : Quantity scalar of rate dimension
+        The cost parameter.
+        Default: 1.0 * pq.Hz
+
+    Returns
+    -------
+        float
+        The Victor-Purpura distance of train_a and train_b
+    """
+    nspk_a = len(train_a)
+    nspk_b = len(train_b)
+    scr = np.zeros((nspk_a+1, nspk_b+1))
+    scr[:, 0] = xrange(0, nspk_a+1)
+    scr[0, :] = xrange(0, nspk_b+1)
+
+    if nspk_a > 0 and nspk_b > 0:
+        for i in xrange(1, nspk_a+1):
+            for j in xrange(1, nspk_b+1):
+                scr[i, j] = min(scr[i-1, j]+1, scr[i, j-1]+1)
+                scr[i, j] = min(scr[i, j], scr[i-1, j-1] + np.float64((
+                               q*abs(train_a[i-1]-train_b[j-1])).simplified))
+    return scr[nspk_a, nspk_b]
+
+
+def van_rossum_dist(trains, tau=1.0 * pq.s, sort=True):
+    """
+    Calculates the van Rossum distance.
+
+    It is defined as Euclidean distance of the spike trains convolved with a
+    causal decaying exponential smoothing filter. A detailed description can
+    be found in *Rossum, M. C. W. (2001). A novel spike distance. Neural
+    Computation, 13(4), 751-763.* This implementation is normalized to yield
+    a distance of 1.0 for the distance between an empty spike train and a
+    spike train with a single spike. Divide the result by sqrt(2.0) to get
+    the normalization used in the cited paper.
+
+    Given :math:`N` spike trains with :math:`n` spikes on average the run-time
+    complexity of this function is :math:`O(N^2 n)`.
+
+    Parameters
+    ----------
+    trains : Sequence of :class:`neo.core.SpikeTrain` objects of
+        which the van Rossum distance will be calculated pairwise.
+    tau : Quantity scalar
+        Decay rate of the exponential function as time scalar. Controls for
+        which time scale the metric will be sensitive. May also be
+        :const:`scipy.inf`, which will lead to only measuring differences in
+        spike count.
+        Default: 1.0 * pq.s
+    sort : bool
+        Spike trains with sorted spike times might be needed for the
+        calculation. You can set `sort` to `False` if you know that your
+        spike trains are already sorted to decrease calculation time.
+        Default: True
+
+    Returns
+    -------
+        2-D array
+        Matrix containing the van Rossum distances for all pairs of
+        spike trains.
+
+    Example
+    -------
+        import elephant.spike_train_dissimilarity as stdm
+        tau = 10.0 * pq.ms
+        st_a = SpikeTrain([10, 20, 30], units='ms', t_stop= 1000.0)
+        st_b = SpikeTrain([12, 24, 30], units='ms', t_stop= 1000.0)
+        vr   = stdm.van_rossum_dist([st_a, st_b], tau)[0, 1]
+    """
+    for train in trains:
+        if not (isinstance(train, (pq.quantity.Quantity, SpikeTrain)) and
+                train.dimensionality.simplified ==
+                pq.Quantity(1, "s").dimensionality.simplified):
+            raise TypeError("Spike trains must have a time unit.")
+
+    if not (isinstance(tau, pq.quantity.Quantity) and
+            tau.dimensionality.simplified ==
+            pq.Quantity(1, "s").dimensionality.simplified):
+        raise TypeError("tau must be a time quantity.")
+
+    if tau == 0:
+        spike_counts = [st.size for st in trains]
+        return np.sqrt(spike_counts + np.atleast_2d(spike_counts).T)
+    elif tau == np.inf:
+        spike_counts = [st.size for st in trains]
+        return np.absolute(spike_counts - np.atleast_2d(spike_counts).T)
+
+    k_dist = _summed_dist_matrix(
+        [st.view(type=pq.Quantity) for st in trains], tau, not sort)
+    vr_dist = np.empty_like(k_dist)
+    for i, j in np.ndindex(k_dist.shape):
+        vr_dist[i, j] = (
+            k_dist[i, i] + k_dist[j, j] - k_dist[i, j] - k_dist[j, i])
+    return sp.sqrt(vr_dist)
+
+
+def _summed_dist_matrix(spiketrains, tau, presorted=False):
+    # The algorithm underlying this implementation is described in
+    # Houghton, C., & Kreuz, T. (2012). On the efficient calculation of van
+    # Rossum distances. Network: Computation in Neural Systems, 23(1-2),
+    # 48-58. We would like to remark that in this paper in formula (9) the
+    # left side of the equation should be divided by two.
+    #
+    # Given N spiketrains with n entries on average the run-time complexity is
+    # O(N^2 * n). O(N^2 + N * n) memory will be needed.
+
+    if len(spiketrains) <= 0:
+        return np.zeros((0, 0))
+
+    if not presorted:
+        spiketrains = [v.copy() for v in spiketrains]
+        for v in spiketrains:
+            v.sort()
+
+    sizes = np.asarray([v.size for v in spiketrains])
+    values = np.empty((len(spiketrains), max(1, sizes.max())))
+    values.fill(np.nan)
+    for i, v in enumerate(spiketrains):
+        if v.size > 0:
+            values[i, :v.size] = \
+                (v / tau * pq.dimensionless).simplified
+
+    exp_diffs = np.exp(values[:, :-1] - values[:, 1:])
+    markage = np.zeros(values.shape)
+    for u in xrange(len(spiketrains)):
+        markage[u, 0] = 0
+        for i in xrange(sizes[u] - 1):
+            markage[u, i + 1] = (markage[u, i] + 1.0) * exp_diffs[u, i]
+
+    # Same spiketrain terms
+    D = np.empty((len(spiketrains), len(spiketrains)))
+    D[np.diag_indices_from(D)] = sizes + 2.0 * np.sum(markage, axis=1)
+
+    # Cross spiketrain terms
+    for u in xrange(D.shape[0]):
+        all_ks = np.searchsorted(values[u], values, 'left') - 1
+        for v in xrange(u):
+            js = np.searchsorted(values[v], values[u], 'right') - 1
+            ks = all_ks[v]
+            slice_j = np.s_[np.searchsorted(js, 0):sizes[u]]
+            slice_k = np.s_[np.searchsorted(ks, 0):sizes[v]]
+            D[u, v] = np.sum(
+                np.exp(values[v][js[slice_j]] - values[u][slice_j]) *
+                (1.0 + markage[v][js[slice_j]]))
+            D[u, v] += np.sum(
+                np.exp(values[u][ks[slice_k]] - values[v][slice_k]) *
+                (1.0 + markage[u][ks[slice_k]]))
+            D[v, u] = D[u, v]
+
+    return D
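A self-contained variant of the docstring examples above, with the imports spelled out; the spike times are arbitrary and the module path assumes this file is importable as elephant.spike_train_dissimilarity:

    >>> import quantities as pq
    >>> from neo.core import SpikeTrain
    >>> import elephant.spike_train_dissimilarity as stdm
    >>> st_a = SpikeTrain([10, 20, 30], units='ms', t_stop=1000.0)
    >>> st_b = SpikeTrain([12, 24, 30], units='ms', t_stop=1000.0)
    >>> vp = stdm.victor_purpura_dist([st_a, st_b], 1.0 / (10.0 * pq.ms))[0, 1]
    >>> vr = stdm.van_rossum_dist([st_a, st_b], 10.0 * pq.ms)[0, 1]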

+ 970 - 0
code/elephant/elephant/spike_train_generation.py

@@ -0,0 +1,970 @@
+# -*- coding: utf-8 -*-
+"""
+Functions to generate spike trains from analog signals,
+or to generate random spike trains.
+
+Some functions are based on the NeuroTools stgen module, which was mostly
+written by Eilif Muller, or on the NeuroTools signals.analogs module.
+
+:copyright: Copyright 2015 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+from __future__ import division
+import numpy as np
+from quantities import ms, mV, Hz, Quantity, dimensionless
+from neo import SpikeTrain
+import random
+from elephant.spike_train_surrogates import dither_spike_train
+import warnings
+
+
+def spike_extraction(signal, threshold=0.0 * mV, sign='above',
+                     time_stamps=None, extr_interval=(-2 * ms, 4 * ms)):
+    """
+    Return the peak times for all events that cross threshold and the
+    waveforms. Usually used for extracting spikes from a membrane
+    potential to calculate waveform properties.
+    Similar to spike_train_generation.peak_detection.
+
+    Parameters
+    ----------
+    signal : neo AnalogSignal object
+        'signal' is an analog signal.
+    threshold : A quantity, e.g. in mV
+        'threshold' contains a value that must be reached for an event
+        to be detected. Default: 0.0 * mV.
+    sign : 'above' or 'below'
+        'sign' determines whether to count thresholding crossings
+        that cross above or below the threshold. Default: 'above'.
+    time_stamps: None, quantity array or object with .times interface
+        If 'time_stamps' is a quantity array or exposes a quantity array
+        through the .times interface, it provides the time stamps
+        around which the waveform is extracted. If it is None, the
+        function peak_detection is used to calculate the time_stamps
+        from signal. Default: None.
+    extr_interval: unpackable time quantities, len == 2
+        'extr_interval' specifies the time interval around the
+        time_stamps where the waveform is extracted. The default is an
+        interval of '6 ms'. Default: (-2 * ms, 4 * ms).
+
+    Returns
+    -------
+    result_st : neo SpikeTrain object
+        'result_st' contains the time_stamps of each of the spikes and
+        the waveforms in result_st.waveforms.
+    """
+    # Get spike time_stamps
+    if time_stamps is None:
+        time_stamps = peak_detection(signal, threshold, sign=sign)
+    elif hasattr(time_stamps, 'times'):
+        time_stamps = time_stamps.times
+    elif not isinstance(time_stamps, Quantity):
+        raise TypeError("time_stamps must be None, a quantity array or" +
+                        " expose the .times interface")
+
+    if len(time_stamps) == 0:
+        return SpikeTrain(time_stamps, units=signal.times.units,
+                          t_start=signal.t_start, t_stop=signal.t_stop,
+                          waveforms=np.array([]),
+                          sampling_rate=signal.sampling_rate)
+
+    # Unpack the extraction interval from tuple or array
+    extr_left, extr_right = extr_interval
+    if extr_left > extr_right:
+        raise ValueError("extr_interval[0] must be < extr_interval[1]")
+
+    if any(np.diff(time_stamps) < extr_interval[1]):
+        warnings.warn("Waveforms overlap.", UserWarning)
+
+    data_left = ((extr_left * signal.sampling_rate).simplified).magnitude
+
+    data_right = ((extr_right * signal.sampling_rate).simplified).magnitude
+
+    data_stamps = (((time_stamps - signal.t_start) *
+                    signal.sampling_rate).simplified).magnitude
+
+    data_stamps = data_stamps.astype(int)
+
+    borders_left = data_stamps + data_left
+
+    borders_right = data_stamps + data_right
+
+    borders = np.dstack((borders_left, borders_right)).flatten()
+
+    waveforms = np.array(
+                np.split(np.array(signal), borders.astype(int))[1::2]) * signal.units
+
+    # len(np.shape(waveforms)) == 1 if waveforms do not have the same width.
+    # this can occur when extr_interval indexes beyond the signal.
+    # Workaround: delete spikes whose waveforms are shorter than the
+    # maximum waveform length
+    if len(np.shape(waveforms)) == 1:
+        max_len = (np.array([len(x) for x in waveforms])).max()
+        to_delete = np.array([idx for idx, x in enumerate(waveforms)
+                             if len(x) < max_len])
+        waveforms = np.delete(waveforms, to_delete, axis=0)
+        waveforms = np.array([x for x in waveforms])
+        warnings.warn("Waveforms " +
+                      ("{:d}, " * len(to_delete)).format(*to_delete) +
+                      "exceeded signal and had to be deleted. " +
+                      "Change extr_interval to keep.")
+
+    waveforms = waveforms[:, np.newaxis, :]
+
+    return SpikeTrain(time_stamps, units=signal.times.units,
+                      t_start=signal.t_start, t_stop=signal.t_stop,
+                      sampling_rate=signal.sampling_rate, waveforms=waveforms,
+                      left_sweep=extr_left)
+
+
+def threshold_detection(signal, threshold=0.0 * mV, sign='above'):
+    """
+    Returns the times when the analog signal crosses a threshold.
+    Usually used for extracting spike times from a membrane potential.
+    Adapted from version in NeuroTools.
+
+    Parameters
+    ----------
+    signal : neo AnalogSignal object
+        'signal' is an analog signal.
+    threshold : A quantity, e.g. in mV
+        'threshold' contains a value that must be reached
+        for an event to be detected. Default: 0.0 * mV.
+    sign : 'above' or 'below'
+        'sign' determines whether to count thresholding crossings
+        that cross above or below the threshold. Default: 'above'.
+
+    Returns
+    -------
+    result_st : neo SpikeTrain object
+        'result_st' contains the spike times of each of the events (spikes)
+        extracted from the signal.
+    """
+
+    assert threshold is not None, "A threshold must be provided"
+
+    if sign == 'above':
+        cutout = np.where(signal > threshold)[0]
+    elif sign == 'below':
+        cutout = np.where(signal < threshold)[0]
+    else:
+        raise ValueError("sign must be 'above' or 'below'")
+
+    if len(cutout) <= 0:
+        events = np.zeros(0)
+    else:
+        take = np.where(np.diff(cutout) > 1)[0] + 1
+        take = np.append(0, take)
+
+        time = signal.times
+        events = time[cutout][take]
+
+    events_base = events.base
+    if events_base is None:
+        # This occurs in some Python 3 builds due to some
+        # bug in quantities.
+        events_base = np.array([event.base for event in events])  # Workaround
+
+    result_st = SpikeTrain(events_base, units=signal.times.units,
+                           t_start=signal.t_start, t_stop=signal.t_stop)
+    return result_st
+
+
+def peak_detection(signal, threshold=0.0 * mV, sign='above', format=None):
+    """
+    Return the peak times for all events that cross threshold.
+    Usually used for extracting spike times from a membrane potential.
+    Similar to spike_train_generation.threshold_detection.
+
+    Parameters
+    ----------
+    signal : neo AnalogSignal object
+        'signal' is an analog signal.
+    threshold : A quantity, e.g. in mV
+        'threshold' contains a value that must be reached
+        for an event to be detected.
+    sign : 'above' or 'below'
+        'sign' determines whether to count thresholding crossings that
+        cross above or below the threshold. Default: 'above'.
+    format : None or 'raw'
+        Whether to return as SpikeTrain (None) or as a plain array
+        of times ('raw'). Default: None.
+
+    Returns
+    -------
+    result_st : neo SpikeTrain object
+        'result_st' contains the spike times of each of the events
+        (spikes) extracted from the signal.
+    """
+    assert threshold is not None, "A threshold must be provided"
+
+    if sign == 'above':
+        cutout = np.where(signal > threshold)[0]
+        peak_func = np.argmax
+    elif sign == 'below':
+        cutout = np.where(signal < threshold)[0]
+        peak_func = np.argmin
+    else:
+        raise ValueError("sign must be 'above' or 'below'")
+
+    if len(cutout) <= 0:
+        events_base = np.zeros(0)
+    else:
+        # Select threshold crossings lasting at least 2 data points
+        # (np.diff(cutout) > 1); this avoids empty slices
+        border_start = np.where(np.diff(cutout) > 1)[0]
+        border_end = border_start + 1
+        borders = np.concatenate((border_start, border_end))
+        borders = np.append(0, borders)
+        borders = np.append(borders, len(cutout)-1)
+        borders = np.sort(borders)
+        true_borders = cutout[borders]
+        right_borders = true_borders[1::2] + 1
+        true_borders = np.sort(np.append(true_borders[0::2], right_borders))
+
+        # Workaround for a bug that occurs when the signal crosses the
+        # threshold for only one data point: eliminate empty slices from
+        # np.split
+        backward_mask = np.absolute(np.ediff1d(true_borders, to_begin=1)) > 0
+        forward_mask = np.absolute(np.ediff1d(true_borders[::-1],
+                                              to_begin=1)[::-1]) > 0
+        true_borders = true_borders[backward_mask * forward_mask]
+        split_signal = np.split(np.array(signal), true_borders)[1::2]
+
+        maxima_idc_split = np.array([peak_func(x) for x in split_signal])
+
+        max_idc = maxima_idc_split + true_borders[0::2]
+
+        events = signal.times[max_idc]
+        events_base = events.base
+
+    if events_base is None:
+        # This occurs in some Python 3 builds due to some
+        # bug in quantities.
+        events_base = np.array([event.base for event in events])  # Workaround
+    if format is None:
+        result_st = SpikeTrain(events_base, units=signal.times.units,
+                               t_start=signal.t_start, t_stop=signal.t_stop)
+    elif format == 'raw':
+        result_st = events_base
+    else:
+        raise ValueError("Format argument must be None or 'raw'")
+
+    return result_st
+
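A hedged usage sketch (not part of the committed file) showing how peak_detection might be called on a synthetic membrane potential; the signal values, sampling rate and threshold are made up for illustration:

    >>> import numpy as np
    >>> import quantities as pq
    >>> import neo
    >>> v = np.zeros(10000)
    >>> v[[1000, 5000, 9000]] = 1.0    # three artificial depolarizations
    >>> sig = neo.AnalogSignal(v, units='mV', sampling_rate=10 * pq.kHz)
    >>> spike_times = peak_detection(sig, threshold=0.5 * pq.mV, sign='above')
    >>> len(spike_times)
    3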
+
+def _homogeneous_process(interval_generator, args, mean_rate, t_start, t_stop,
+                         as_array):
+    """
+    Returns a spike train whose spikes are a realization of a random process
+    generated by the function `interval_generator` with the given rate,
+    starting at time `t_start` and stopping at time `t_stop`.
+    """
+    def rescale(x):
+        return (x / mean_rate.units).rescale(t_stop.units)
+
+    n = int(((t_stop - t_start) * mean_rate).simplified)
+    number = np.ceil(n + 3 * np.sqrt(n))
+    if number < 100:
+        number = min(5 + np.ceil(2 * n), 100)
+    assert number > 4  # if positive, number cannot be less than 5
+    isi = rescale(interval_generator(*args, size=int(number)))
+    spikes = np.cumsum(isi)
+    spikes += t_start
+
+    i = spikes.searchsorted(t_stop)
+    if i == len(spikes):
+        # ISI buffer overrun
+        extra_spikes = []
+        t_last = spikes[-1] + rescale(interval_generator(*args, size=1))[0]
+        while t_last < t_stop:
+            extra_spikes.append(t_last)
+            t_last = t_last + rescale(interval_generator(*args, size=1))[0]
+        # np.concatenate does not conserve units
+        spikes = Quantity(
+            np.concatenate(
+                (spikes, extra_spikes)).magnitude, units=spikes.units)
+    else:
+        spikes = spikes[:i]
+
+    if as_array:
+        spikes = spikes.magnitude
+    else:
+        spikes = SpikeTrain(
+            spikes, t_start=t_start, t_stop=t_stop, units=spikes.units)
+
+    return spikes
+
+
+def homogeneous_poisson_process(rate, t_start=0.0 * ms, t_stop=1000.0 * ms,
+                                as_array=False):
+    """
+    Returns a spike train whose spikes are a realization of a Poisson process
+    with the given rate, starting at time `t_start` and stopping at time `t_stop`.
+
+    All numerical values should be given as Quantities, e.g. 100*Hz.
+
+    Parameters
+    ----------
+
+    rate : Quantity scalar with dimension 1/time
+           The rate of the discharge.
+    t_start : Quantity scalar with dimension time
+              The beginning of the spike train.
+    t_stop : Quantity scalar with dimension time
+             The end of the spike train.
+    as_array : bool
+               If True, a NumPy array of sorted spikes is returned,
+               rather than a SpikeTrain object.
+
+    Raises
+    ------
+    ValueError : If `t_start` and `t_stop` are not of type `pq.Quantity`.
+
+    Examples
+    --------
+        >>> from quantities import Hz, ms
+        >>> spikes = homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms)
+        >>> spikes = homogeneous_poisson_process(
+            20*Hz, 5000*ms, 10000*ms, as_array=True)
+
+    """
+    if not isinstance(t_start, Quantity) or not isinstance(t_stop, Quantity):
+        raise ValueError("t_start and t_stop must be of type pq.Quantity")
+    rate = rate.rescale((1 / t_start).units)
+    mean_interval = 1 / rate.magnitude
+    return _homogeneous_process(
+        np.random.exponential, (mean_interval,), rate, t_start, t_stop,
+        as_array)
+
+
+def homogeneous_gamma_process(a, b, t_start=0.0 * ms, t_stop=1000.0 * ms,
+                              as_array=False):
+    """
+    Returns a spike train whose spikes are a realization of a gamma process
+    with the given parameters, starting at time `t_start` and stopping at
+    time `t_stop` (the average rate will be b/a).
+
+    All numerical values should be given as Quantities, e.g. 100*Hz.
+
+    Parameters
+    ----------
+
+    a : int or float
+        The shape parameter of the gamma distribution.
+    b : Quantity scalar with dimension 1/time
+        The rate parameter of the gamma distribution.
+    t_start : Quantity scalar with dimension time
+              The beginning of the spike train.
+    t_stop : Quantity scalar with dimension time
+             The end of the spike train.
+    as_array : bool
+               If True, a NumPy array of sorted spikes is returned,
+               rather than a SpikeTrain object.
+
+    Raises
+    ------
+    ValueError : If `t_start` and `t_stop` are not of type `pq.Quantity`.
+
+    Examples
+    --------
+        >>> from quantities import Hz, ms
+        >>> spikes = homogeneous_gamma_process(2.0, 50*Hz, 0*ms, 1000*ms)
+        >>> spikes = homogeneous_gamma_process(
+                5.0, 20*Hz, 5000*ms, 10000*ms, as_array=True)
+
+    """
+    if not isinstance(t_start, Quantity) or not isinstance(t_stop, Quantity):
+        raise ValueError("t_start and t_stop must be of type pq.Quantity")
+    b = b.rescale((1 / t_start).units).simplified
+    rate = b / a
+    k, theta = a, (1 / b.magnitude)
+    return _homogeneous_process(
+        np.random.gamma, (k, theta), rate, t_start, t_stop, as_array)
+
+
+def _n_poisson(rate, t_stop, t_start=0.0 * ms, n=1):
+    """
+    Generates one or more independent Poisson spike trains.
+
+    Parameters
+    ----------
+    rate : Quantity or Quantity array
+        Expected firing rate (frequency) of each output SpikeTrain.
+        Can be one of:
+        *  a single Quantity value: expected firing rate of each output
+           SpikeTrain
+        *  a Quantity array: rate[i] is the expected firing rate of the i-th
+           output SpikeTrain
+    t_stop : Quantity
+        Single common stop time of each output SpikeTrain. Must be > t_start.
+    t_start : Quantity (optional)
+        Single common start time of each output SpikeTrain. Must be < t_stop.
+        Default: 0 s.
+    n: int (optional)
+        If rate is a single Quantity value, n specifies the number of
+        SpikeTrains to be generated. If rate is an array, n is ignored and the
+        number of SpikeTrains is equal to len(rate).
+        Default: 1
+
+
+    Returns
+    -------
+    list of neo.SpikeTrain
+        Each SpikeTrain contains one of the independent Poisson spike trains,
+        either n SpikeTrains of the same rate, or len(rate) SpikeTrains with
+        varying rates according to the rate parameter. The time unit of the
+        SpikeTrains is given by t_stop.
+    """
+    # Check that the provided rate has a frequency unit, otherwise raise
+    try:
+        for r in rate.reshape(-1, 1):
+            r.rescale('Hz')
+    except AttributeError:
+        raise ValueError('rate argument must have rate unit (1/time)')
+
+    # Check that t_start < t_stop
+    if not t_start < t_stop:
+        raise ValueError(
+            't_start (=%s) must be < t_stop (=%s)' % (t_start, t_stop))
+
+    # Set number n of output spike trains (specified or set to len(rate))
+    if not (type(n) == int and n > 0):
+        raise ValueError('n (=%s) must be a positive integer' % str(n))
+    rate_dl = rate.simplified.magnitude.flatten()
+
+    # Check rate input parameter
+    if len(rate_dl) == 1:
+        if rate_dl < 0:
+            raise ValueError('rate (=%s) must be non-negative.' % rate)
+        rates = np.array([rate_dl] * n)
+    else:
+        rates = rate_dl.flatten()
+        if any(rates < 0):
+            raise ValueError('rate must have non-negative elements.')
+    sts = []
+    for r in rates:
+        sts.append(homogeneous_poisson_process(r * Hz, t_start, t_stop))
+    return sts
+
+
+def single_interaction_process(
+        rate, rate_c, t_stop, n=2, jitter=0 * ms, coincidences='deterministic',
+        t_start=0 * ms, min_delay=0 * ms, return_coinc=False):
+    """
+    Generates a multidimensional Poisson SIP (single interaction process)
+    plus independent Poisson processes
+
+    A Poisson SIP consists of Poisson time series which are independent
+    except for simultaneous events in all of them. This routine generates
+    a SIP plus additional parallel independent Poisson processes.
+
+    See [1].
+
+    Parameters
+    -----------
+    t_stop: quantities.Quantity
+        Total time of the simulated processes. The events are drawn between
+        0 and `t_stop`.
+    rate: quantities.Quantity
+        Overall mean rate of the time series to be generated (coincidence
+        rate `rate_c` is subtracted to determine the background rate). Can be:
+        * a float, representing the overall mean rate of each process. If
+          so, it must be higher than `rate_c`.
+        * an iterable of floats (one float per process), each float
+          representing the overall mean rate of a process. If so, all the
+          entries must be larger than `rate_c`.
+    rate_c: quantities.Quantity
+        Coincidence rate (rate of coincidences for the n-dimensional SIP).
+        The SIP spike trains will have coincident events with rate `rate_c`
+        plus independent 'background' events with rate `rate-rate_c`.
+    n: int, optional
+        If `rate` is a single Quantity value, `n` specifies the number of
+        SpikeTrains to be generated. If rate is an array, `n` is ignored and
+        the number of SpikeTrains is equal to `len(rate)`.
+        Default: 2
+    jitter: quantities.Quantity, optional
+        Jitter for the coincident events. If `jitter == 0`, the events of all
+        n correlated processes are exactly coincident. Otherwise, they are
+        jittered around a common time randomly, up to +/- `jitter`.
+    coincidences: string, optional
+        Whether the total number of injected coincidences must be
+        deterministic (i.e. rate_c is the actual rate with which coincidences
+        are generated) or stochastic (i.e. rate_c is the mean rate of
+        coincidences):
+        * 'deterministic': deterministic rate
+        * 'stochastic': stochastic rate
+        Default: 'deterministic'
+    t_start: quantities.Quantity, optional
+        Starting time of the series. If specified, it must be lower than
+        t_stop
+        Default: 0 * ms
+    min_delay: quantities.Quantity, optional
+        Minimum delay between consecutive coincidence times.
+        Default: 0 * ms
+    return_coinc: bool, optional
+        Whether to return the coincidence times for the SIP process
+        Default: False
+
+
+    Returns
+    --------
+    output: list
+        Realization of a SIP consisting of n Poisson processes characterized
+        by synchronous events (with the given jitter)
+        If `return_coinc` is `True`, the coincidence times are returned as a
+        second output argument. They also have an associated time unit (same
+        as `t_stop`).
+
+    References
+    ----------
+    [1] Kuhn, Aertsen, Rotter (2003) Neural Comput 15(1):67-101
+
+    EXAMPLE:
+
+    >>> import quantities as pq
+    >>> sip, coinc = single_interaction_process(
+    ...     rate=20 * pq.Hz, rate_c=4 * pq.Hz, t_stop=1 * pq.s, n=10,
+    ...     return_coinc=True)
+    """
+
+    # Check if n is a positive integer
+    if not (isinstance(n, int) and n > 0):
+        raise ValueError('n (=%s) must be a positive integer' % str(n))
+
+    # Use the absolute value of the jitter
+    jitter = abs(jitter)
+
+    # Define the array of rates from the input argument rate
+    if rate.ndim == 0:
+        if rate < 0 * Hz:
+            raise ValueError(
+                'rate (=%s) must be non-negative.' % str(rate))
+        rates_b = np.array(
+            [rate.magnitude for _ in range(n)]) * rate.units
+    else:
+        rates_b = np.array(rate).flatten() * rate.units
+        if not all(rates_b >= 0. * Hz):
+            raise ValueError('*rate* must have non-negative elements')
+
+    # Check: rate>=rate_c
+    if np.any(rates_b < rate_c):
+        raise ValueError('all elements of *rate* must be >= *rate_c*')
+
+    # Check min_delay < 1./rate_c
+    if not (rate_c == 0 * Hz or min_delay < 1. / rate_c):
+        raise ValueError(
+            "*min_delay* (%s) must be lower than 1/*rate_c* (%s)." %
+            (str(min_delay), str((1. / rate_c).rescale(min_delay.units))))
+
+    # Generate the n Poisson processes that are the basis for the SIP
+    # (coincidences still lacking)
+    embedded_poisson_trains = _n_poisson(
+        rate=rates_b - rate_c, t_stop=t_stop, t_start=t_start)
+    # Convert the trains from neo SpikeTrain objects to simpler Quantity
+    # objects
+    embedded_poisson_trains = [
+        emb.view(Quantity) for emb in embedded_poisson_trains]
+
+    # Generate the array of times for coincident events in the SIP, with
+    # consecutive events not closer than min_delay. The array is generated
+    # as a Quantity from the quantities module
+    if coincidences == 'deterministic':
+        Nr_coinc = int(((t_stop - t_start) * rate_c).rescale(dimensionless))
+        while True:
+            coinc_times = t_start + \
+                np.sort(np.random.random(Nr_coinc)) * (t_stop - t_start)
+            if len(coinc_times) < 2 or min(np.diff(coinc_times)) >= min_delay:
+                break
+    elif coincidences == 'stochastic':
+        while True:
+            coinc_times = homogeneous_poisson_process(
+                rate=rate_c, t_stop=t_stop, t_start=t_start)
+            if len(coinc_times) < 2 or min(np.diff(coinc_times)) >= min_delay:
+                break
+        # Convert coinc_times from a neo SpikeTrain object to a Quantity object
+        # pq.Quantity(coinc_times.base)*coinc_times.units
+        coinc_times = coinc_times.view(Quantity)
+        # Set the coincidence times to T-jitter if larger. This ensures that
+        # the last jittered spike time is <T
+        for i in range(len(coinc_times)):
+            if coinc_times[i] > t_stop - jitter:
+                coinc_times[i] = t_stop - jitter
+
+    # Replicate coinc_times n times, and jitter each event in each array by
+    # +/- jitter (within (t_start, t_stop))
+    embedded_coinc = coinc_times + \
+        np.random.random(
+            (len(rates_b), len(coinc_times))) * 2 * jitter - jitter
+    embedded_coinc = embedded_coinc + \
+        (t_start - embedded_coinc) * (embedded_coinc < t_start) - \
+        (t_stop - embedded_coinc) * (embedded_coinc > t_stop)
+
+    # Inject coincident events into the n SIP processes generated above, and
+    # merge with the n independent processes
+    sip_process = [
+        np.sort(np.concatenate((
+            embedded_poisson_trains[m].rescale(t_stop.units),
+            embedded_coinc[m].rescale(t_stop.units))) * t_stop.units)
+        for m in range(len(rates_b))]
+
+    # Convert back sip_process and coinc_times from Quantity objects to
+    # neo.SpikeTrain objects
+    sip_process = [
+        SpikeTrain(t, t_start=t_start, t_stop=t_stop).rescale(t_stop.units)
+        for t in sip_process]
+    coinc_times = [
+        SpikeTrain(t, t_start=t_start, t_stop=t_stop).rescale(t_stop.units)
+        for t in embedded_coinc]
+
+    # Return the processes in the specified output_format
+    if not return_coinc:
+        output = sip_process
+    else:
+        output = sip_process, coinc_times
+
+    return output
+
+
+def _pool_two_spiketrains(a, b, extremes='inner'):
+    """
+    Pool the spikes of two spike trains a and b into a unique spike train.
+
+    Parameters
+    ----------
+    a, b : neo.SpikeTrains
+        Spike trains to be pooled
+
+    extremes: str, optional
+        Only spikes of a and b in the specified extremes are considered.
+        * 'inner': pool all spikes from max(a.t_start, b.t_start) to
+           min(a.t_stop, b.t_stop)
+        * 'outer': pool all spikes from min(a.t_start, b.t_start) to
+           max(a.t_stop, b.t_stop)
+        Default: 'inner'
+
+    Output
+    ------
+    neo.SpikeTrain containing all spikes in a and b falling in the
+    specified extremes
+    """
+
+    unit = a.units
+    times_a_dimless = list(a.view(Quantity).magnitude)
+    times_b_dimless = list(b.rescale(unit).view(Quantity).magnitude)
+    times = (times_a_dimless + times_b_dimless) * unit
+
+    if extremes == 'outer':
+        t_start = min(a.t_start, b.t_start)
+        t_stop = max(a.t_stop, b.t_stop)
+    elif extremes == 'inner':
+        t_start = max(a.t_start, b.t_start)
+        t_stop = min(a.t_stop, b.t_stop)
+        times = times[times > t_start]
+        times = times[times < t_stop]
+
+    else:
+        raise ValueError(
+            'extremes (%s) can only be "inner" or "outer"' % extremes)
+    pooled_train = SpikeTrain(
+        times=sorted(times.magnitude), units=unit, t_start=t_start,
+        t_stop=t_stop)
+    return pooled_train
+
+
+def _pool_spiketrains(trains, extremes='inner'):
+    """
+    Pool spikes from any number of spike trains into a unique spike train.
+
+    Parameters
+    ----------
+    trains: list
+        list of spike trains to merge
+
+    extremes: str, optional
+        Only spikes of the given trains in the specified extremes are
+        considered.
+        * 'inner': pool all spikes from the maximum t_start to the minimum
+           t_stop of the input trains
+        * 'outer': pool all spikes from the minimum t_start to the maximum
+           t_stop of the input trains
+        Default: 'inner'
+
+    Output
+    ------
+    neo.SpikeTrain containing all spikes in trains falling in the
+    specified extremes
+    """
+
+    merge_trains = trains[0]
+    for t in trains[1:]:
+        merge_trains = _pool_two_spiketrains(
+            merge_trains, t, extremes=extremes)
+    t_start, t_stop = merge_trains.t_start, merge_trains.t_stop
+    merge_trains = sorted(merge_trains)
+    merge_trains = np.squeeze(merge_trains)
+    merge_trains = SpikeTrain(
+        merge_trains, t_stop=t_stop, t_start=t_start, units=trains[0].units)
+    return merge_trains
+
+
+def _sample_int_from_pdf(a, n):
+    """
+    Draw n independent samples from the set {0,1,...,L}, where L=len(a)-1,
+    according to the probability distribution a.
+    a[j] is the probability to sample j, for each j from 0 to L.
+
+
+    Parameters
+    -----
+    a: numpy.array
+        Probability vector (i.e. an array summing to 1) that at each entry j carries
+        the probability to sample j (j=0,1,...,len(a)-1).
+
+    n: int
+        Number of samples generated with the function
+
+    Output
+    -------
+    array of n samples taking values between 0 and L=len(a)-1.
+    """
+
+    A = np.cumsum(a)  # cumulative distribution of a
+    u = np.random.uniform(0, 1, size=n)
+    U = np.array([u for i in a]).T  # copy u (as column vector) len(a) times
+    return (A < U).sum(axis=1)
+
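A small usage sketch (not part of the committed file): drawing synchrony sizes from an amplitude distribution with the helper above.

    >>> import numpy as np
    >>> a = np.array([0.5, 0.3, 0.2])
    >>> samples = _sample_int_from_pdf(a, 1000)
    >>> # samples is an array of 1000 integers drawn from {0, 1, 2} with
    >>> # probabilities 0.5, 0.3 and 0.2, respectively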
+
+def _mother_proc_cpp_stat(A, t_stop, rate, t_start=0 * ms):
+    """
+    Generate the hidden ("mother") Poisson process for a Compound Poisson
+    Process (CPP).
+
+
+    Parameters
+    ----------
+    A : numpy.array
+        Amplitude distribution. A[j] represents the probability of a
+        synchronous event of size j.
+        The sum over all entries of A must be equal to one.
+    t_stop : quantities.Quantity
+        The stopping time of the mother process
+    rate : quantities.Quantity
+        Homogeneous rate of the n spike trains that will be generated by the
+        CPP function
+    t_start : quantities.Quantity, optional
+        The starting time of the mother process
+        Default: 0 ms
+
+    Output
+    ------
+    Poisson spike train representing the mother process generating the CPP
+    """
+    N = len(A) - 1
+    exp_A = np.dot(A, range(N + 1))  # expected value of a
+    exp_mother = (N * rate) / float(exp_A)  # rate of the mother process
+    return homogeneous_poisson_process(
+        rate=exp_mother, t_stop=t_stop, t_start=t_start)
+
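A short numeric illustration of the mother-process rate used above (the amplitude distribution and rate are made up):

    >>> A = [0, 0.9, 0.1]              # N = 2 output spike trains
    >>> exp_A = 0.9 * 1 + 0.1 * 2      # = 1.1, expected synchrony size
    >>> # mother rate = N * rate / exp_A, e.g. 2 * 10 Hz / 1.1 ~ 18.2 Hz;
    >>> # each mother spike is copied to 1.1 trains on average, so every
    >>> # output train again fires at about 10 Hz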
+
+def _cpp_hom_stat(A, t_stop, rate, t_start=0 * ms):
+    """
+    Generate a Compound Poisson Process (CPP) with amplitude distribution
+    A and homogeneous firing rate r.
+
+    Parameters
+    ----------
+    A : numpy.ndarray
+        Amplitude distribution. A[j] represents the probability of a
+        synchronous event of size j.
+        The sum over all entries of A must be equal to one.
+    t_stop : quantities.Quantity
+        The end time of the output spike trains
+    rate : quantities.Quantity
+        Average rate of each spike train generated
+    t_start : quantities.Quantity, optional
+        The start time of the output spike trains
+        Default: 0 ms
+
+    Output
+    ------
+    List of n neo.SpikeTrains, each having average firing rate r and
+    correlated such as to form a CPP with amplitude distribution A
+    """
+
+    # Generate mother process and associated spike labels
+    mother = _mother_proc_cpp_stat(
+        A=A, t_stop=t_stop, rate=rate, t_start=t_start)
+    labels = _sample_int_from_pdf(A, len(mother))
+
+    N = len(A) - 1  # Number of trains in output
+
+    try:  # Faster but more memory-consuming approach
+        M = len(mother)  # number of spikes in the mother process
+        spike_matrix = np.zeros((N, M), dtype=bool)
+        # for each spike, take its label l
+        for spike_id, l in enumerate(labels):
+            # choose l random trains
+            train_ids = random.sample(range(N), l)
+            # and set the spike matrix for that train
+            for train_id in train_ids:
+                spike_matrix[train_id, spike_id] = True  # and spike to True
+
+        times = [[] for i in range(N)]
+        for train_id, row in enumerate(spike_matrix):
+            times[train_id] = mother[row].view(Quantity)
+
+    except MemoryError:  # Slower (~2x) but less memory-consuming approach
+        print('memory case')
+        times = [[] for i in range(N)]
+        for t, l in zip(mother, labels):
+            train_ids = random.sample(range(N), l)
+            for train_id in train_ids:
+                times[train_id].append(t)
+
+    trains = [SpikeTrain(
+        times=t, t_start=t_start, t_stop=t_stop) for t in times]
+
+    return trains
+
+
+def _cpp_het_stat(A, t_stop, rate, t_start=0. * ms):
+    """
+    Generate a Compound Poisson Process (CPP) with amplitude distribution
+    A and heterogeneous firing rates r=r[0], r[1], ..., r[-1].
+
+    Parameters
+    ----------
+    A : array
+        CPP's amplitude distribution. A[j] represents the probability of
+        a synchronous event of size j among the generated spike trains.
+        The sum over all entries of A must be equal to one.
+    t_stop : Quantity (time)
+        The end time of the output spike trains
+    rate : Quantity (1/time)
+        Average rate of each spike train generated
+    t_start : quantities.Quantity, optional
+        The start time of the output spike trains
+        Default: 0 ms
+
+    Output
+    ------
+    List of neo.SpikeTrains with different firing rates, forming
+    a CPP with amplitude distribution A
+    """
+
+    # Computation of Parameters of the two CPPs that will be merged
+    # (uncorrelated with heterog. rates + correlated with homog. rates)
+    N = len(rate)  # number of output spike trains
+    A_exp = np.dot(A, range(N + 1))  # expectation of A
+    r_sum = np.sum(rate)  # sum of all output firing rates
+    r_min = np.min(rate)  # minimum of the firing rates
+    r1 = r_sum - N * r_min  # rate of the uncorrelated CPP
+    r2 = r_sum / float(A_exp) - r1  # rate of the correlated CPP
+    r_mother = r1 + r2  # rate of the hidden mother process
+
+    # Check the analytical constraint for the amplitude distribution
+    if A[1] < (r1 / r_mother).rescale(dimensionless).magnitude:
+        raise ValueError('A[1] too small / A[i], i>1 too high')
+
+    # Compute the amplitude distrib of the correlated CPP, and generate it
+    a = [(r_mother * i) / float(r2) for i in A]
+    a[1] = a[1] - r1 / float(r2)
+    CPP = _cpp_hom_stat(a, t_stop, r_min, t_start)
+
+    # Generate the independent heterogeneous Poisson processes
+    POISS = [
+        homogeneous_poisson_process(i - r_min, t_start, t_stop) for i in rate]
+
+    # Pool the correlated CPP and the corresponding Poisson processes
+    out = [_pool_two_spiketrains(CPP[i], POISS[i]) for i in range(N)]
+    return out
+
+
+def compound_poisson_process(rate, A, t_stop, shift=None, t_start=0 * ms):
+    """
+    Generate a Compound Poisson Process (CPP; see [1]) with a given amplitude
+    distribution A and stationary marginal rates r.
+
+    The CPP process is a model for parallel, correlated processes with Poisson
+    spiking statistics at pre-defined firing rates. It is composed of len(A)-1
+    spike trains with a correlation structure determined by the amplitude
+    distribution A: A[j] is the probability that a spike occurs synchronously
+    in any j spike trains.
+
+    The CPP is generated by creating a hidden mother Poisson process, and then
+    copying spikes of the mother process to j of the output spike trains with
+    probability A[j].
+
+    Note that this function decorrelates the firing rate of each SpikeTrain
+    from the probability for that SpikeTrain to participate in a synchronous
+    event (which is uniform across SpikeTrains).
+
+    Parameters
+    ----------
+    rate : quantities.Quantity
+        Average rate of each spike train generated. Can be:
+          - a single value, in which case all spike trains have the same rate
+          - an array of values (of length len(A)-1), each indicating the
+            firing rate of one output process
+    A : array
+        CPP's amplitude distribution. A[j] represents the probability of
+        a synchronous event of size j among the generated spike trains.
+        The sum over all entries of A must be equal to one.
+    t_stop : quantities.Quantity
+        The end time of the output spike trains.
+    shift : None or quantities.Quantity, optional
+        If None, the injected synchrony is exact. If shift is a Quantity, all
+        the spike trains are shifted independently by a random amount in
+        the interval [-shift, +shift].
+        Default: None
+    t_start : quantities.Quantity, optional
+        The start time of the output spike trains.
+        Default: 0 ms
+
+    Returns
+    -------
+    List of neo.SpikeTrains
+        SpikeTrains with specified firing rates forming the CPP with amplitude
+        distribution A.
+
+    References
+    ----------
+    [1] Staude, Rotter, Gruen (2010) J Comput Neurosci 29:327-350.
+    """
+    # Check A is a probability distribution (it sums to 1 and is positive)
+    if abs(sum(A) - 1) > np.finfo('float').eps:
+        raise ValueError(
+            'A must be a probability vector, sum(A) = %f != 1' % (sum(A)))
+    if any([a < 0 for a in A]):
+        raise ValueError(
+            'A must be a probability vector, all of its elements must be >= 0')
+    # Check that the rate is not an empty Quantity
+    if rate.ndim == 1 and len(rate.magnitude) == 0:
+        raise ValueError('Rate is an empty Quantity array')
+    # Return empty spike trains for specific parameters
+    elif A[0] == 1 or np.sum(np.abs(rate.magnitude)) == 0:
+        return [
+            SpikeTrain([] * t_stop.units, t_stop=t_stop,
+                       t_start=t_start) for i in range(len(A) - 1)]
+    else:
+        # Homogeneous rates
+        if rate.ndim == 0:
+            cpp = _cpp_hom_stat(A=A, t_stop=t_stop, rate=rate, t_start=t_start)
+        # Heterogeneous rates
+        else:
+            cpp = _cpp_het_stat(A=A, t_stop=t_stop, rate=rate, t_start=t_start)
+
+        if shift is None:
+            return cpp
+        # Dither the output spiketrains
+        else:
+            cpp = [
+                dither_spike_train(cp, shift=shift, edges=True)[0]
+                for cp in cpp]
+            return cpp
+
+# Alias for the compound Poisson process
+cpp = compound_poisson_process
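For orientation, a minimal usage sketch of the public entry point above (not part of the committed file; it assumes the module is importable as elephant.spike_train_generation and that the quantities package is available as pq):

    >>> import quantities as pq
    >>> from elephant.spike_train_generation import compound_poisson_process
    >>>
    >>> # Three output trains at 5 Hz each; a mother spike is copied either to
    >>> # exactly one train (A[1] = 0.5) or to all three trains (A[3] = 0.5).
    >>> sts = compound_poisson_process(
    ...     rate=5 * pq.Hz, A=[0., 0.5, 0., 0.5], t_stop=10 * pq.s)
    >>> len(sts)
    3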

+ 523 - 0
code/elephant/elephant/spike_train_surrogates.py

@@ -0,0 +1,523 @@
+# -*- coding: utf-8 -*-
+"""
+Module to generate surrogates of a spike train by randomising its spike times
+in different ways (see [1]). Different methods destroy different features of
+the original data:
+
+* randomise_spikes:
+    randomly reposition all spikes inside the time interval (t_start, t_stop).
+    Keeps spike count, generates Poisson spike trains with time-stationary
+    firing rate
+* dither_spikes:
+    dither each spike time around original position by a random amount;
+    keeps spike count and firing rates computed on a slow temporal scale;
+    destroys ISIs, making them more exponentially distributed
+* dither_spike_train:
+    dither the whole input spike train (i.e. all spikes equally) by a random
+    amount; keeps spike count, ISIs, and firing rates computed on a slow
+    temporal scale
+* jitter_spikes:
+    discretise the full time interval (t_start, t_stop) into time segments
+    and locally randomise the spike times (see randomise_spikes) inside each
+    segment. Keeps spike count inside each segment and creates locally Poisson
+    spike trains with locally time-stationary rates
+* shuffle_isis:
+    shuffle the inter-spike intervals (ISIs) of the spike train randomly,
+    keeping the first spike time fixed and generating the others from the
+    new sequence of ISIs. Keeps spike count and ISIs, flattens the firing rate
+    profile
+
+[1] Louis et al (2010) Surrogate Spike Train Generation Through Dithering in
+    Operational Time. Front Comput Neurosci. 2010; 4: 127.
+
+Original implementation by: Emiliano Torre [e.torre@fz-juelich.de]
+:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import numpy as np
+import quantities as pq
+import neo
+try:
+    import elephant.statistics as es
+    isi = es.isi
+except ImportError:
+    from .statistics import isi  # Convenience when in elephant working dir.
+
+
+def dither_spikes(spiketrain, dither, n=1, decimals=None, edges=True):
+    """
+    Generates surrogates of a spike train by spike dithering.
+
+    The surrogates are obtained by uniformly dithering the spike times around
+    their original positions. The dithering is performed independently for
+    each surrogate.
+
+    The surrogates retain the :attr:`t_start` and :attr:`t_stop` of the
+    original `SpikeTrain` object. Spikes moved beyond this range are lost or
+    moved to the range's ends, depending on the parameter `edges`.
+
+
+    Parameters
+    ----------
+    spiketrain :  neo.SpikeTrain
+        The spike train from which to generate the surrogates
+    dither : quantities.Quantity
+        Amount of dithering. A spike at time t is placed randomly within
+        ]t-dither, t+dither[.
+    n : int (optional)
+        Number of surrogates to be generated.
+        Default: 1
+    decimals : int or None (optional)
+        Number of decimal points for every spike time in the surrogates
+        If None, machine precision is used.
+        Default: None
+    edges : bool (optional)
+        For surrogate spikes falling outside the range
+        `[spiketrain.t_start, spiketrain.t_stop)`, whether to drop them
+        (for edges = True) or set them to the range's closest end
+        (for edges = False).
+        Default: True
+
+    Returns
+    -------
+    list of neo.SpikeTrain
+      A list of `neo.SpikeTrain`, each obtained from :attr:`spiketrain` by
+      randomly dithering its spikes. The range of the surrogate spike trains
+      is the same as :attr:`spiketrain`.
+
+    Examples
+    --------
+    >>> import quantities as pq
+    >>> import neo
+    >>>
+    >>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
+    >>> print dither_spikes(st, dither = 20*pq.ms)   # doctest: +SKIP
+    [<SpikeTrain(array([  96.53801903,  248.57047376,  601.48865767,
+     815.67209811]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print dither_spikes(st, dither = 20*pq.ms, n=2)   # doctest: +SKIP
+    [<SpikeTrain(array([ 104.24942044,  246.0317873 ,  584.55938657,
+        818.84446913]) * ms, [0.0 ms, 1000.0 ms])>,
+     <SpikeTrain(array([ 111.36693058,  235.15750163,  618.87388515,
+        786.1807108 ]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print dither_spikes(st, dither = 20*pq.ms, decimals=0)   # doctest: +SKIP
+    [<SpikeTrain(array([  81.,  242.,  595.,  799.]) * ms,
+        [0.0 ms, 1000.0 ms])>]
+    """
+
+    # Transform spiketrain into a Quantity object (needed for matrix algebra)
+    data = spiketrain.view(pq.Quantity)
+
+    # Main: generate the surrogates
+    surr = data.reshape((1, len(data))) + 2 * dither * np.random.random_sample(
+        (n, len(data))) - dither
+
+    # Round the surrogate data to decimal position, if requested
+    if decimals is not None:
+        surr = surr.round(decimals)
+
+    if edges is False:
+        # Move all spikes outside [spiketrain.t_start, spiketrain.t_stop] to
+        # the range's ends
+        surr = np.minimum(np.maximum(surr.base,
+            (spiketrain.t_start / spiketrain.units).base),
+            (spiketrain.t_stop / spiketrain.units).base) * spiketrain.units
+    else:
+        # Leave out all spikes outside [spiketrain.t_start, spiketrain.t_stop]
+        tstart, tstop = (spiketrain.t_start / spiketrain.units).base, \
+                        (spiketrain.t_stop / spiketrain.units).base
+        surr = [np.sort(s[np.all([s >= tstart, s < tstop], axis=0)]) * spiketrain.units
+                for s in surr.base]
+
+    # Return the surrogates as SpikeTrains
+    return [neo.SpikeTrain(s,
+                           t_start=spiketrain.t_start,
+                           t_stop=spiketrain.t_stop).rescale(spiketrain.units)
+            for s in surr]
+
+
+def randomise_spikes(spiketrain, n=1, decimals=None):
+    """
+    Generates surrogates of a spike train by spike time randomisation.
+
+    The surrogates are obtained by keeping the spike count of the original
+    `SpikeTrain` object, but placing the spikes randomly in the interval
+    `[spiketrain.t_start, spiketrain.t_stop]`.
+    This generates independent Poisson neo.SpikeTrain objects (exponentially
+    distributed inter-spike intervals) while keeping the spike count as in
+    :attr:`spiketrain`.
+
+    Parameters
+    ----------
+    spiketrain :  neo.SpikeTrain
+        The spike train from which to generate the surrogates
+    n : int (optional)
+        Number of surrogates to be generated.
+        Default: 1
+    decimals : int or None (optional)
+        Number of decimal points for every spike time in the surrogates
+        If None, machine precision is used.
+        Default: None
+
+    Returns
+    -------
+    list of neo.SpikeTrain object(s)
+      A list of `neo.SpikeTrain` objects, each obtained from :attr:`spiketrain`
+      by randomly repositioning its spikes. The range of the surrogate spike
+      trains is the same as :attr:`spiketrain`.
+
+    Examples
+    --------
+    >>> import quantities as pq
+    >>> import neo
+    >>>
+    >>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
+    >>> print randomise_spikes(st)   # doctest: +SKIP
+        [<SpikeTrain(array([ 131.23574603,  262.05062963,  549.84371387,
+                            940.80503832]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print randomise_spikes(st, n=2)   # doctest: +SKIP
+        [<SpikeTrain(array([  84.53274955,  431.54011743,  733.09605806,
+              852.32426583]) * ms, [0.0 ms, 1000.0 ms])>,
+         <SpikeTrain(array([ 197.74596726,  528.93517359,  567.44599968,
+              775.97843799]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print randomise_spikes(st, decimals=0)   # doctest: +SKIP
+        [<SpikeTrain(array([  29.,  667.,  720.,  774.]) * ms,
+              [0.0 ms, 1000.0 ms])>]
+    """
+
+    # Create surrogate spike trains as rows of a Quantity array
+    sts = ((spiketrain.t_stop - spiketrain.t_start) *
+           np.random.random(size=(n, len(spiketrain))) +
+           spiketrain.t_start).rescale(spiketrain.units)
+
+    # Round the surrogate data to decimal position, if requested
+    if decimals is not None:
+        sts = sts.round(decimals)
+
+    # Convert the Quantity array to a list of SpikeTrains, and return them
+    return [neo.SpikeTrain(np.sort(st), t_start=spiketrain.t_start, t_stop=spiketrain.t_stop)
+            for st in sts]
+
+
+def shuffle_isis(spiketrain, n=1, decimals=None):
+    """
+    Generates surrogates of a neo.SpikeTrain object by inter-spike-interval
+    (ISI) shuffling.
+
+    The surrogates are obtained by randomly permuting the ISIs of the given
+    input :attr:`spiketrain`. This generates independent `SpikeTrain` object(s)
+    with the same ISI distribution and spike count as :attr:`spiketrain`, while
+    destroying temporal dependencies and the firing rate profile.
+
+    Parameters
+    ----------
+    spiketrain :  neo.SpikeTrain
+        The spike train from which to generate the surrogates
+    n : int (optional)
+        Number of surrogates to be generated.
+        Default: 1
+    decimals : int or None (optional)
+        Number of decimal points for every spike time in the surrogates
+        If None, machine precision is used.
+        Default: None
+
+    Returns
+    -------
+    list of SpikeTrain
+      A list of spike trains, each obtained from `spiketrain` by random ISI
+      shuffling. The range of the surrogate `neo.SpikeTrain` objects is the
+      same as :attr:`spiketrain`.
+
+    Examples
+    --------
+    >>> import quantities as pq
+    >>> import neo
+    >>>
+    >>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
+    >>> print shuffle_isis(st)   # doctest: +SKIP
+        [<SpikeTrain(array([ 200.,  350.,  700.,  800.]) * ms,
+                 [0.0 ms, 1000.0 ms])>]
+    >>> print shuffle_isis(st, n=2)   # doctest: +SKIP
+        [<SpikeTrain(array([ 100.,  300.,  450.,  800.]) * ms,
+              [0.0 ms, 1000.0 ms])>,
+         <SpikeTrain(array([ 200.,  350.,  700.,  800.]) * ms,
+              [0.0 ms, 1000.0 ms])>]
+
+    """
+
+    if len(spiketrain) > 0:
+        isi0 = spiketrain[0] - spiketrain.t_start
+        ISIs = np.hstack([isi0, isi(spiketrain)])
+
+        # Round the ISIs to decimal position, if requested
+        if decimals is not None:
+            ISIs = ISIs.round(decimals)
+
+        # Create list of surrogate spike trains by random ISI permutation
+        sts = []
+        for i in range(n):
+            surr_times = np.cumsum(np.random.permutation(ISIs)) *\
+                spiketrain.units + spiketrain.t_start
+            sts.append(neo.SpikeTrain(
+                surr_times, t_start=spiketrain.t_start,
+                t_stop=spiketrain.t_stop))
+
+    else:
+        sts = []
+        empty_train = neo.SpikeTrain([] * spiketrain.units,
+                                     t_start=spiketrain.t_start,
+                                     t_stop=spiketrain.t_stop)
+        for i in range(n):
+            sts.append(empty_train)
+
+    return sts
+
+
+def dither_spike_train(spiketrain, shift, n=1, decimals=None, edges=True):
+    """
+    Generates surrogates of a neo.SpikeTrain by spike train shifting.
+
+    The surrogates are obtained by shifting the whole spike train by a
+    random amount (independent for each surrogate). Thus, ISIs and temporal
+    correlations within the spike train are kept. For small shifts, the
+    firing rate profile is also kept with reasonable accuracy.
+
+    The surrogates retain the :attr:`t_start` and :attr:`t_stop` of the
+    :attr:`spiketrain`. Spikes moved beyond this range are lost or moved to
+    the range's ends, depending on the parameter `edges`.
+
+    Parameters
+    ----------
+    spiketrain :  neo.SpikeTrain
+        The spike train from which to generate the surrogates
+    shift : quantities.Quantity
+        Amount of shift. The spike train is shifted by a random amount
+        uniformly drawn from the range ]-shift, +shift[.
+    n : int (optional)
+        Number of surrogates to be generated.
+        Default: 1
+    decimals : int or None (optional)
+        Number of decimal points for every spike time in the surrogates
+        If None, machine precision is used.
+        Default: None
+    edges : bool
+        For surrogate spikes falling outside the range `[spiketrain.t_start,
+        spiketrain.t_stop)`, whether to drop them (for edges = True) or set
+        them to the range's closest end (for edges = False).
+        Default: True
+
+    Returns
+    -------
+    list of SpikeTrain
+      A list of spike trains, each obtained from spiketrain by shifting all of
+      its spikes by the same random amount. The range of the surrogate spike
+      trains is the same as :attr:`spiketrain`.
+
+    Examples
+    --------
+    >>> import quantities as pq
+    >>> import neo
+    >>>
+    >>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
+    >>>
+    >>> print dither_spike_train(st, shift = 20*pq.ms)   # doctest: +SKIP
+    [<SpikeTrain(array([  96.53801903,  248.57047376,  601.48865767,
+     815.67209811]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print dither_spike_train(st, shift = 20*pq.ms, n=2)   # doctest: +SKIP
+    [<SpikeTrain(array([  92.89084054,  242.89084054,  592.89084054,
+        792.89084054]) * ms, [0.0 ms, 1000.0 ms])>,
+     <SpikeTrain(array([  84.61079043,  234.61079043,  584.61079043,
+        784.61079043]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print dither_spike_train(st, shift = 20*pq.ms, decimals=0)   # doctest: +SKIP
+    [<SpikeTrain(array([  82.,  232.,  582.,  782.]) * ms,
+        [0.0 ms, 1000.0 ms])>]
+    """
+
+    # Transform spiketrain into a Quantity object (needed for matrix algebra)
+    data = spiketrain.view(pq.Quantity)
+
+    # Main: generate the surrogates by spike train shifting
+    surr = data.reshape((1, len(data))) + 2 * shift * \
+        np.random.random_sample((n, 1)) - shift
+
+    # Round the surrogate data to decimal position, if requested
+    if decimals is not None:
+        surr = surr.round(decimals)
+
+    if edges is False:
+        # Move all spikes outside [spiketrain.t_start, spiketrain.t_stop] to
+        # the range's ends
+        surr = np.minimum(np.maximum(surr.base,
+            (spiketrain.t_start / spiketrain.units).base),
+            (spiketrain.t_stop / spiketrain.units).base) * spiketrain.units
+    else:
+        # Leave out all spikes outside [spiketrain.t_start, spiketrain.t_stop]
+        tstart, tstop = (spiketrain.t_start / spiketrain.units).base,\
+                        (spiketrain.t_stop / spiketrain.units).base
+        surr = [s[np.all([s >= tstart, s < tstop], axis=0)] * spiketrain.units
+                for s in surr.base]
+
+    # Return the surrogates as SpikeTrains
+    return [neo.SpikeTrain(s, t_start=spiketrain.t_start,
+                           t_stop=spiketrain.t_stop).rescale(spiketrain.units)
+            for s in surr]
+
+
+def jitter_spikes(spiketrain, binsize, n=1):
+    """
+    Generates surrogates of a :attr:`spiketrain` by spike jittering.
+
+    The surrogates are obtained by defining adjacent time bins spanning the
+    :attr:`spiketrain` range, and randomly re-positioning (independently for
+    each surrogate) each spike within the time bin it falls into.
+
+    The surrogates retain the :attr:`t_start` and :attr:`t_stop` of
+    :attr:`spiketrain`. Note that within each time bin the surrogate
+    `neo.SpikeTrain` objects are locally Poissonian (the inter-spike intervals
+    are exponentially distributed).
+
+    Parameters
+    ----------
+    spiketrain :  neo.SpikeTrain
+        The spike train from which to generate the surrogates
+    binsize : quantities.Quantity
+        Size of the time bins within which to randomise the spike times.
+        Note: the last bin extends to `spiketrain.t_stop` and might have a
+        width different from `binsize`.
+    n : int (optional)
+        Number of surrogates to be generated.
+        Default: 1
+
+    Returns
+    -------
+    list of SpikeTrain
+      A list of spike trains, each obtained from `spiketrain` by randomly
+      repositioning its spikes within bins of user-defined width. The range
+      of the surrogate spike trains is the same as `spiketrain`.
+
+    Examples
+    --------
+    >>> import quantities as pq
+    >>> import neo
+    >>>
+    >>> st = neo.SpikeTrain([80, 150, 320, 480]*pq.ms, t_stop=1*pq.s)
+    >>> print jitter_spikes(st, binsize=100*pq.ms)   # doctest: +SKIP
+    [<SpikeTrain(array([  98.82898293,  178.45805954,  346.93993867,
+        461.34268507]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print jitter_spikes(st, binsize=100*pq.ms, n=2)   # doctest: +SKIP
+    [<SpikeTrain(array([  97.15720041,  199.06945744,  397.51928207,
+        402.40065162]) * ms, [0.0 ms, 1000.0 ms])>,
+     <SpikeTrain(array([  80.74513157,  173.69371317,  338.05860962,
+        495.48869981]) * ms, [0.0 ms, 1000.0 ms])>]
+    >>> print jitter_spikes(st, binsize=100*pq.ms)   # doctest: +SKIP
+    [<SpikeTrain(array([  4.55064897e-01,   1.31927046e+02,   3.57846265e+02,
+         4.69370604e+02]) * ms, [0.0 ms, 1000.0 ms])>]
+    """
+    # Define standard time unit; all time Quantities are converted to
+    # scalars after being rescaled to this unit, to use the power of numpy
+    std_unit = binsize.units
+
+    # Compute bin edges for the jittering procedure
+    # !: the last bin extends to spiketrain.t_stop and might have
+    # size != binsize
+    start_dl = spiketrain.t_start.rescale(std_unit).magnitude
+    stop_dl = spiketrain.t_stop.rescale(std_unit).magnitude
+
+    bin_edges = np.arange(start_dl, stop_dl, binsize.magnitude)
+    bin_edges = np.hstack([bin_edges, stop_dl])
+
+    # Create n surrogates with spikes randomly placed in the interval (0,1)
+    surr_poiss01 = np.random.random_sample((n, len(spiketrain)))
+
+    # Compute the bin id of each spike (bins are counted from t_start)
+    bin_ids = np.array(
+        ((spiketrain.view(pq.Quantity) - spiketrain.t_start) /
+         binsize).rescale(pq.dimensionless).magnitude, dtype=int)
+
+    # Compute the size of each time bin (as a numpy array)
+    bin_sizes_dl = np.diff(bin_edges)
+
+    # For each spike compute its offset (the left edge of the bin it falls
+    # into) and the size of the bin it falls into
+    offsets = np.array([bin_edges[bin_id] for bin_id in bin_ids])
+    dilats = np.array([bin_sizes_dl[bin_id] for bin_id in bin_ids])
+
+    # Compute each surrogate by dilatating and shifting each spike s in the
+    # poisson 0-1 spike trains to dilat * s + offset. Attach time unit again
+    surr = np.sort(surr_poiss01 * dilats + offsets, axis=1) * std_unit
+
+    return [neo.SpikeTrain(s, t_start=spiketrain.t_start,
+                           t_stop=spiketrain.t_stop).rescale(spiketrain.units)
+            for s in surr]
+
+
+def surrogates(
+        spiketrain, n=1, surr_method='dither_spike_train', dt=None, decimals=None,
+        edges=True):
+    """
+    Generates surrogates of a :attr:`spiketrain` by a desired generation
+    method.
+
+    This routine is a wrapper for the other surrogate generators in the
+    module.
+
+    The surrogates retain the :attr:`t_start` and :attr:`t_stop` of the
+    original :attr:`spiketrain`.
+
+
+    Parameters
+    ----------
+    spiketrain :  neo.SpikeTrain
+        The spike train from which to generate the surrogates
+    n : int, optional
+        Number of surrogates to be generated.
+        Default: 1
+    surr_method : str, optional
+        The method to use to generate surrogate spike trains. Can be one of:
+        * 'dither_spike_train': see surrogates.dither_spike_train() [dt needed]
+        * 'dither_spikes': see surrogates.dither_spikes() [dt needed]
+        * 'jitter_spikes': see surrogates.jitter_spikes() [dt needed]
+        * 'randomise_spikes': see surrogates.randomise_spikes()
+        * 'shuffle_isis': see surrogates.shuffle_isis()
+        Default: 'dither_spike_train'
+    dt : quantities.Quantity, optional
+        For methods shifting spike times randomly around their original time
+        (spike dithering, train shifting) or replacing them randomly within a
+        certain window (spike jittering), dt represents the size of that
+        shift / window. For other methods, dt is ignored.
+        Default: None
+    decimals : int or None, optional
+        Number of decimal points for every spike time in the surrogates
+        If None, machine precision is used.
+        Default: None
+    edges : bool
+        For surrogate spikes falling outside the range `[spiketrain.t_start,
+        spiketrain.t_stop)`, whether to drop them out (for edges = True) or set
+        that to the range's closest end (for edges = False).
+        Default: True
+
+    Returns
+    -------
+    list of neo.SpikeTrain objects
+      A list of spike trains, each obtained from `spiketrain` by the specified
+      surrogate generation method. The range of the surrogate `neo.SpikeTrain`
+      object(s) is the same as `spiketrain`.
+    """
+
+    # Define the surrogate function to use, depending on the specified method
+    surrogate_types = {
+        'dither_spike_train': dither_spike_train,
+        'dither_spikes': dither_spikes,
+        'jitter_spikes': jitter_spikes,
+        'randomise_spikes': randomise_spikes,
+        'shuffle_isis': shuffle_isis}
+
+    if surr_method not in surrogate_types.keys():
+        raise ValueError('specified surr_method (=%s) not valid' % surr_method)
+
+    if surr_method in ['dither_spike_train', 'dither_spikes', 'jitter_spikes']:
+        return surrogate_types[surr_method](
+            spiketrain, dt, n=n, decimals=decimals, edges=edges)
+    elif surr_method in ['randomise_spikes', 'shuffle_isis']:
+        return surrogate_types[surr_method](
+            spiketrain, n=n, decimals=decimals)
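For orientation, a short sketch of how the wrapper above might be called (not part of the committed file; it assumes the module is importable as elephant.spike_train_surrogates):

    >>> import quantities as pq
    >>> import neo
    >>> from elephant.spike_train_surrogates import surrogates
    >>>
    >>> st = neo.SpikeTrain([100, 250, 600, 800] * pq.ms, t_stop=1 * pq.s)
    >>> # dt is forwarded as the dither window of dither_spikes()
    >>> dithered = surrogates(st, n=2, surr_method='dither_spikes', dt=20 * pq.ms)
    >>> # shuffle_isis() does not use dt
    >>> shuffled = surrogates(st, n=2, surr_method='shuffle_isis')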

+ 317 - 0
code/elephant/elephant/sta.py

@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+'''
+Functions to calculate spike-triggered average and spike-field coherence of
+analog signals.
+
+:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+'''
+
+from __future__ import division
+import numpy as np
+import scipy.signal
+import quantities as pq
+from neo.core import AnalogSignal, SpikeTrain
+import warnings
+from .conversion import BinnedSpikeTrain
+
+
+def spike_triggered_average(signal, spiketrains, window):
+    """
+    Calculates the spike-triggered averages of analog signals in a time window
+    relative to the spike times of a corresponding spiketrain for multiple
+    signals each. The function receives n analog signals and either one or
+    n spiketrains. In case it is one spiketrain this one is muliplied n-fold
+    and used for each of the n analog signals.
+
+    Parameters
+    ----------
+    signal : neo AnalogSignal object
+        'signal' contains n analog signals.
+    spiketrains : one SpikeTrain or one numpy ndarray or a list of n of either of these.
+        'spiketrains' contains the times of the spikes in the spiketrains.
+    window : tuple of 2 Quantity objects with dimensions of time.
+        'window' is the start time and the stop time, relative to a spike, of
+        the time interval for signal averaging.
+        If the window size is not a multiple of the sampling interval of the
+        signal, the window will be extended to the next multiple.
+
+    Returns
+    -------
+    result_sta : neo AnalogSignal object
+        'result_sta' contains the spike-triggered averages of each of the
+        analog signals with respect to the spikes in the corresponding
+        spiketrains. The length of 'result_sta' is calculated as the number
+        of bins from the given start and stop time of the averaging interval
+        and the sampling rate of the analog signal. If, for an analog signal,
+        no spikes were given or all given spikes had to be ignored because the
+        averaging interval was too large, the corresponding returned analog
+        signal has all entries set to NaN. The numbers of used and unused
+        spikes for each analog signal are returned as annotations of the
+        returned AnalogSignal object.
+
+    Examples
+    --------
+
+    >>> signal = neo.AnalogSignal(np.array([signal1, signal2]).T, units='mV',
+    ...                                sampling_rate=10/ms)
+    >>> stavg = spike_triggered_average(signal, [spiketrain1, spiketrain2],
+    ...                                 (-5 * ms, 10 * ms))
+
+    """
+
+    # checking compatibility of data and data types
+    # window_starttime: time to specify the start time of the averaging
+    # interval relative to a spike
+    # window_stoptime: time to specify the stop time of the averaging
+    # interval relative to a spike
+    window_starttime, window_stoptime = window
+    if not (isinstance(window_starttime, pq.quantity.Quantity) and
+            window_starttime.dimensionality.simplified ==
+            pq.Quantity(1, "s").dimensionality):
+        raise TypeError("The start time of the window (window[0]) "
+                        "must be a time quantity.")
+    if not (isinstance(window_stoptime, pq.quantity.Quantity) and
+            window_stoptime.dimensionality.simplified ==
+            pq.Quantity(1, "s").dimensionality):
+        raise TypeError("The stop time of the window (window[1]) "
+                        "must be a time quantity.")
+    if window_stoptime <= window_starttime:
+        raise ValueError("The start time of the window (window[0]) must be "
+                         "earlier than the stop time of the window (window[1]).")
+
+    # checks on signal
+    if not isinstance(signal, AnalogSignal):
+        raise TypeError(
+            "Signal must be an AnalogSignal, not %s." % type(signal))
+    if len(signal.shape) > 1:
+        # num_signals: number of analog signals
+        num_signals = signal.shape[1]
+    else:
+        raise ValueError("Empty analog signal, hence no averaging possible.")
+    if window_stoptime - window_starttime > signal.t_stop - signal.t_start:
+        raise ValueError("The chosen time window is larger than the "
+                         "time duration of the signal.")
+
+    # spiketrains type check
+    if isinstance(spiketrains, (np.ndarray, SpikeTrain)):
+        spiketrains = [spiketrains]
+    elif isinstance(spiketrains, list):
+        for st in spiketrains:
+            if not isinstance(st, (np.ndarray, SpikeTrain)):
+                raise TypeError(
+                    "spiketrains must be a SpikeTrain, a numpy ndarray, or a "
+                    "list of one of those, not %s." % type(spiketrains))
+    else:
+        raise TypeError(
+            "spiketrains must be a SpikeTrain, a numpy ndarray, or a list of "
+            "one of those, not %s." % type(spiketrains))
+
+    # multiplying spiketrain in case only a single spiketrain is given
+    if len(spiketrains) == 1 and num_signals != 1:
+        template = spiketrains[0]
+        spiketrains = []
+        for i in range(num_signals):
+            spiketrains.append(template)
+
+    # checking for matching numbers of signals and spiketrains
+    if num_signals != len(spiketrains):
+        raise ValueError(
+            "The number of signals and spiketrains has to be the same.")
+
+    # checking the times of signal and spiketrains
+    for i in range(num_signals):
+        if spiketrains[i].t_start < signal.t_start:
+            raise ValueError(
+                "The spiketrain indexed by %i starts earlier than "
+                "the analog signal." % i)
+        if spiketrains[i].t_stop > signal.t_stop:
+            raise ValueError(
+                "The spiketrain indexed by %i stops later than "
+                "the analog signal." % i)
+
+    # *** Main algorithm: ***
+
+    # window_bins: number of bins of the chosen averaging interval
+    window_bins = int(np.ceil(((window_stoptime - window_starttime) *
+        signal.sampling_rate).simplified))
+    # result_sta: array containing finally the spike-triggered averaged signal
+    result_sta = AnalogSignal(np.zeros((window_bins, num_signals)),
+        sampling_rate=signal.sampling_rate, units=signal.units)
+    # setting of correct times of the spike-triggered average
+    # relative to the spike
+    result_sta.t_start = window_starttime
+    used_spikes = np.zeros(num_signals, dtype=int)
+    unused_spikes = np.zeros(num_signals, dtype=int)
+    total_used_spikes = 0
+
+    for i in range(num_signals):
+        # summing over all respective signal intervals around spiketimes
+        for spiketime in spiketrains[i]:
+            # checks for sufficient signal data around spiketime
+            if (spiketime + window_starttime >= signal.t_start and
+                    spiketime + window_stoptime <= signal.t_stop):
+                # calculating the startbin in the analog signal of the
+                # averaging window for spike
+                startbin = int(np.floor(((spiketime + window_starttime -
+                    signal.t_start) * signal.sampling_rate).simplified))
+                # adds the signal in selected interval relative to the spike
+                result_sta[:, i] += signal[
+                    startbin: startbin + window_bins, i]
+                # counting of the used spikes
+                used_spikes[i] += 1
+            else:
+                # counting of the unused spikes
+                unused_spikes[i] += 1
+
+        # normalization
+        result_sta[:, i] = result_sta[:, i] / used_spikes[i]
+
+        total_used_spikes += used_spikes[i]
+
+    if total_used_spikes == 0:
+        warnings.warn(
+            "No spike at all was either found or used for averaging")
+    result_sta.annotate(used_spikes=used_spikes, unused_spikes=unused_spikes)
+
+    return result_sta
+
+
+def spike_field_coherence(signal, spiketrain, **kwargs):
+    """
+    Calculates the spike-field coherence between analog signal(s) and a
+    (binned) spike train.
+
+    The current implementation makes use of scipy.signal.coherence(). Additional
+    kwargs will be directly forwarded to scipy.signal.coherence(),
+    except for the axis parameter and the sampling frequency, which will be
+    extracted from the input signals.
+
+    The spike_field_coherence function receives an analog signal array and
+    either a binned spike train or a spike train containing the original spike
+    times. In case of original spike times the spike train is binned according
+    to the sampling rate of the analog signal array.
+
+    The AnalogSignal object can contain one or multiple signal traces. In case
+    of multiple signal traces, the spike field coherence is calculated
+    individually for each signal trace and the spike train.
+
+    Parameters
+    ----------
+    signal : neo AnalogSignal object
+        'signal' contains n analog signals.
+    spiketrain : SpikeTrain or BinnedSpikeTrain
+        Single spike train to perform the analysis on. The binsize of the
+        binned spike train must match the sampling_rate of signal.
+
+    KWArgs
+    ------
+    All KWArgs are passed to scipy.signal.coherence().
+
+    Returns
+    -------
+    coherence : complex Quantity array
+        contains the coherence values calculated for each analog signal trace
+        in combination with the spike train. The first dimension corresponds to
+        the frequency, the second to the number of the signal trace.
+    frequencies : Quantity array
+        contains the frequency values corresponding to the first dimension of
+        the 'coherence' array
+
+    Example
+    -------
+
+    Plot the SFC between a regular spike train at 20 Hz, and two sinusoidal
+    time series at 20 Hz and 23 Hz, respectively.
+
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> import quantities as pq
+    >>> import neo, elephant
+
+    >>> t = pq.Quantity(range(10000), units='ms')
+    >>> f1, f2 = 20. * pq.Hz, 23. * pq.Hz
+    >>> signal = neo.AnalogSignal(np.array([
+    ...     np.sin(f1 * 2. * np.pi * t.rescale(pq.s)),
+    ...     np.sin(f2 * 2. * np.pi * t.rescale(pq.s))]).T,
+    ...     units=pq.mV, sampling_rate=1. * pq.kHz)
+    >>> spiketrain = neo.SpikeTrain(
+    ...     np.arange(0, 10000, 50) * pq.ms,
+    ...     t_start=t[0], t_stop=t[-1])
+    >>> sfc, freqs = elephant.sta.spike_field_coherence(
+    ...     signal, spiketrain, window='boxcar')
+
+    >>> plt.plot(freqs, sfc[:,0])
+    >>> plt.plot(freqs, sfc[:,1])
+    >>> plt.xlabel('Frequency [Hz]')
+    >>> plt.ylabel('SFC')
+    >>> plt.xlim((0, 60))
+    >>> plt.show()
+    """
+
+    if not hasattr(scipy.signal, 'coherence'):
+        raise AttributeError('scipy.signal.coherence is not available. The sfc '
+                             'function uses scipy.signal.coherence for '
+                             'the coherence calculation. This function is '
+                             'available for scipy version 0.16 or newer. '
+                             'Please update your scipy version.')
+
+    # spiketrains type check
+    if not isinstance(spiketrain, (SpikeTrain, BinnedSpikeTrain)):
+        raise TypeError(
+            "spiketrain must be of type SpikeTrain or BinnedSpikeTrain, "
+            "not %s." % type(spiketrain))
+
+    # checks on analogsignal
+    if not isinstance(signal, AnalogSignal):
+        raise TypeError(
+            "Signal must be an AnalogSignal, not %s." % type(signal))
+    if len(signal.shape) > 1:
+        # num_signals: number of individual traces in the analog signal
+        num_signals = signal.shape[1]
+    elif len(signal.shape) == 1:
+        num_signals = 1
+    else:
+        raise ValueError("Empty analog signal.")
+    len_signals = signal.shape[0]
+
+    # bin spiketrain if necessary
+    if isinstance(spiketrain, SpikeTrain):
+        spiketrain = BinnedSpikeTrain(
+            spiketrain, binsize=signal.sampling_period)
+
+    # check the start and stop times of signal and spike trains
+    if spiketrain.t_start < signal.t_start:
+        raise ValueError(
+            "The spiketrain starts earlier than the analog signal.")
+    if spiketrain.t_stop > signal.t_stop:
+        raise ValueError(
+            "The spiketrain stops later than the analog signal.")
+
+    # check equal time resolution for both signals
+    if spiketrain.binsize != signal.sampling_period:
+        raise ValueError(
+            "The spiketrain and signal must have a "
+            "common sampling frequency / binsize")
+
+    # calculate how many bins to add on the left of the binned spike train
+    delta_t = spiketrain.t_start - signal.t_start
+    if delta_t % spiketrain.binsize == 0:
+        left_edge = int((delta_t / spiketrain.binsize).magnitude)
+    else:
+        raise ValueError("Incompatible binning of spike train and LFP")
+    right_edge = int(left_edge + spiketrain.num_bins)
+
+    # duplicate spike trains
+    spiketrain_array = np.zeros((1, len_signals))
+    spiketrain_array[0, left_edge:right_edge] = spiketrain.to_array()
+    spiketrains_array = np.repeat(spiketrain_array, repeats=num_signals, axis=0).transpose()
+
+    # calculate coherence
+    frequencies, sfc = scipy.signal.coherence(
+        spiketrains_array, signal.magnitude,
+        fs=signal.sampling_rate.rescale('Hz').magnitude,
+        axis=0, **kwargs)
+
+    return (pq.Quantity(sfc, units=pq.dimensionless),
+            pq.Quantity(frequencies, units=pq.Hz))
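The spike_triggered_average docstring example above relies on pre-existing signal and spike-train variables; here is a self-contained sketch with synthetic data (not part of the committed file; it assumes the module is importable as elephant.sta):

    >>> import numpy as np
    >>> import quantities as pq
    >>> import neo
    >>> from elephant.sta import spike_triggered_average
    >>>
    >>> # Two channels of 1 s of noise, sampled at 10 kHz
    >>> lfp = neo.AnalogSignal(np.random.randn(10000, 2), units='mV',
    ...                        sampling_rate=10 * pq.kHz)
    >>> st1 = neo.SpikeTrain([0.12, 0.4, 0.75] * pq.s, t_stop=1 * pq.s)
    >>> st2 = neo.SpikeTrain([0.33, 0.6, 0.9] * pq.s, t_stop=1 * pq.s)
    >>> sta = spike_triggered_average(
    ...     lfp, [st1, st2], window=(-5 * pq.ms, 10 * pq.ms))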

File diff suppressed because it is too large
+ 1157 - 0
code/elephant/elephant/statistics.py


+ 0 - 0
code/elephant/elephant/test/__init__.py


+ 64 - 0
code/elephant/elephant/test/make_spike_extraction_test_data.py

@@ -0,0 +1,64 @@
+def main():
+  from brian2 import start_scope,mvolt,ms,NeuronGroup,StateMonitor,run
+  import matplotlib.pyplot as plt
+  import neo
+  import quantities as pq
+
+  start_scope()
+  
+  # Izhikevich neuron parameters.  
+  a = 0.02/ms
+  b = 0.2/ms
+  c = -65*mvolt
+  d = 6*mvolt/ms
+  I = 4*mvolt/ms
+  
+  # Standard Izhikevich neuron equations.  
+  eqs = '''
+  dv/dt = 0.04*v**2/(ms*mvolt) + (5/ms)*v + 140*mvolt/ms - u + I : volt
+  du/dt = a*((b*v) - u) : volt/second
+  '''
+  
+  reset = '''
+  v = c
+  u += d
+  '''
+  
+  # Setup and run simulation.  
+  G = NeuronGroup(1, eqs, threshold='v>30*mvolt', reset='v = -70*mvolt')
+  G.v = -65*mvolt
+  G.u = b*G.v
+  M = StateMonitor(G, 'v', record=True)
+  run(300*ms)
+  
+  # Store results in neo format.  
+  vm = neo.core.AnalogSignal(M.v[0], units=pq.V, sampling_period=0.1*pq.ms)
+  
+  # Plot results.  
+  plt.figure()
+  plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s.  
+  plt.xlabel('Time (ms)')
+  plt.ylabel('mV')
+  
+  # Save results.  
+  iom = neo.io.PyNNNumpyIO('spike_extraction_test_data')
+  block = neo.core.Block()
+  segment = neo.core.Segment()
+  segment.analogsignals.append(vm)
+  block.segments.append(segment)
+  iom.write(block)
+  
+  # Load results.  
+  iom2 = neo.io.PyNNNumpyIO('spike_extraction_test_data.npz')
+  data = iom2.read()
+  vm = data[0].segments[0].analogsignals[0]
+  
+  # Plot results. 
+  # The two figures should match.   
+  plt.figure()
+  plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s.  
+  plt.xlabel('Time (ms)')
+  plt.ylabel('mV')
+  
+if __name__ == '__main__':
+  main()

BIN
code/elephant/elephant/test/spike_extraction_test_data.npz


+ 228 - 0
code/elephant/elephant/test/test_asset.py

@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the ASSET analysis.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+import numpy as np
+import scipy.spatial
+import quantities as pq
+import neo
+
+try:
+    import sklearn
+except ImportError:
+    HAVE_SKLEARN = False
+else:
+    import elephant.asset as asset
+    HAVE_SKLEARN = True
+    stretchedmetric2d = asset._stretched_metric_2d
+    cluster = asset.cluster_matrix_entries
+
+
+@unittest.skipUnless(HAVE_SKLEARN, 'requires sklearn')
+class AssetTestCase(unittest.TestCase):
+
+    def test_stretched_metric_2d_size(self):
+        nr_points = 4
+        x = np.arange(nr_points)
+        D = stretchedmetric2d(x, x, stretch=1, ref_angle=45)
+        self.assertEqual(D.shape, (nr_points, nr_points))
+
+    def test_stretched_metric_2d_correct_stretching(self):
+        x = (0, 1, 0)
+        y = (0, 0, 1)
+        stretch = 10
+        ref_angle = 0
+        D = stretchedmetric2d(x, y, stretch=stretch, ref_angle=ref_angle)
+        self.assertEqual(D[0, 1], 1)
+        self.assertEqual(D[0, 2], stretch)
+
+    def test_stretched_metric_2d_symmetric(self):
+        x = (1, 2, 2)
+        y = (1, 2, 0)
+        stretch = 10
+        D = stretchedmetric2d(x, y, stretch=stretch, ref_angle=45)
+        np.testing.assert_array_almost_equal(D, D.T, decimal=12)
+
+    def test_stretched_metric_2d_equals_euclidean_if_stretch_1(self):
+        x = np.arange(10)
+        y = x ** 2 - 2 * x - 4
+        # compute stretched distance matrix
+        stretch = 1
+        D = stretchedmetric2d(x, y, stretch=stretch, ref_angle=45)
+        # Compute Euclidean distance matrix
+        points = np.vstack([x, y]).T
+        E = scipy.spatial.distance_matrix(points, points)
+        # assert D == E
+        np.testing.assert_array_almost_equal(D, E, decimal=12)
+
+    def test_cluster_correct(self):
+        mat = np.zeros((6, 6))
+        mat[[2, 4, 5], [0, 0, 1]] = 1
+        mat_clustered = cluster(mat, eps=4, min=2, stretch=6)
+
+        mat_correct = np.zeros((6, 6))
+        mat_correct[[4, 5], [0, 1]] = 1
+        mat_correct[2, 0] = -1
+        np.testing.assert_array_equal(mat_clustered, mat_correct)
+
+    def test_cluster_symmetric(self):
+        x = [0, 1, 2, 5, 6, 7]
+        y = [3, 4, 5, 1, 2, 3]
+        mat = np.zeros((10, 10))
+        mat[x, y] = 1
+        mat = mat + mat.T
+        # compute stretched distance matrix
+        mat_clustered = cluster(mat, eps=4, min=2, stretch=6)
+        mat_equals_m1 = (mat_clustered == -1)
+        mat_equals_0 = (mat_clustered == 0)
+        mat_larger_0 = (mat_clustered > 0)
+        np.testing.assert_array_equal(mat_equals_m1, mat_equals_m1.T)
+        np.testing.assert_array_equal(mat_equals_0, mat_equals_0.T)
+        np.testing.assert_array_equal(mat_larger_0, mat_larger_0.T)
+
+    def test_sse_difference(self):
+        a = {(1, 2): set([1, 2, 3]), (3, 4): set([5, 6]), (6, 7): set([0, 1])}
+        b = {(1, 2): set([1, 2, 5]), (5, 6): set([0, 2]), (6, 7): set([0, 1])}
+        diff_ab_pixelwise = {(3, 4): set([5, 6])}
+        diff_ba_pixelwise = {(5, 6): set([0, 2])}
+        diff_ab_linkwise = {(1, 2): set([3]), (3, 4): set([5, 6])}
+        diff_ba_linkwise = {(1, 2): set([5]), (5, 6): set([0, 2])}
+        self.assertEqual(
+            asset.sse_difference(a, b, 'pixelwise'), diff_ab_pixelwise)
+        self.assertEqual(
+            asset.sse_difference(b, a, 'pixelwise'), diff_ba_pixelwise)
+        self.assertEqual(
+            asset.sse_difference(a, b, 'linkwise'), diff_ab_linkwise)
+        self.assertEqual(
+            asset.sse_difference(b, a, 'linkwise'), diff_ba_linkwise)
+
+    def test_sse_intersection(self):
+        a = {(1, 2): set([1, 2, 3]), (3, 4): set([5, 6]), (6, 7): set([0, 1])}
+        b = {(1, 2): set([1, 2, 5]), (5, 6): set([0, 2]), (6, 7): set([0, 1])}
+        inters_ab_pixelwise = {(1, 2): set([1, 2, 3]), (6, 7): set([0, 1])}
+        inters_ba_pixelwise = {(1, 2): set([1, 2, 5]), (6, 7): set([0, 1])}
+        inters_ab_linkwise = {(1, 2): set([1, 2]), (6, 7): set([0, 1])}
+        inters_ba_linkwise = {(1, 2): set([1, 2]), (6, 7): set([0, 1])}
+        self.assertEqual(
+            asset.sse_intersection(a, b, 'pixelwise'), inters_ab_pixelwise)
+        self.assertEqual(
+            asset.sse_intersection(b, a, 'pixelwise'), inters_ba_pixelwise)
+        self.assertEqual(
+            asset.sse_intersection(a, b, 'linkwise'), inters_ab_linkwise)
+        self.assertEqual(
+            asset.sse_intersection(b, a, 'linkwise'), inters_ba_linkwise)
+
+    def test_sse_relations(self):
+        a = {(1, 2): set([1, 2, 3]), (3, 4): set([5, 6]), (6, 7): set([0, 1])}
+        b = {(1, 2): set([1, 2, 5]), (5, 6): set([0, 2]), (6, 7): set([0, 1])}
+        c = {(5, 6): set([0, 2])}
+        d = {(3, 4): set([0, 1]), (5, 6): set([0, 1, 2])}
+        self.assertTrue(asset.sse_isequal({}, {}))
+        self.assertTrue(asset.sse_isequal(a, a))
+        self.assertFalse(asset.sse_isequal(b, c))
+        self.assertTrue(asset.sse_isdisjoint(a, c))
+        self.assertTrue(asset.sse_isdisjoint(a, d))
+        self.assertFalse(asset.sse_isdisjoint(a, b))
+        self.assertTrue(asset.sse_issub(c, b))
+        self.assertTrue(asset.sse_issub(c, d))
+        self.assertFalse(asset.sse_issub(a, b))
+        self.assertTrue(asset.sse_issuper(b, c))
+        self.assertTrue(asset.sse_issuper(d, c))
+        self.assertFalse(asset.sse_issuper(a, b))
+        self.assertTrue(asset.sse_overlap(a, b))
+        self.assertFalse(asset.sse_overlap(c, d))
+
+    def test_mask_matrix(self):
+        mat1 = np.array([[0, 1], [1, 2]])
+        mat2 = np.array([[2, 1], [1, 3]])
+        mask_1_2 = asset.mask_matrices([mat1, mat2], [1, 2])
+        mask_1_2_correct = np.array([[False, False], [False, True]])
+        self.assertTrue(np.all(mask_1_2 == mask_1_2_correct))
+        self.assertIsInstance(mask_1_2[0, 0], np.bool_)
+
+    def test_cluster_matrix_entries(self):
+        mat = np.array([[False, False, True, False],
+                        [False, True, False, False],
+                        [True, False, False, True],
+                        [False, False, True, False]])
+        clustered1 = asset.cluster_matrix_entries(
+            mat, eps=1.5, min=2, stretch=1)
+        clustered2 = asset.cluster_matrix_entries(
+            mat, eps=1.5, min=3, stretch=1)
+        clustered1_correctA = np.array([[0, 0, 1, 0],
+                                       [0, 1, 0, 0],
+                                       [1, 0, 0, 2],
+                                       [0, 0, 2, 0]])
+        clustered1_correctB = np.array([[0, 0, 2, 0],
+                                       [0, 2, 0, 0],
+                                       [2, 0, 0, 1],
+                                       [0, 0, 1, 0]])
+        clustered2_correct = np.array([[0, 0, 1, 0],
+                                       [0, 1, 0, 0],
+                                       [1, 0, 0, -1],
+                                       [0, 0, -1, 0]])
+        self.assertTrue(np.all(clustered1 == clustered1_correctA) or
+                        np.all(clustered1 == clustered1_correctB))
+        self.assertTrue(np.all(clustered2 == clustered2_correct))
+
+    def test_intersection_matrix(self):
+        st1 = neo.SpikeTrain([1, 2, 4]*pq.ms, t_stop=6*pq.ms)
+        st2 = neo.SpikeTrain([1, 3, 4]*pq.ms, t_stop=6*pq.ms)
+        st3 = neo.SpikeTrain([2, 5]*pq.ms, t_start=1*pq.ms, t_stop=6*pq.ms)
+        st4 = neo.SpikeTrain([1, 3, 6]*pq.ms, t_stop=8*pq.ms)
+        binsize = 1 * pq.ms
+
+        # Check that the routine works for correct input...
+        # ...same t_start, t_stop on both time axes
+        imat_1_2, xedges, yedges = asset.intersection_matrix(
+            [st1, st2], binsize, dt=5*pq.ms)
+        trueimat_1_2 = np.array([[0.,  0.,  0.,  0.,  0.],
+                                 [0.,  2.,  1.,  1.,  2.],
+                                 [0.,  1.,  1.,  0.,  1.],
+                                 [0.,  1.,  0.,  1.,  1.],
+                                 [0.,  2.,  1.,  1.,  2.]])
+        self.assertTrue(np.all(xedges == np.arange(6)*pq.ms))  # correct bins
+        self.assertTrue(np.all(yedges == np.arange(6)*pq.ms))  # correct bins
+        self.assertTrue(np.all(imat_1_2 == trueimat_1_2))  # correct matrix
+        # ...different t_start, t_stop on the two time axes
+        imat_1_2, xedges, yedges = asset.intersection_matrix(
+            [st1, st2], binsize, t_start_y=1*pq.ms, dt=5*pq.ms)
+        trueimat_1_2 = np.array([[0.,  0.,  0.,  0., 0.],
+                                 [2.,  1.,  1.,  2., 0.],
+                                 [1.,  1.,  0.,  1., 0.],
+                                 [1.,  0.,  1.,  1., 0.],
+                                 [2.,  1.,  1.,  2., 0.]])
+        self.assertTrue(np.all(xedges == np.arange(6)*pq.ms))  # correct bins
+        self.assertTrue(np.all(imat_1_2 == trueimat_1_2))  # correct matrix
+
+        # Check that errors are raised correctly...
+        # ...for dt too large compared to length of spike trains
+        self.assertRaises(ValueError, asset.intersection_matrix,
+                          spiketrains=[st1, st2], binsize=binsize, dt=8*pq.ms)
+        # ...for different SpikeTrain's t_starts
+        self.assertRaises(ValueError, asset.intersection_matrix,
+                          spiketrains=[st1, st3], binsize=binsize, dt=8*pq.ms)
+        # ...when the analysis is specified for a time span where the
+        # spike trains are not defined (e.g. t_start_x < SpikeTrain.t_start)
+        self.assertRaises(ValueError, asset.intersection_matrix,
+                          spiketrains=[st1, st2], binsize=binsize, dt=8*pq.ms,
+                          t_start_x=-2*pq.ms, t_start_y=-2*pq.ms)
+
+
+def suite():
+    suite = unittest.makeSuite(AssetTestCase, 'test')
+    return suite
+
+
+def run():
+    runner = unittest.TextTestRunner(verbosity=2)
+    runner.run(suite())
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 505 - 0
code/elephant/elephant/test/test_conversion.py

@@ -0,0 +1,505 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the conversion module.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+
+import neo
+import numpy as np
+from numpy.testing.utils import assert_array_almost_equal
+import quantities as pq
+
+import elephant.conversion as cv
+
+
+def get_nearest(times, time):
+    return (np.abs(times-time)).argmin()
+
+
+class binarize_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_array_1d = np.array([1.23, 0.3, 0.87, 0.56])
+
+    def test_binarize_with_spiketrain_exact(self):
+        st = neo.SpikeTrain(self.test_array_1d, units='ms',
+                            t_stop=10.0, sampling_rate=100)
+        times = np.arange(0, 10.+.01, .01)
+        target = np.zeros_like(times).astype('bool')
+        for time in self.test_array_1d:
+            target[get_nearest(times, time)] = True
+        times = pq.Quantity(times, units='ms')
+
+        res, tres = cv.binarize(st, return_times=True)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_with_spiketrain_exact_set_ends(self):
+        st = neo.SpikeTrain(self.test_array_1d, units='ms',
+                            t_stop=10.0, sampling_rate=100)
+        times = np.arange(5., 10.+.01, .01)
+        target = np.zeros_like(times).astype('bool')
+        times = pq.Quantity(times, units='ms')
+
+        res, tres = cv.binarize(st, return_times=True, t_start=5., t_stop=10.)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_with_spiketrain_round(self):
+        st = neo.SpikeTrain(self.test_array_1d, units='ms',
+                            t_stop=10.0, sampling_rate=10.0)
+        times = np.arange(0, 10.+.1, .1)
+        target = np.zeros_like(times).astype('bool')
+        for time in np.round(self.test_array_1d, 1):
+            target[get_nearest(times, time)] = True
+        times = pq.Quantity(times, units='ms')
+
+        res, tres = cv.binarize(st, return_times=True)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_with_quantities_exact(self):
+        st = pq.Quantity(self.test_array_1d, units='ms')
+        times = np.arange(0, 1.23+.01, .01)
+        target = np.zeros_like(times).astype('bool')
+        for time in self.test_array_1d:
+            target[get_nearest(times, time)] = True
+        times = pq.Quantity(times, units='ms')
+
+        res, tres = cv.binarize(st, return_times=True,
+                                sampling_rate=100.*pq.kHz)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_with_quantities_exact_set_ends(self):
+        st = pq.Quantity(self.test_array_1d, units='ms')
+        times = np.arange(0, 10.+.01, .01)
+        target = np.zeros_like(times).astype('bool')
+        for time in self.test_array_1d:
+            target[get_nearest(times, time)] = True
+        times = pq.Quantity(times, units='ms')
+
+        res, tres = cv.binarize(st, return_times=True, t_stop=10.,
+                                sampling_rate=100.*pq.kHz)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_with_quantities_round_set_ends(self):
+        st = pq.Quantity(self.test_array_1d, units='ms')
+        times = np.arange(5., 10.+.1, .1)
+        target = np.zeros_like(times).astype('bool')
+        times = pq.Quantity(times, units='ms')
+
+        res, tres = cv.binarize(st, return_times=True, t_start=5., t_stop=10.,
+                                sampling_rate=10.*pq.kHz)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_with_plain_array_exact(self):
+        st = self.test_array_1d
+        times = np.arange(0, 1.23+.01, .01)
+        target = np.zeros_like(times).astype('bool')
+        for time in self.test_array_1d:
+            target[get_nearest(times, time)] = True
+
+        res, tres = cv.binarize(st, return_times=True, sampling_rate=100)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_with_plain_array_exact_set_ends(self):
+        st = self.test_array_1d
+        times = np.arange(0, 10.+.01, .01)
+        target = np.zeros_like(times).astype('bool')
+        for time in self.test_array_1d:
+            target[get_nearest(times, time)] = True
+
+        res, tres = cv.binarize(st, return_times=True, t_stop=10., sampling_rate=100.)
+        assert_array_almost_equal(res, target, decimal=9)
+        assert_array_almost_equal(tres, times, decimal=9)
+
+    def test_binarize_no_time(self):
+        st = self.test_array_1d
+        times = np.arange(0, 1.23+.01, .01)
+        target = np.zeros_like(times).astype('bool')
+        for time in self.test_array_1d:
+            target[get_nearest(times, time)] = True
+
+        res0, tres = cv.binarize(st, return_times=True, sampling_rate=100)
+        res1 = cv.binarize(st, return_times=False, sampling_rate=100)
+        res2 = cv.binarize(st, sampling_rate=100)
+        assert_array_almost_equal(res0, res1, decimal=9)
+        assert_array_almost_equal(res0, res2, decimal=9)
+
+    def test_binarize_rate_with_plain_array_and_units_typeerror(self):
+        st = self.test_array_1d
+        self.assertRaises(TypeError, cv.binarize, st,
+                          t_start=pq.Quantity(0, 'ms'),
+                          sampling_rate=10.)
+        self.assertRaises(TypeError, cv.binarize, st,
+                          t_stop=pq.Quantity(10, 'ms'),
+                          sampling_rate=10.)
+        self.assertRaises(TypeError, cv.binarize, st,
+                          t_start=pq.Quantity(0, 'ms'),
+                          t_stop=pq.Quantity(10, 'ms'),
+                          sampling_rate=10.)
+        self.assertRaises(TypeError, cv.binarize, st,
+                          t_start=pq.Quantity(0, 'ms'),
+                          t_stop=10.,
+                          sampling_rate=10.)
+        self.assertRaises(TypeError, cv.binarize, st,
+                          t_start=0.,
+                          t_stop=pq.Quantity(10, 'ms'),
+                          sampling_rate=10.)
+        self.assertRaises(TypeError, cv.binarize, st,
+                          sampling_rate=10.*pq.Hz)
+
+    def test_binarize_without_sampling_rate_valueerror(self):
+        st0 = self.test_array_1d
+        st1 = pq.Quantity(st0, 'ms')
+        self.assertRaises(ValueError, cv.binarize, st0)
+        self.assertRaises(ValueError, cv.binarize, st0,
+                          t_start=0)
+        self.assertRaises(ValueError, cv.binarize, st0,
+                          t_stop=10)
+        self.assertRaises(ValueError, cv.binarize, st0,
+                          t_start=0, t_stop=10)
+        self.assertRaises(ValueError, cv.binarize, st1,
+                          t_start=pq.Quantity(0, 'ms'), t_stop=10.)
+        self.assertRaises(ValueError, cv.binarize, st1,
+                          t_start=0., t_stop=pq.Quantity(10, 'ms'))
+        self.assertRaises(ValueError, cv.binarize, st1)
+
+
+class TimeHistogramTestCase(unittest.TestCase):
+    def setUp(self):
+        self.spiketrain_a = neo.SpikeTrain(
+            [0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
+        self.spiketrain_b = neo.SpikeTrain(
+            [0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
+        self.binsize = 1 * pq.s
+
+    def tearDown(self):
+        self.spiketrain_a = None
+        del self.spiketrain_a
+        self.spiketrain_b = None
+        del self.spiketrain_b
+
+    def test_binned_spiketrain_sparse(self):
+        a = neo.SpikeTrain([1.7, 1.8, 4.3] * pq.s, t_stop=10.0 * pq.s)
+        b = neo.SpikeTrain([1.7, 1.8, 4.3] * pq.s, t_stop=10.0 * pq.s)
+        binsize = 1 * pq.s
+        nbins = 10
+        x = cv.BinnedSpikeTrain([a, b], num_bins=nbins, binsize=binsize,
+                                t_start=0 * pq.s)
+        x_sparse = [2, 1, 2, 1]
+        s = x.to_sparse_array()
+        self.assertTrue(np.array_equal(s.data, x_sparse))
+        self.assertTrue(
+            np.array_equal(x.spike_indices, [[1, 1, 4], [1, 1, 4]]))
+
+    def test_binned_spiketrain_shape(self):
+        a = self.spiketrain_a
+        x = cv.BinnedSpikeTrain(a, num_bins=10,
+                                binsize=self.binsize,
+                                t_start=0 * pq.s)
+        x_bool = cv.BinnedSpikeTrain(a, num_bins=10, binsize=self.binsize,
+                                     t_start=0 * pq.s)
+        self.assertTrue(x.to_array().shape == (1, 10))
+        self.assertTrue(x_bool.to_bool_array().shape == (1, 10))
+
+    # shape of the matrix for a list of spike trains
+    def test_binned_spiketrain_shape_list(self):
+        a = self.spiketrain_a
+        b = self.spiketrain_b
+        c = [a, b]
+        nbins = 5
+        x = cv.BinnedSpikeTrain(c, num_bins=nbins, t_start=0 * pq.s,
+                                t_stop=10.0 * pq.s)
+        x_bool = cv.BinnedSpikeTrain(c, num_bins=nbins, t_start=0 * pq.s,
+                                     t_stop=10.0 * pq.s)
+        self.assertTrue(x.to_array().shape == (2, 5))
+        self.assertTrue(x_bool.to_bool_array().shape == (2, 5))
+
+    def test_binned_spiketrain_neg_times(self):
+        a = neo.SpikeTrain(
+            [-6.5, 0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
+            t_start=-6.5 * pq.s, t_stop=10.0 * pq.s)
+        binsize = self.binsize
+        nbins = 16
+        x = cv.BinnedSpikeTrain(a, num_bins=nbins, binsize=binsize,
+                                t_start=-6.5 * pq.s)
+        y = [
+            np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0])]
+        self.assertTrue(np.array_equal(x.to_bool_array(), y))
+
+    def test_binned_spiketrain_neg_times_list(self):
+        a = neo.SpikeTrain(
+            [-6.5, 0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s,
+            t_start=-7 * pq.s, t_stop=7 * pq.s)
+        b = neo.SpikeTrain(
+            [-0.1, -0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s,
+            t_start=-1 * pq.s, t_stop=8 * pq.s)
+        c = [a, b]
+
+        binsize = self.binsize
+        x_bool = cv.BinnedSpikeTrain(c, binsize=binsize)
+        y_bool = [[0, 1, 1, 0, 1, 1, 1, 1],
+                  [1, 0, 1, 1, 0, 1, 1, 0]]
+
+        self.assertTrue(
+            np.array_equal(x_bool.to_bool_array(), y_bool))
+
+    # Check spike_indices and the binned matrices for a single spike train
+    def test_binned_spiketrain_indices(self):
+        a = self.spiketrain_a
+        binsize = self.binsize
+        nbins = 10
+        x = cv.BinnedSpikeTrain(a, num_bins=nbins, binsize=binsize,
+                                t_start=0 * pq.s)
+        x_bool = cv.BinnedSpikeTrain(a, num_bins=nbins, binsize=binsize,
+                                     t_start=0 * pq.s)
+        y_matrix = [
+            np.array([2., 1., 0., 1., 1., 1., 1., 0., 0., 0.])]
+        y_bool_matrix = [
+            np.array([1., 1., 0., 1., 1., 1., 1., 0., 0., 0.])]
+        self.assertTrue(
+            np.array_equal(x.to_array(),
+                           y_matrix))
+        self.assertTrue(
+            np.array_equal(x_bool.to_bool_array(), y_bool_matrix))
+        self.assertTrue(
+            np.array_equal(x_bool.to_bool_array(), y_bool_matrix))
+        s = x_bool.to_sparse_bool_array()[
+            x_bool.to_sparse_bool_array().nonzero()]
+        self.assertTrue(np.array_equal(s, [[True]*6]))
+
+    def test_binned_spiketrain_list(self):
+        a = self.spiketrain_a
+        b = self.spiketrain_b
+
+        binsize = self.binsize
+        nbins = 10
+        c = [a, b]
+        x = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize,
+                                t_start=0 * pq.s)
+        x_bool = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize,
+                                     t_start=0 * pq.s)
+        y_matrix = np.array(
+            [[2, 1, 0, 1, 1, 1, 1, 0, 0, 0],
+             [2, 1, 1, 0, 1, 1, 0, 0, 1, 0]])
+        y_matrix_bool = np.array(
+            [[1, 1, 0, 1, 1, 1, 1, 0, 0, 0],
+             [1, 1, 1, 0, 1, 1, 0, 0, 1, 0]])
+        self.assertTrue(
+            np.array_equal(x.to_array(),
+                           y_matrix))
+        self.assertTrue(
+            np.array_equal(x_bool.to_bool_array(), y_matrix_bool))
+
+    # t_stop is None
+    def test_binned_spiketrain_list_t_stop(self):
+        a = self.spiketrain_a
+        b = self.spiketrain_b
+        c = [a, b]
+        binsize = self.binsize
+        nbins = 10
+        x = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize,
+                                t_start=0 * pq.s,
+                                t_stop=None)
+        x_bool = cv.BinnedSpikeTrain(c, num_bins=nbins, binsize=binsize,
+                                     t_start=0 * pq.s)
+        self.assertTrue(x.t_stop == 10 * pq.s)
+        self.assertTrue(x_bool.t_stop == 10 * pq.s)
+
+    # Test number of bins
+    def test_binned_spiketrain_list_numbins(self):
+        a = self.spiketrain_a
+        b = self.spiketrain_b
+        c = [a, b]
+        binsize = 1 * pq.s
+        x = cv.BinnedSpikeTrain(c, binsize=binsize, t_start=0 * pq.s,
+                                t_stop=10. * pq.s)
+        x_bool = cv.BinnedSpikeTrain(c, binsize=binsize, t_start=0 * pq.s,
+                                     t_stop=10. * pq.s)
+        self.assertTrue(x.num_bins == 10)
+        self.assertTrue(x_bool.num_bins == 10)
+
+    def test_binned_spiketrain_matrix(self):
+        # Init
+        a = self.spiketrain_a
+        b = self.spiketrain_b
+        x_bool_a = cv.BinnedSpikeTrain(a, binsize=pq.s, t_start=0 * pq.s,
+                                       t_stop=10. * pq.s)
+        x_bool_b = cv.BinnedSpikeTrain(b, binsize=pq.s, t_start=0 * pq.s,
+                                       t_stop=10. * pq.s)
+
+        # Assumed results
+        y_matrix_a = [
+            np.array([2, 1, 0, 1, 1, 1, 1, 0, 0, 0])]
+        y_matrix_bool_a = [np.array([1, 1, 0, 1, 1, 1, 1, 0, 0, 0])]
+        y_matrix_bool_b = [np.array([1, 1, 1, 0, 1, 1, 0, 0, 1, 0])]
+
+        # Asserts
+        self.assertTrue(
+            np.array_equal(x_bool_a.to_bool_array(), y_matrix_bool_a))
+        self.assertTrue(np.array_equal(x_bool_b.to_bool_array(),
+                                       y_matrix_bool_b))
+        self.assertTrue(
+            np.array_equal(x_bool_a.to_array(), y_matrix_a))
+
+    def test_binned_spiketrain_matrix_storing(self):
+        a = self.spiketrain_a
+        b = self.spiketrain_b
+
+        x_bool = cv.BinnedSpikeTrain(a, binsize=pq.s, t_start=0 * pq.s,
+                                     t_stop=10. * pq.s)
+        x = cv.BinnedSpikeTrain(b, binsize=pq.s, t_start=0 * pq.s,
+                                t_stop=10. * pq.s)
+        # Store Matrix in variable
+        matrix_bool = x_bool.to_bool_array()
+        matrix = x.to_array(store_array=True)
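+        # to_array(store_array=True) also caches the dense matrix on the
+        # object; the assertions below check the cached copy via the private
+        # attribute _mat_u, and remove_stored_array() (next test) clears it.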
+
+        # Check if same matrix
+        self.assertTrue(np.array_equal(x._mat_u,
+                                       matrix))
+        # Get the stored matrix using method
+        self.assertTrue(
+            np.array_equal(x_bool.to_bool_array(),
+                           matrix_bool))
+        self.assertTrue(
+            np.array_equal(x.to_array(),
+                           matrix))
+
+        # Test storing of sparse mat
+        sparse_bool = x_bool.to_sparse_bool_array()
+        self.assertTrue(np.array_equal(sparse_bool.toarray(),
+                                       x_bool.to_sparse_bool_array().toarray()))
+
+        # New class without calculating the matrix
+        x = cv.BinnedSpikeTrain(b, binsize=pq.s, t_start=0 * pq.s,
+                                t_stop=10. * pq.s)
+        # No matrix calculated, should be None
+        self.assertEqual(x._mat_u, None)
+        # Test with stored matrix
+        self.assertFalse(np.array_equal(x, matrix))
+
+    # Test matrix removing
+    def test_binned_spiketrain_remove_matrix(self):
+        a = self.spiketrain_a
+        x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, num_bins=10,
+                                t_stop=10. * pq.s)
+        # Store
+        x.to_array(store_array=True)
+        # Remove
+        x.remove_stored_array()
+        # Assert matrix is not stored
+        self.assertIsNone(x._mat_u)
+
+    # Test if t_start is calculated correctly
+    def test_binned_spiketrain_parameter_calc_tstart(self):
+        a = self.spiketrain_a
+        x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, num_bins=10,
+                                t_stop=10. * pq.s)
+        self.assertEqual(x.t_start, 0. * pq.s)
+        self.assertEqual(x.t_stop, 10. * pq.s)
+        self.assertEqual(x.binsize, 1 * pq.s)
+        self.assertEqual(x.num_bins, 10)
+
+    # Test that an error is raised when num_bins is not an integer
+    def test_binned_spiketrain_numbins_type_error(self):
+        a = self.spiketrain_a
+        self.assertRaises(TypeError, cv.BinnedSpikeTrain, a, binsize=pq.s,
+                          num_bins=1.4, t_start=0 * pq.s,
+                          t_stop=10. * pq.s)
+
+    # Test if error is raised when providing insufficient number of
+    # parameters
+    def test_binned_spiketrain_insufficient_arguments(self):
+        a = self.spiketrain_a
+        self.assertRaises(AttributeError, cv.BinnedSpikeTrain, a)
+        self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, binsize=1 * pq.s,
+                          t_start=0 * pq.s, t_stop=0 * pq.s)
+
+    def test_calc_attributes_error(self):
+        self.assertRaises(ValueError, cv._calc_num_bins, 1, 1 * pq.s, 0 * pq.s)
+        self.assertRaises(ValueError, cv._calc_binsize, 1, 1 * pq.s, 0 * pq.s)
+
+    def test_different_input_types(self):
+        a = self.spiketrain_a
+        q = [1, 2, 3] * pq.s
+        self.assertRaises(TypeError, cv.BinnedSpikeTrain, [a, q], binsize=pq.s)
+
+    def test_get_start_stop(self):
+        a = self.spiketrain_a
+        b = neo.SpikeTrain(
+            [-0.1, -0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s,
+            t_start=-1 * pq.s, t_stop=8 * pq.s)
+        start, stop = cv._get_start_stop_from_input(a)
+        self.assertEqual(start, a.t_start)
+        self.assertEqual(stop, a.t_stop)
+        start, stop = cv._get_start_stop_from_input([a, b])
+        self.assertEqual(start, a.t_start)
+        self.assertEqual(stop, b.t_stop)
+
+    def test_consistency_errors(self):
+        a = self.spiketrain_a
+        b = neo.SpikeTrain([-2, -1] * pq.s, t_start=-2 * pq.s,
+                           t_stop=-1 * pq.s)
+        self.assertRaises(ValueError, cv.BinnedSpikeTrain, [a, b], t_start=5,
+                          t_stop=0, binsize=pq.s, num_bins=10)
+
+        b = neo.SpikeTrain([-7, -8, -9] * pq.s, t_start=-9 * pq.s,
+                           t_stop=-7 * pq.s)
+        self.assertRaises(ValueError, cv.BinnedSpikeTrain, b, t_start=0,
+                          t_stop=10, binsize=pq.s, num_bins=10)
+        self.assertRaises(ValueError, cv.BinnedSpikeTrain, a, t_start=0 * pq.s,
+                          t_stop=10 * pq.s, binsize=3 * pq.s, num_bins=10)
+
+        b = neo.SpikeTrain([-4, -2, 0, 1] * pq.s, t_start=-4 * pq.s,
+                           t_stop=1 * pq.s)
+        self.assertRaises(TypeError, cv.BinnedSpikeTrain, b, binsize=-2*pq.s,
+                          t_start=-4 * pq.s, t_stop=0 * pq.s)
+
+    # Test edges
+    def test_binned_spiketrain_bin_edges(self):
+        a = self.spiketrain_a
+        x = cv.BinnedSpikeTrain(a, binsize=1 * pq.s, num_bins=10,
+                                t_stop=10. * pq.s)
+        # Test all edges
+        edges = [float(i) for i in range(11)]
+        self.assertTrue(np.array_equal(x.bin_edges, edges))
+
+        # Test left edges
+        edges = [float(i) for i in range(10)]
+        self.assertTrue(np.array_equal(x.bin_edges[:-1], edges))
+
+        # Test right edges
+        edges = [float(i) for i in range(1, 11)]
+        self.assertTrue(np.array_equal(x.bin_edges[1:], edges))
+
+        # Test bin centers
+        edges = np.arange(0, 10) + 0.5
+        self.assertTrue(np.array_equal(x.bin_centers, edges))
+
+    # Test for different units but same times
+    def test_binned_spiketrain_different_units(self):
+        a = self.spiketrain_a
+        b = a.rescale(pq.ms)
+        binsize = 1 * pq.s
+        xa = cv.BinnedSpikeTrain(a, binsize=binsize)
+        xb = cv.BinnedSpikeTrain(b, binsize=binsize.rescale(pq.ms))
+        self.assertTrue(
+            np.array_equal(xa.to_bool_array(), xb.to_bool_array()))
+        self.assertTrue(
+            np.array_equal(xa.to_sparse_array().data,
+                           xb.to_sparse_array().data))
+        self.assertTrue(
+            np.array_equal(xa.bin_edges[:-1],
+                           xb.bin_edges[:-1].rescale(binsize.units)))
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 157 - 0
code/elephant/elephant/test/test_csd.py

@@ -0,0 +1,157 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the CSD estimation methods
+
+This was written by:
+Chaitanya Chintaluri,
+Laboratory of Neuroinformatics,
+Nencki Institute of Experimental Biology, Warsaw.
+
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+import numpy as np
+import quantities as pq
+from elephant import current_source_density as csd
+import elephant.current_source_density_src.utility_functions as utils
+
+available_1d = ['StandardCSD', 'DeltaiCSD', 'StepiCSD', 'SplineiCSD', 'KCSD1D']
+available_2d = ['KCSD2D', 'MoIKCSD']
+available_3d = ['KCSD3D']
+kernel_methods = ['KCSD1D', 'KCSD2D', 'KCSD3D', 'MoIKCSD']
+icsd_methods = ['DeltaiCSD', 'StepiCSD', 'SplineiCSD']
+py_iCSD_toolbox = ['StandardCSD', 'DeltaiCSD', 'StepiCSD', 'SplineiCSD']
+
+
+class LFP_TestCase(unittest.TestCase):
+    def test_lfp1d_electrodes(self):
+        ele_pos = utils.generate_electrodes(dim=1).reshape(5, 1)
+        lfp = csd.generate_lfp(utils.gauss_1d_dipole, ele_pos)
+        self.assertEqual(ele_pos.shape[1], 1)
+        self.assertEqual(ele_pos.shape[0], len(lfp))
+
+    def test_lfp2d_electrodes(self):
+        ele_pos = utils.generate_electrodes(dim=2)
+        xx_ele, yy_ele = ele_pos
+        lfp = csd.generate_lfp(utils.large_source_2D, xx_ele, yy_ele)
+        self.assertEqual(len(ele_pos), 2)
+        self.assertEqual(xx_ele.shape[0], len(lfp))
+
+    def test_lfp3d_electrodes(self):
+        ele_pos = utils.generate_electrodes(dim=3, res=3)
+        xx_ele, yy_ele, zz_ele = ele_pos
+        lfp = csd.generate_lfp(utils.gauss_3d_dipole, xx_ele, yy_ele, zz_ele)
+        self.assertEqual(len(ele_pos), 3)
+        self.assertEqual(xx_ele.shape[0], len(lfp))
+
+
+class CSD1D_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.ele_pos = utils.generate_electrodes(dim=1).reshape(5, 1)
+        self.lfp = csd.generate_lfp(utils.gauss_1d_dipole, self.ele_pos)
+        self.csd_method = csd.estimate_csd
+
+        self.params = {}  # Input dictionaries for each method
+        self.params['DeltaiCSD'] = {'sigma_top': 0. * pq.S / pq.m,
+                                    'diam': 500E-6 * pq.m}
+        self.params['StepiCSD'] = {'sigma_top': 0. * pq.S / pq.m, 'tol': 1E-12,
+                                   'diam': 500E-6 * pq.m}
+        self.params['SplineiCSD'] = {'sigma_top': 0. * pq.S / pq.m,
+                                     'num_steps': 201, 'tol': 1E-12,
+                                     'diam': 500E-6 * pq.m}
+        self.params['StandardCSD'] = {}
+        self.params['KCSD1D'] = {'h': 50., 'Rs': np.array((0.1, 0.25, 0.5))}
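+
+        # Each entry of self.params carries the extra keyword arguments that
+        # its method requires; the tests below pass them on unchanged, e.g.
+        # (sketch only):
+        #     csd.estimate_csd(self.lfp, method='DeltaiCSD',
+        #                      **self.params['DeltaiCSD'])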
+
+    def test_validate_inputs(self):
+        self.assertRaises(TypeError, self.csd_method, lfp=[[1], [2], [3]])
+        self.assertRaises(ValueError, self.csd_method, lfp=self.lfp,
+                          coords=self.ele_pos * pq.mm)
+        # inconsistent number of electrodes
+        self.assertRaises(ValueError, self.csd_method, lfp=self.lfp,
+                          coords=[1, 2, 3, 4] * pq.mm, method='StandardCSD')
+        # bad method name
+        self.assertRaises(ValueError, self.csd_method, lfp=self.lfp,
+                          method='InvalidMethodName')
+        self.assertRaises(ValueError, self.csd_method, lfp=self.lfp,
+                          method='KCSD2D')
+        self.assertRaises(ValueError, self.csd_method, lfp=self.lfp,
+                          method='KCSD3D')
+
+    def test_inputs_standardcsd(self):
+        method = 'StandardCSD'
+        result = self.csd_method(self.lfp, method=method)
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(len(result.times), 1)
+
+    def test_inputs_deltasplineicsd(self):
+        methods = ['DeltaiCSD', 'SplineiCSD']
+        for method in methods:
+            self.assertRaises(ValueError, self.csd_method, lfp=self.lfp,
+                              method=method)
+            result = self.csd_method(self.lfp, method=method,
+                                     **self.params[method])
+            self.assertEqual(result.t_start, 0.0 * pq.s)
+            self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+            self.assertEqual(len(result.times), 1)
+
+    def test_inputs_stepicsd(self):
+        method = 'StepiCSD'
+        self.assertRaises(ValueError, self.csd_method, lfp=self.lfp,
+                          method=method)
+        self.assertRaises(AssertionError, self.csd_method, lfp=self.lfp,
+                          method=method, **self.params[method])
+        self.params['StepiCSD'].update({'h': np.ones(5) * 100E-6 * pq.m})
+        result = self.csd_method(self.lfp, method=method,
+                                 **self.params[method])
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(len(result.times), 1)
+
+    def test_inputs_kcsd(self):
+        method = 'KCSD1D'
+        result = self.csd_method(self.lfp, method=method,
+                                 **self.params[method])
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(len(result.times), 1)
+
+
+class CSD2D_TestCase(unittest.TestCase):
+    def setUp(self):
+        xx_ele, yy_ele = utils.generate_electrodes(dim=2)
+        self.lfp = csd.generate_lfp(utils.large_source_2D, xx_ele, yy_ele)
+        self.params = {}  # Input dictionaries for each method
+        self.params['KCSD2D'] = {'sigma': 1., 'Rs': np.array((0.1, 0.25, 0.5))}
+
+    def test_kcsd2d_init(self):
+        method = 'KCSD2D'
+        result = csd.estimate_csd(lfp=self.lfp, method=method,
+                                  **self.params[method])
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(len(result.times), 1)
+
+
+class CSD3D_TestCase(unittest.TestCase):
+    def setUp(self):
+        xx_ele, yy_ele, zz_ele = utils.generate_electrodes(dim=3)
+        self.lfp = csd.generate_lfp(utils.gauss_3d_dipole,
+                                    xx_ele, yy_ele, zz_ele)
+        self.params = {}
+        self.params['KCSD3D'] = {'gdx': 0.1, 'gdy': 0.1, 'gdz': 0.1,
+                                 'src_type': 'step',
+                                 'Rs': np.array((0.1, 0.25, 0.5))}
+
+    def test_kcsd3d_init(self):
+        method = 'KCSD3D'
+        result = csd.estimate_csd(lfp=self.lfp, method=method,
+                                  **self.params[method])
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(len(result.times), 1)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 151 - 0
code/elephant/elephant/test/test_cubic.py

@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the CUBIC analysis.
+
+:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+import elephant.cubic as cubic
+import quantities as pq
+import neo
+import numpy
+
+
+class CubicTestCase(unittest.TestCase):
+    '''
+    This test checks the implementation of the CuBIC method [1].
+    The setUp function constructs a neo.AnalogSignal representing the
+    population histogram of a population of neurons with an order of
+    correlation equal to ten. Since each bin of the population count is
+    either 0 or 10, the embedded order of correlation is exactly 10.
+    test_cubic() checks the format of all outputs and the order of
+    correlation returned by elephant.cubic.cubic().
+
+    References
+    ----------
+    [1] Staude, Rotter, Gruen (2009) J. Comp. Neurosci.
+    '''
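+
+    # Minimal usage sketch (not executed by the test runner); the exact data
+    # and parameters are those built in setUp below:
+    #
+    #     xi, p_vals, k, test_aborted = cubic.cubic(population_histogram,
+    #                                               alpha=0.05)
+    #
+    # where population_histogram is a neo.AnalogSignal or a plain numpy array
+    # of per-bin population counts.
+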
+    def setUp(self):
+        n2 = 300
+        n0 = 100000-n2
+        self.xi = 10
+        self.data_signal = neo.AnalogSignal(
+            numpy.array([self.xi] * n2 + [0] * n0).reshape(n0 + n2, 1) *
+            pq.dimensionless, sampling_period=1*pq.s)
+        self.data_array = numpy.array([self.xi] * n2 + [0] * n0)
+        self.alpha = 0.05
+        self.ximax = 10
+
+    def test_cubic(self):
+
+        # Computing the output of CuBIC for the test data AnalogSignal
+        xi, p_vals, k, test_aborted = cubic.cubic(
+            self.data_signal, alpha=self.alpha)
+
+        # Check the types of the outputs
+        self.assertIsInstance(xi, int)
+        self.assertIsInstance(p_vals, list)
+        self.assertIsInstance(k, list)
+
+        # Check that the number of tests is the output order of correlation
+        self.assertEqual(xi, len(p_vals))
+
+        # Check that the p-values of the first xi-1 tests are below the
+        # significance level alpha
+        for p in p_vals[:-1]:
+            self.assertGreater(self.alpha, p)
+
+        # Check that the last p-value exceeds the significance level
+        self.assertGreater(p_vals[-1], self.alpha)
+
+        # Check that the number of cumulants in the output is 3
+        self.assertEqual(3, len(k))
+
+        # Check the analytical constraint on the cumulants, K_1 < K_2
+        self.assertGreater(k[1], k[0])
+
+        # Check that the computed order of correlation matches the one
+        # embedded in the test data
+        self.assertEqual(xi, self.xi)
+
+        # Computing the output of CuBIC for the test data Array
+        xi, p_vals, k, test_aborted = cubic.cubic(
+            self.data_array, alpha=self.alpha)
+
+        # Check the types of the outputs
+        self.assertIsInstance(xi, int)
+        self.assertIsInstance(p_vals, list)
+        self.assertIsInstance(k, list)
+
+        # Check that the number of tests is the output order of correlation
+        self.assertEqual(xi, len(p_vals))
+
+        # Check that the p-values of the first xi-1 tests are below the
+        # significance level alpha
+        for p in p_vals[:-1]:
+            self.assertGreater(self.alpha, p)
+
+        # Check that the last p-value exceeds the significance level
+        self.assertGreater(p_vals[-1], self.alpha)
+
+        # Check that the number of cumulants in the output is 3
+        self.assertEqual(3, len(k))
+
+        # Check the analytical constraint on the cumulants, K_1 < K_2
+        self.assertGreater(k[1], k[0])
+
+        # Check that the computed order of correlation matches the one
+        # embedded in the test data
+        self.assertEqual(xi, self.xi)
+
+        # Check the output for test_aborted
+        self.assertEqual(test_aborted, False)
+
+    def test_cubic_ximax(self):
+        # Test exceeding ximax
+        xi_ximax, p_vals_ximax, k_ximax, test_aborted = cubic.cubic(
+            self.data_signal, alpha=1, ximax=self.ximax)
+
+        self.assertEqual(test_aborted, True)
+        self.assertEqual(xi_ximax - 1, self.ximax)
+
+    def test_cubic_errors(self):
+
+        # Check the errors raised for invalid parameter settings
+
+        # Empty signal
+        self.assertRaises(
+            ValueError, cubic.cubic, neo.AnalogSignal(
+                []*pq.dimensionless, sampling_period=10*pq.ms))
+
+        # Multidimensional array
+        self.assertRaises(ValueError, cubic.cubic, neo.AnalogSignal(
+            [[1, 2, 3], [1, 2, 3]] * pq.dimensionless,
+            sampling_period=10 * pq.ms))
+        self.assertRaises(ValueError, cubic.cubic, numpy.array(
+            [[1, 2, 3], [1, 2, 3]]))
+
+        # Negative alpha
+        self.assertRaises(ValueError, cubic.cubic, self.data_array, alpha=-0.1)
+
+        # Negative number of iterations ximax
+        self.assertRaises(ValueError, cubic.cubic, self.data_array, ximax=-100)
+
+        # Check the case in which the second cumulant of the signal is smaller
+        # than the first cumulant (analytical constraint of the method)
+        self.assertRaises(ValueError, cubic.cubic, neo.AnalogSignal(
+            numpy.array([1]*1000).reshape(1000, 1), units=pq.dimensionless,
+            sampling_period=10*pq.ms), alpha=self.alpha)
+
+
+def suite():
+    suite = unittest.makeSuite(CubicTestCase, 'test')
+    return suite
+
+
+if __name__ == "__main__":
+    runner = unittest.TextTestRunner(verbosity=2)
+    runner.run(suite())

File diff suppressed because it is too large
+ 1245 - 0
code/elephant/elephant/test/test_icsd.py


+ 183 - 0
code/elephant/elephant/test/test_kcsd.py

@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the kCSD methods
+
+This was written by:
+Chaitanya Chintaluri,
+Laboratory of Neuroinformatics,
+Nencki Institute of Experimental Biology, Warsaw.
+
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+import neo
+import numpy as np
+import quantities as pq
+from elephant import current_source_density as CSD
+import elephant.current_source_density_src.utility_functions as utils
+
+
+class KCSD1D_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.ele_pos = utils.generate_electrodes(dim=1).reshape(5, 1)
+        self.csd_profile = utils.gauss_1d_dipole
+        pots = CSD.generate_lfp(self.csd_profile, self.ele_pos)
+        self.pots = np.reshape(pots, (-1, 1))
+        self.test_method = 'KCSD1D'
+        self.test_params = {'h': 50.}
+
+        temp_signals = []
+        for ii in range(len(self.pots)):
+            temp_signals.append(self.pots[ii])
+        self.an_sigs = neo.AnalogSignal(temp_signals * pq.mV,
+                                       sampling_rate=1000 * pq.Hz)
+        chidx = neo.ChannelIndex(range(len(self.pots)))
+        chidx.analogsignals.append(self.an_sigs)
+        chidx.coordinates = self.ele_pos * pq.mm
+
+        chidx.create_relationship()
+
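+    # Input layout used by these tests (built in setUp above): the electrode
+    # potentials are wrapped in a single neo.AnalogSignal, one channel per
+    # electrode, and the electrode coordinates are attached through a
+    # neo.ChannelIndex (chidx.coordinates); estimate_csd is then called with
+    # the AnalogSignal alone, see test_kcsd1d_estimate below.
+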
+    def test_kcsd1d_estimate(self, cv_params={}):
+        self.test_params.update(cv_params)
+        result = CSD.estimate_csd(self.an_sigs, method=self.test_method,
+                                  **self.test_params)
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(result.times, [0.] * pq.s)
+        self.assertEqual(len(result.annotations.keys()), 1)
+        true_csd = self.csd_profile(result.annotations['x_coords'])
+        rms = np.linalg.norm(np.array(result[0, :]) - true_csd)
+        rms /= np.linalg.norm(true_csd)
+        self.assertLess(rms, 0.5, msg='RMS between trueCSD and estimate > 0.5')
+
+    def test_valid_inputs(self):
+        self.test_method = 'InvalidMethodName'
+        self.assertRaises(ValueError, self.test_kcsd1d_estimate)
+        self.test_method = 'KCSD1D'
+        self.test_params = {'src_type': 22}
+        self.assertRaises(KeyError, self.test_kcsd1d_estimate)
+        self.test_method = 'KCSD1D'
+        self.test_params = {'InvalidKwarg': 21}
+        self.assertRaises(TypeError, self.test_kcsd1d_estimate)
+        cv_params = {'InvalidCVArg': np.array((0.1, 0.25, 0.5))}
+        self.assertRaises(TypeError, self.test_kcsd1d_estimate, cv_params)
+
+
+class KCSD2D_TestCase(unittest.TestCase):
+    def setUp(self):
+        xx_ele, yy_ele = utils.generate_electrodes(dim=2, res=9,
+                                                   xlims=[0.05, 0.95],
+                                                   ylims=[0.05, 0.95])
+        self.ele_pos = np.vstack((xx_ele, yy_ele)).T
+        self.csd_profile = utils.large_source_2D
+        pots = CSD.generate_lfp(self.csd_profile, xx_ele, yy_ele, res=100)
+        self.pots = np.reshape(pots, (-1, 1))
+        self.test_method = 'KCSD2D'
+        self.test_params = {'gdx': 0.25, 'gdy': 0.25, 'R_init': 0.08,
+                            'h': 50., 'xmin': 0., 'xmax': 1.,
+                            'ymin': 0., 'ymax': 1.}
+        temp_signals = []
+        for ii in range(len(self.pots)):
+            temp_signals.append(self.pots[ii])
+        self.an_sigs = neo.AnalogSignal(temp_signals * pq.mV,
+                                       sampling_rate=1000 * pq.Hz)
+        chidx = neo.ChannelIndex(range(len(self.pots)))
+        chidx.analogsignals.append(self.an_sigs)
+        chidx.coordinates = self.ele_pos * pq.mm
+
+        chidx.create_relationship()
+
+
+    def test_kcsd2d_estimate(self, cv_params={}):
+        self.test_params.update(cv_params)
+        result = CSD.estimate_csd(self.an_sigs, method=self.test_method,
+                                  **self.test_params)
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(result.times, [0.] * pq.s)
+        self.assertEqual(len(result.annotations.keys()), 2)
+        true_csd = self.csd_profile(result.annotations['x_coords'],
+                                    result.annotations['y_coords'])
+        rms = np.linalg.norm(np.array(result[0, :]) - true_csd)
+        rms /= np.linalg.norm(true_csd)
+        self.assertLess(rms, 0.5, msg='RMS ' + str(rms) +
+                        ' between trueCSD and estimate > 0.5')
+
+    def test_moi_estimate(self):
+        result = CSD.estimate_csd(self.an_sigs, method='MoIKCSD',
+                                  MoI_iters=10, lambd=0.0,
+                                  gdx=0.2, gdy=0.2)
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(result.times, [0.] * pq.s)
+        self.assertEqual(len(result.annotations.keys()), 2)
+
+    def test_valid_inputs(self):
+        self.test_method = 'InvalidMethodName'
+        self.assertRaises(ValueError, self.test_kcsd2d_estimate)
+        self.test_method = 'KCSD2D'
+        self.test_params = {'src_type': 22}
+        self.assertRaises(KeyError, self.test_kcsd2d_estimate)
+        self.test_params = {'InvalidKwarg': 21}
+        self.assertRaises(TypeError, self.test_kcsd2d_estimate)
+        cv_params = {'InvalidCVArg': np.array((0.1, 0.25, 0.5))}
+        self.assertRaises(TypeError, self.test_kcsd2d_estimate, cv_params)
+
+
+class KCSD3D_TestCase(unittest.TestCase):
+    def setUp(self):
+        xx_ele, yy_ele, zz_ele = utils.generate_electrodes(dim=3, res=5,
+                                                           xlims=[0.15, 0.85],
+                                                           ylims=[0.15, 0.85],
+                                                           zlims=[0.15, 0.85])
+        self.ele_pos = np.vstack((xx_ele, yy_ele, zz_ele)).T
+        self.csd_profile = utils.gauss_3d_dipole
+        pots = CSD.generate_lfp(self.csd_profile, xx_ele, yy_ele, zz_ele)
+        self.pots = np.reshape(pots, (-1, 1))
+        self.test_method = 'KCSD3D'
+        self.test_params = {'gdx': 0.05, 'gdy': 0.05, 'gdz': 0.05,
+                            'lambd': 5.10896977451e-19, 'src_type': 'step',
+                            'R_init': 0.31, 'xmin': 0., 'xmax': 1., 'ymin': 0.,
+                            'ymax': 1., 'zmin': 0., 'zmax': 1.}
+
+        temp_signals = []
+        for ii in range(len(self.pots)):
+            temp_signals.append(self.pots[ii])
+        self.an_sigs = neo.AnalogSignal(temp_signals * pq.mV,
+                                       sampling_rate=1000 * pq.Hz)
+        chidx = neo.ChannelIndex(range(len(self.pots)))
+        chidx.analogsignals.append(self.an_sigs)
+        chidx.coordinates = self.ele_pos * pq.mm
+
+        chidx.create_relationship()
+
+    def test_kcsd3d_estimate(self, cv_params={}):
+        self.test_params.update(cv_params)
+        result = CSD.estimate_csd(self.an_sigs, method=self.test_method,
+                                  **self.test_params)
+        self.assertEqual(result.t_start, 0.0 * pq.s)
+        self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
+        self.assertEqual(result.times, [0.] * pq.s)
+        self.assertEqual(len(result.annotations.keys()), 3)
+        true_csd = self.csd_profile(result.annotations['x_coords'],
+                                    result.annotations['y_coords'],
+                                    result.annotations['z_coords'])
+        rms = np.linalg.norm(np.array(result[0, :]) - true_csd)
+        rms /= np.linalg.norm(true_csd)
+        self.assertLess(rms, 0.5, msg='RMS ' + str(rms) +
+                        ' between trueCSD and estimate > 0.5')
+
+    def test_valid_inputs(self):
+        self.test_method = 'InvalidMethodName'
+        self.assertRaises(ValueError, self.test_kcsd3d_estimate)
+        self.test_method = 'KCSD3D'
+        self.test_params = {'src_type': 22}
+        self.assertRaises(KeyError, self.test_kcsd3d_estimate)
+        self.test_params = {'InvalidKwarg': 21}
+        self.assertRaises(TypeError, self.test_kcsd3d_estimate)
+        cv_params = {'InvalidCVArg': np.array((0.1, 0.25, 0.5))}
+        self.assertRaises(TypeError, self.test_kcsd3d_estimate, cv_params)
+
+if __name__ == '__main__':
+    unittest.main()

+ 129 - 0
code/elephant/elephant/test/test_kernels.py

@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the kernels module.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+
+import numpy as np
+import quantities as pq
+import scipy.integrate as spint
+import elephant.kernels as kernels
+
+
+class kernel_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.kernel_types = [obj for obj in kernels.__dict__.values()
+                             if isinstance(obj, type) and
+                             issubclass(obj, kernels.Kernel) and
+                             hasattr(obj, "_evaluate") and
+                             obj is not kernels.Kernel and
+                             obj is not kernels.SymmetricKernel]
+        self.fraction = 0.9999
+
+    def test_error_kernels(self):
+        """
+        Test of various error cases in the kernels module.
+        """
+        self.assertRaises(
+            TypeError, kernels.RectangularKernel, sigma=2.0)
+        self.assertRaises(
+            ValueError, kernels.RectangularKernel, sigma=-0.03*pq.s)
+        self.assertRaises(
+            ValueError, kernels.RectangularKernel, sigma=2.0*pq.ms,
+            invert=2)
+        rec_kernel = kernels.RectangularKernel(sigma=0.3*pq.ms)
+        self.assertRaises(
+            TypeError, rec_kernel, [1, 2, 3])
+        self.assertRaises(
+            TypeError, rec_kernel, [1, 2, 3]*pq.V)
+        kernel = kernels.Kernel(sigma=0.3*pq.ms)
+        self.assertRaises(
+            NotImplementedError, kernel._evaluate, [1, 2, 3]*pq.V)
+        self.assertRaises(
+            NotImplementedError, kernel.boundary_enclosing_area_fraction,
+            fraction=0.9)
+        self.assertRaises(TypeError,
+                          rec_kernel.boundary_enclosing_area_fraction, [1, 2])
+        self.assertRaises(ValueError,
+                          rec_kernel.boundary_enclosing_area_fraction, -10)
+        self.assertEqual(kernel.is_symmetric(), False)
+        self.assertEqual(rec_kernel.is_symmetric(), True)
+
+    @unittest.skip('very time-consuming test')
+    def test_error_alpha_kernel(self):
+        alp_kernel = kernels.AlphaKernel(sigma=0.3*pq.ms)
+        self.assertRaises(ValueError,
+            alp_kernel.boundary_enclosing_area_fraction, 0.9999999)
+
+    def test_kernels_normalization(self):
+        """
+        Test that each kernel normalizes to area one.
+        """
+        sigma = 0.1 * pq.mV
+        kernel_resolution = sigma / 100.0
+        kernel_list = [kernel_type(sigma, invert=False) for
+                       kernel_type in self.kernel_types]
+        for kernel in kernel_list:
+            b = kernel.boundary_enclosing_area_fraction(self.fraction).magnitude
+            restric_defdomain = \
+                np.linspace(-b, b, 2*b/kernel_resolution.magnitude) * sigma.units
+            kern = kernel(restric_defdomain)
+            norm = spint.cumtrapz(y=kern.magnitude,
+                                  x=restric_defdomain.magnitude)[-1]
+            self.assertAlmostEqual(norm, 1, delta=0.003)
+
+    def test_kernels_stddev(self):
+        """
+        Test that the standard deviation calculated from the kernel (almost)
+        equals the parameter sigma with which the kernel was constructed.
+        """
+        sigma = 0.5 * pq.s
+        kernel_resolution = sigma / 50.0
+        for invert in (False, True):
+            kernel_list = [kernel_type(sigma, invert) for
+                           kernel_type in self.kernel_types]
+            for kernel in kernel_list:
+                b = kernel.boundary_enclosing_area_fraction(self.fraction).magnitude
+                restric_defdomain = \
+                    np.linspace(-b, b, 2*b/kernel_resolution.magnitude) * \
+                    sigma.units
+                kern = kernel(restric_defdomain)
+                av_integr = kern * restric_defdomain
+                average = spint.cumtrapz(y=av_integr.magnitude,
+                                         x=restric_defdomain.magnitude)[-1] * \
+                          sigma.units
+                var_integr = (restric_defdomain-average)**2 * kern
+                variance = spint.cumtrapz(y=var_integr.magnitude,
+                                          x=restric_defdomain.magnitude)[-1] * \
+                           sigma.units**2
+                stddev = np.sqrt(variance)
+                self.assertAlmostEqual(stddev, sigma, delta=0.01*sigma)
+
+    def test_kernel_boundary_enclosing(self):
+        """
+        Test whether the integral of the kernel with boundary taken from
+        the return value of the method boundary_enclosing_area_fraction
+        is (almost) equal to the input variable `fraction` of
+        boundary_enclosing_area_fraction.
+        """
+        sigma = 0.5 * pq.s
+        kernel_resolution = sigma / 500.0
+        kernel_list = [kernel_type(sigma, invert=False) for
+                       kernel_type in self.kernel_types]
+        for fraction in np.arange(0.15, 1.0, 0.4):
+            for kernel in kernel_list:
+                b = kernel.boundary_enclosing_area_fraction(fraction).magnitude
+                restric_defdomain = \
+                    np.linspace(-b, b, 2*b/kernel_resolution.magnitude) * \
+                    sigma.units
+                kern = kernel(restric_defdomain)
+                frac = spint.cumtrapz(y=kern.magnitude,
+                                      x=restric_defdomain.magnitude)[-1]
+                self.assertAlmostEqual(frac, fraction, delta=0.002)
+
+if __name__ == '__main__':
+    unittest.main()

File diff suppressed because it is too large
+ 1382 - 0
code/elephant/elephant/test/test_neo_tools.py


File diff suppressed because it is too large
+ 2684 - 0
code/elephant/elephant/test/test_pandas_bridge.py


+ 572 - 0
code/elephant/elephant/test/test_signal_processing.py

@@ -0,0 +1,572 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the signal_processing module.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+from __future__ import division, print_function
+
+import unittest
+
+import neo
+import numpy as np
+import scipy.signal as spsig
+import scipy.stats
+from numpy.testing.utils import assert_array_almost_equal
+import quantities as pq
+
+import elephant.signal_processing
+
+from numpy.ma.testutils import assert_array_equal, assert_allclose
+
+
+class ZscoreTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.test_seq1 = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
+                          4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
+                          4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
+                          5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
+                          2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
+                          11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
+                          1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
+                          6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
+                          6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
+                          5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
+        self.test_seq2 = [6, 3, 0, 0, 18, 4, 14, 98, 3, 56,
+                          7, 4, 6, 9, 11, 16, 13, 3, 2, 15,
+                          24, 1, 0, 7, 4, 4, 9, 24, 12, 11,
+                          9, 7, 9, 8, 5, 2, 7, 12, 15, 17,
+                          3, 7, 2, 1, 0, 17, 2, 6, 3, 32,
+                          22, 19, 11, 8, 5, 4, 3, 2, 7, 21,
+                          24, 2, 5, 10, 11, 14, 6, 8, 4, 12,
+                          6, 5, 2, 22, 25, 19, 16, 22, 13, 2,
+                          19, 20, 17, 19, 2, 4, 1, 3, 5, 23,
+                          20, 15, 4, 7, 10, 14, 15, 15, 20, 1]
+
+    def test_zscore_single_dup(self):
+        '''
+        Test z-score on a single AnalogSignal, asking to return a
+        duplicate.
+        '''
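+        # The reference target is the classical z-score,
+        #
+        #     z = (x - mean(x)) / std(x),
+        #
+        # computed here directly with numpy and cross-checked against
+        # scipy.stats.zscore.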
+        signal = neo.AnalogSignal(
+            self.test_seq1, units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+
+        m = np.mean(self.test_seq1)
+        s = np.std(self.test_seq1)
+        target = (self.test_seq1 - m) / s
+        assert_array_equal(target, scipy.stats.zscore(self.test_seq1))
+
+        result = elephant.signal_processing.zscore(signal, inplace=False)
+        assert_array_almost_equal(
+            result.magnitude, target.reshape(-1, 1), decimal=9)
+
+        self.assertEqual(result.units, pq.Quantity(1. * pq.dimensionless))
+
+        # Assert original signal is untouched
+        self.assertEqual(signal[0].magnitude, self.test_seq1[0])
+
+    def test_zscore_single_inplace(self):
+        '''
+        Test z-score on a single AnalogSignal, asking for an inplace
+        operation.
+        '''
+        signal = neo.AnalogSignal(
+            self.test_seq1, units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+
+        m = np.mean(self.test_seq1)
+        s = np.std(self.test_seq1)
+        target = (self.test_seq1 - m) / s
+
+        result = elephant.signal_processing.zscore(signal, inplace=True)
+
+        assert_array_almost_equal(
+            result.magnitude, target.reshape(-1, 1), decimal=9)
+
+        self.assertEqual(result.units, pq.Quantity(1. * pq.dimensionless))
+
+        # Assert original signal is overwritten
+        self.assertEqual(signal[0].magnitude, target[0])
+
+    def test_zscore_single_multidim_dup(self):
+        '''
+        Test z-score on a single AnalogSignal with multiple dimensions, asking
+        to return a duplicate.
+        '''
+        signal = neo.AnalogSignal(
+            np.transpose(
+                np.vstack([self.test_seq1, self.test_seq2])), units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+
+        m = np.mean(signal.magnitude, axis=0, keepdims=True)
+        s = np.std(signal.magnitude, axis=0, keepdims=True)
+        target = (signal.magnitude - m) / s
+
+        assert_array_almost_equal(
+            elephant.signal_processing.zscore(
+                signal, inplace=False).magnitude, target, decimal=9)
+
+        # Assert original signal is untouched
+        self.assertEqual(signal[0, 0].magnitude, self.test_seq1[0])
+
+    def test_zscore_single_multidim_inplace(self):
+        '''
+        Test z-score on a single AnalogSignal with multiple dimensions, asking
+        for an inplace operation.
+        '''
+        signal = neo.AnalogSignal(
+            np.vstack([self.test_seq1, self.test_seq2]), units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+
+        m = np.mean(signal.magnitude, axis=0, keepdims=True)
+        s = np.std(signal.magnitude, axis=0, keepdims=True)
+        target = (signal.magnitude - m) / s
+
+        assert_array_almost_equal(
+            elephant.signal_processing.zscore(
+                signal, inplace=True).magnitude, target, decimal=9)
+
+        # Assert original signal is overwritten
+        self.assertEqual(signal[0, 0].magnitude, target[0, 0])
+
+    def test_zscore_single_dup_int(self):
+        '''
+        Test if the z-score is correctly calculated even if the input is an
+        AnalogSignal of type int, asking for a duplicate (duplicate should
+        be of type float).
+        '''
+        signal = neo.AnalogSignal(
+            self.test_seq1, units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=int)
+
+        m = np.mean(self.test_seq1)
+        s = np.std(self.test_seq1)
+        target = (self.test_seq1 - m) / s
+
+        assert_array_almost_equal(
+            elephant.signal_processing.zscore(signal, inplace=False).magnitude,
+            target.reshape(-1, 1), decimal=9)
+
+        # Assert original signal is untouched
+        self.assertEqual(signal.magnitude[0], self.test_seq1[0])
+
+    def test_zscore_single_inplace_int(self):
+        '''
+        Test if the z-score is correctly calculated even if the input is an
+        AnalogSignal of type int, asking for an inplace operation.
+        '''
+        signal = neo.AnalogSignal(
+            self.test_seq1, units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=int)
+
+        m = np.mean(self.test_seq1)
+        s = np.std(self.test_seq1)
+        target = (self.test_seq1 - m) / s
+
+        assert_array_almost_equal(
+            elephant.signal_processing.zscore(signal, inplace=True).magnitude,
+            target.reshape(-1, 1).astype(int), decimal=9)
+
+        # Assert original signal is overwritten
+        self.assertEqual(signal[0].magnitude, target.astype(int)[0])
+
+    def test_zscore_list_dup(self):
+        '''
+        Test zscore on a list of AnalogSignal objects, asking to return a
+        duplicate.
+        '''
+        signal1 = neo.AnalogSignal(
+            np.transpose(np.vstack([self.test_seq1, self.test_seq1])),
+            units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+        signal2 = neo.AnalogSignal(
+            np.transpose(np.vstack([self.test_seq1, self.test_seq2])),
+            units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+        signal_list = [signal1, signal2]
+
+        m = np.mean(np.hstack([self.test_seq1, self.test_seq1]))
+        s = np.std(np.hstack([self.test_seq1, self.test_seq1]))
+        target11 = (self.test_seq1 - m) / s
+        target21 = (self.test_seq1 - m) / s
+        m = np.mean(np.hstack([self.test_seq1, self.test_seq2]))
+        s = np.std(np.hstack([self.test_seq1, self.test_seq2]))
+        target12 = (self.test_seq1 - m) / s
+        target22 = (self.test_seq2 - m) / s
+
+        # Call elephant function
+        result = elephant.signal_processing.zscore(signal_list, inplace=False)
+
+        assert_array_almost_equal(
+            result[0].magnitude,
+            np.transpose(np.vstack([target11, target12])), decimal=9)
+        assert_array_almost_equal(
+            result[1].magnitude,
+            np.transpose(np.vstack([target21, target22])), decimal=9)
+
+        # Assert original signal is untouched
+        self.assertEqual(signal1.magnitude[0, 0], self.test_seq1[0])
+        self.assertEqual(signal2.magnitude[0, 1], self.test_seq2[0])
+
+    def test_zscore_list_inplace(self):
+        '''
+        Test zscore on a list of AnalogSignal objects, asking for an
+        inplace operation.
+        '''
+        signal1 = neo.AnalogSignal(
+            np.transpose(np.vstack([self.test_seq1, self.test_seq1])),
+            units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+        signal2 = neo.AnalogSignal(
+            np.transpose(np.vstack([self.test_seq1, self.test_seq2])),
+            units='mV',
+            t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
+        signal_list = [signal1, signal2]
+
+        m = np.mean(np.hstack([self.test_seq1, self.test_seq1]))
+        s = np.std(np.hstack([self.test_seq1, self.test_seq1]))
+        target11 = (self.test_seq1 - m) / s
+        target21 = (self.test_seq1 - m) / s
+        m = np.mean(np.hstack([self.test_seq1, self.test_seq2]))
+        s = np.std(np.hstack([self.test_seq1, self.test_seq2]))
+        target12 = (self.test_seq1 - m) / s
+        target22 = (self.test_seq2 - m) / s
+
+        # Call elephant function
+        result = elephant.signal_processing.zscore(signal_list, inplace=True)
+
+        assert_array_almost_equal(
+            result[0].magnitude,
+            np.transpose(np.vstack([target11, target12])), decimal=9)
+        assert_array_almost_equal(
+            result[1].magnitude,
+            np.transpose(np.vstack([target21, target22])), decimal=9)
+
+        # Assert original signal is overwritten
+        self.assertEqual(signal1[0, 0].magnitude, target11[0])
+        self.assertEqual(signal2[0, 0].magnitude, target21[0])
+
+
+class ButterTestCase(unittest.TestCase):
+
+    def test_butter_filter_type(self):
+        """
+        Test that the correct type of filtering is performed according to how
+        the cut-off frequencies are given.
+        """
+        # generate white noise AnalogSignal
+        noise = neo.AnalogSignal(
+            np.random.normal(size=5000),
+            sampling_rate=1000 * pq.Hz, units='mV')
+
+        # test high-pass filtering: power at the lowest frequency
+        # should be almost zero
+        # Note: the default detrend function of scipy.signal.welch() seems to
+        # cause artificial finite power at the lowest frequencies. Here I avoid
+        # this by using an identity function for detrending
+        filtered_noise = elephant.signal_processing.butter(
+            noise, 250.0 * pq.Hz, None)
+        _, psd = spsig.welch(filtered_noise.T, nperseg=1024, fs=1000.0,
+                             detrend=lambda x: x)
+        self.assertAlmostEqual(psd[0, 0], 0)
+
+        # test low-pass filtering: power at the highest frequency
+        # should be almost zero
+        filtered_noise = elephant.signal_processing.butter(
+            noise, None, 250.0 * pq.Hz)
+        _, psd = spsig.welch(filtered_noise.T, nperseg=1024, fs=1000.0)
+        self.assertAlmostEqual(psd[0, -1], 0)
+
+        # test band-pass filtering: power at the lowest and highest frequencies
+        # should be almost zero
+        filtered_noise = elephant.signal_processing.butter(
+            noise, 200.0 * pq.Hz, 300.0 * pq.Hz)
+        _, psd = spsig.welch(filtered_noise.T, nperseg=1024, fs=1000.0,
+                             detrend=lambda x: x)
+        self.assertAlmostEqual(psd[0, 0], 0)
+        self.assertAlmostEqual(psd[0, -1], 0)
+
+        # test band-stop filtering: power at the intermediate frequency
+        # should be almost zero
+        filtered_noise = elephant.signal_processing.butter(
+            noise, 400.0 * pq.Hz, 100.0 * pq.Hz)
+        _, psd = spsig.welch(filtered_noise.T, nperseg=1024, fs=1000.0)
+        self.assertAlmostEqual(psd[0, 256], 0)
+
+    def test_butter_filter_function(self):
+        # generate white noise AnalogSignal
+        noise = neo.AnalogSignal(
+            np.random.normal(size=5000),
+            sampling_rate=1000 * pq.Hz, units='mV')
+
+        # test that the filter performs equally well with
+        # filter_function='lfilter' as with filter_function='filtfilt'
+        # (the default option)
+        kwds = {'signal': noise, 'highpass_freq': 250.0 * pq.Hz,
+                'lowpass_freq': None, 'filter_function': 'filtfilt'}
+        filtered_noise = elephant.signal_processing.butter(**kwds)
+        _, psd_filtfilt = spsig.welch(
+            filtered_noise.T, nperseg=1024, fs=1000.0, detrend=lambda x: x)
+
+        kwds['filter_function'] = 'lfilter'
+        filtered_noise = elephant.signal_processing.butter(**kwds)
+        _, psd_lfilter = spsig.welch(
+            filtered_noise.T, nperseg=1024, fs=1000.0, detrend=lambda x: x)
+
+        self.assertAlmostEqual(psd_filtfilt[0, 0], psd_lfilter[0, 0])
+
+    def test_butter_invalid_filter_function(self):
+        # generate a dummy AnalogSignal
+        anasig_dummy = neo.AnalogSignal(
+            np.zeros(5000), sampling_rate=1000 * pq.Hz, units='mV')
+        # test exception upon invalid filtfunc string
+        kwds = {'signal': anasig_dummy, 'highpass_freq': 250.0 * pq.Hz,
+                'filter_function': 'invalid_filter'}
+        self.assertRaises(
+            ValueError, elephant.signal_processing.butter, **kwds)
+
+    def test_butter_missing_cutoff_freqs(self):
+        # generate a dummy AnalogSignal
+        anasig_dummy = neo.AnalogSignal(
+            np.zeros(5000), sampling_rate=1000 * pq.Hz, units='mV')
+        # test a case where no cut-off frequencies are given
+        kwds = {'signal': anasig_dummy, 'highpass_freq': None,
+                'lowpass_freq': None}
+        self.assertRaises(
+            ValueError, elephant.signal_processing.butter, **kwds)
+
+    def test_butter_input_types(self):
+        # generate white noise data of different types
+        noise_np = np.random.normal(size=5000)
+        noise_pq = noise_np * pq.mV
+        noise = neo.AnalogSignal(noise_pq, sampling_rate=1000.0 * pq.Hz)
+
+        # check input as NumPy ndarray
+        filtered_noise_np = elephant.signal_processing.butter(
+            noise_np, 400.0, 100.0, fs=1000.0)
+        self.assertTrue(isinstance(filtered_noise_np, np.ndarray))
+        self.assertFalse(isinstance(filtered_noise_np, pq.quantity.Quantity))
+        self.assertFalse(isinstance(filtered_noise_np, neo.AnalogSignal))
+        self.assertEqual(filtered_noise_np.shape, noise_np.shape)
+
+        # check input as Quantity array
+        filtered_noise_pq = elephant.signal_processing.butter(
+            noise_pq, 400.0 * pq.Hz, 100.0 * pq.Hz, fs=1000.0)
+        self.assertTrue(isinstance(filtered_noise_pq, pq.quantity.Quantity))
+        self.assertFalse(isinstance(filtered_noise_pq, neo.AnalogSignal))
+        self.assertEqual(filtered_noise_pq.shape, noise_pq.shape)
+
+        # check input as neo AnalogSignal
+        filtered_noise = elephant.signal_processing.butter(noise,
+                                                           400.0 * pq.Hz,
+                                                           100.0 * pq.Hz)
+        self.assertTrue(isinstance(filtered_noise, neo.AnalogSignal))
+        self.assertEqual(filtered_noise.shape, noise.shape)
+
+        # check if the results from different input types are identical
+        self.assertTrue(np.all(
+            filtered_noise_pq.magnitude == filtered_noise_np))
+        self.assertTrue(np.all(
+            filtered_noise.magnitude[:, 0] == filtered_noise_np))
+
+    def test_butter_axis(self):
+        noise = np.random.normal(size=(4, 5000))
+        filtered_noise = elephant.signal_processing.butter(
+            noise, 250.0, fs=1000.0)
+        filtered_noise_transposed = elephant.signal_processing.butter(
+            noise.T, 250.0, fs=1000.0, axis=0)
+        self.assertTrue(np.all(filtered_noise == filtered_noise_transposed.T))
+
+    def test_butter_multidim_input(self):
+        noise_pq = np.random.normal(size=(4, 5000)) * pq.mV
+        noise_neo = neo.AnalogSignal(
+            noise_pq.T, sampling_rate=1000.0 * pq.Hz)
+        noise_neo1d = neo.AnalogSignal(
+            noise_pq[0], sampling_rate=1000.0 * pq.Hz)
+        filtered_noise_pq = elephant.signal_processing.butter(
+            noise_pq, 250.0, fs=1000.0)
+        filtered_noise_neo = elephant.signal_processing.butter(
+            noise_neo, 250.0)
+        filtered_noise_neo1d = elephant.signal_processing.butter(
+            noise_neo1d, 250.0)
+        self.assertTrue(np.all(
+            filtered_noise_pq.magnitude == filtered_noise_neo.T.magnitude))
+        self.assertTrue(np.all(
+            filtered_noise_neo1d.magnitude[:, 0] ==
+            filtered_noise_neo.magnitude[:, 0]))
+
+
+class HilbertTestCase(unittest.TestCase):
+
+    def setUp(self):
+        # Generate test data of a harmonic function over a long time
+        time = np.arange(0, 1000, 0.1) * pq.ms
+        freq = 10 * pq.Hz
+
+        self.amplitude = np.array([
+            np.linspace(1, 10, len(time)),
+            np.linspace(1, 10, len(time)),
+            np.ones((len(time))),
+            np.ones((len(time))) * 10.]).T
+        self.phase = np.array([
+            (time * freq).simplified.magnitude * 2. * np.pi,
+            (time * freq).simplified.magnitude * 2. * np.pi + np.pi / 2,
+            (time * freq).simplified.magnitude * 2. * np.pi + np.pi,
+            (time * freq).simplified.magnitude * 2. * 2. * np.pi]).T
+
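+        # wrap the phases into the interval [-pi, pi) so that they are
+        # directly comparable to the output of np.angle()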
+        self.phase = np.mod(self.phase + np.pi, 2. * np.pi) - np.pi
+
+        # rising amplitude cosine, rising amplitude sine, flat inverse cosine,
+        # flat cosine at double frequency
+        sigs = np.vstack([
+            self.amplitude[:, 0] * np.cos(self.phase[:, 0]),
+            self.amplitude[:, 1] * np.cos(self.phase[:, 1]),
+            self.amplitude[:, 2] * np.cos(self.phase[:, 2]),
+            self.amplitude[:, 3] * np.cos(self.phase[:, 3])])
+
+        self.long_signals = neo.AnalogSignal(
+            sigs.T, units='mV',
+            t_start=0. * pq.ms,
+            sampling_rate=(len(time) / (time[-1] - time[0])).rescale(pq.Hz),
+            dtype=float)
+
+        # Generate test data covering a single oscillation cycle in 1s only
+        phases = np.arange(0, 2 * np.pi, np.pi / 256)
+        sigs = np.vstack([
+            np.sin(phases),
+            np.cos(phases),
+            np.sin(2 * phases),
+            np.cos(2 * phases)])
+
+        self.one_period = neo.AnalogSignal(
+            sigs.T, units=pq.mV,
+            sampling_rate=len(phases) * pq.Hz)
+
+    def test_hilbert_pad_type_error(self):
+        """
+        Tests if incorrect pad_type raises ValueError.
+        """
+        padding = 'wrong_type'
+
+        self.assertRaises(
+            ValueError, elephant.signal_processing.hilbert,
+            self.long_signals, N=padding)
+
+    def test_hilbert_output_shape(self):
+        """
+        Tests if the length of the output is identical to the original signal,
+        and the dimension is dimensionless.
+        """
+        true_shape = np.shape(self.long_signals)
+        output = elephant.signal_processing.hilbert(
+            self.long_signals, N='nextpow')
+        self.assertEqual(np.shape(output), true_shape)
+        self.assertEqual(output.units, pq.dimensionless)
+        output = elephant.signal_processing.hilbert(
+            self.long_signals, N=16384)
+        self.assertEqual(np.shape(output), true_shape)
+        self.assertEqual(output.units, pq.dimensionless)
+
+    def test_hilbert_theoretical_long_signals(self):
+        """
+        Tests the output of the hilbert function with regard to amplitude and
+        phase of long test signals
+        """
+        # Performing test using all pad types
+        for padding in ['nextpow', 'none', 16384]:
+
+            h = elephant.signal_processing.hilbert(
+                self.long_signals, N=padding)
+
+            phase = np.angle(h.magnitude)
+            amplitude = np.abs(h.magnitude)
+            real_value = np.real(h.magnitude)
+
+            # The real part should be equal to the original long_signals
+            assert_array_almost_equal(
+                real_value,
+                self.long_signals.magnitude,
+                decimal=14)
+
+            # Test only in the middle half of the array (border effects)
+            ind1 = int(len(h.times) / 4)
+            ind2 = int(3 * len(h.times) / 4)
+
+            # Calculate difference in phase between signal and original phase
+            # and use smaller of any two phase differences
+            phasediff = np.abs(phase[ind1:ind2, :] - self.phase[ind1:ind2, :])
+            phasediff[phasediff >= np.pi] = \
+                2 * np.pi - phasediff[phasediff >= np.pi]
+
+            # Calculate difference in amplitude between signal and original
+            # amplitude
+            amplitudediff = \
+                amplitude[ind1:ind2, :] - self.amplitude[ind1:ind2, :]
+            assert_allclose(phasediff, 0, atol=0.1)
+            assert_allclose(amplitudediff, 0, atol=0.5)
+
+    def test_hilbert_theoretical_one_period(self):
+        """
+        Tests the output of the hilbert function with regard to amplitude and
+        phase of a short signal covering one cycle (more accurate estimate).
+
+        This unit test is adapted from the unit tests of scipy's hilbert()
+        function.
+        """
+
+        # Precision of testing
+        decimal = 14
+
+        # Performing test using all pad types
+        for padding in ['nextpow', 'none', 512]:
+
+            h = elephant.signal_processing.hilbert(
+                self.one_period, N=padding)
+
+            amplitude = np.abs(h.magnitude)
+            phase = np.angle(h.magnitude)
+            real_value = np.real(h.magnitude)
+
+            # The real part should be equal to the original one_period signal:
+            assert_array_almost_equal(
+                real_value,
+                self.one_period.magnitude,
+                decimal=decimal)
+
+            # The absolute value should be 1 everywhere, for this input:
+            assert_array_almost_equal(
+                amplitude,
+                np.ones(self.one_period.magnitude.shape),
+                decimal=decimal)
+
+            # For the 'slow' sine - the phase should go from -pi/2 to pi/2 in
+            # the first 256 bins:
+            assert_array_almost_equal(
+                phase[:256, 0],
+                np.arange(-np.pi / 2, np.pi / 2, np.pi / 256),
+                decimal=decimal)
+            # For the 'slow' cosine - the phase should go from 0 to pi in the
+            # same interval:
+            assert_array_almost_equal(
+                phase[:256, 1],
+                np.arange(0, np.pi, np.pi / 256),
+                decimal=decimal)
+            # The 'fast' sine should make this phase transition in half the
+            # time:
+            assert_array_almost_equal(
+                phase[:128, 2],
+                np.arange(-np.pi / 2, np.pi / 2, np.pi / 128),
+                decimal=decimal)
+            # The 'fast' cosine should make this phase transition in half the
+            # time:
+            assert_array_almost_equal(
+                phase[:128, 3],
+                np.arange(0, np.pi, np.pi / 128),
+                decimal=decimal)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 309 - 0
code/elephant/elephant/test/test_spectral.py

@@ -0,0 +1,309 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the spectral module.
+
+:copyright: Copyright 2015 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+
+import numpy as np
+import scipy.signal as spsig
+import quantities as pq
+import neo.core as n
+
+import elephant.spectral
+
+
+class WelchPSDTestCase(unittest.TestCase):
+    def test_welch_psd_errors(self):
+        # generate dummy data
+        data = n.AnalogSignal(np.zeros(5000), sampling_period=0.001*pq.s,
+                              units='mV')
+
+        # check for invalid parameter values
+        # - length of segments
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          len_seg=0)
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          len_seg=data.shape[0] * 2)
+        # - number of segments
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          num_seg=0)
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          num_seg=data.shape[0] * 2)
+        # - frequency resolution
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          freq_res=-1)
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          freq_res=data.sampling_rate/(data.shape[0]+1))
+        # - overlap
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          overlap=-1.0)
+        self.assertRaises(ValueError, elephant.spectral.welch_psd, data,
+                          overlap=1.1)
+
+    def test_welch_psd_behavior(self):
+        # generate data by adding white noise and a sinusoid
+        data_length = 5000
+        sampling_period = 0.001
+        signal_freq = 100.0
+        noise = np.random.normal(size=data_length)
+        signal = [np.sin(2*np.pi*signal_freq*t)
+                  for t in np.arange(0, data_length*sampling_period,
+                                     sampling_period)]
+        data = n.AnalogSignal(np.array(signal+noise),
+                                      sampling_period=sampling_period*pq.s,
+                                      units='mV')
+
+        # consistency between different ways of specifying segment length
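+        # (with overlap=0, num_seg=5 is equivalent to len_seg=data_length//5)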
+        freqs1, psd1 = elephant.spectral.welch_psd(data, len_seg=data_length//5, overlap=0)
+        freqs2, psd2 = elephant.spectral.welch_psd(data, num_seg=5, overlap=0)
+        self.assertTrue((psd1==psd2).all() and (freqs1==freqs2).all())
+
+        # frequency resolution and consistency with data
+        freq_res = 1.0 * pq.Hz
+        freqs, psd = elephant.spectral.welch_psd(data, freq_res=freq_res)
+        self.assertAlmostEqual(freq_res, freqs[1]-freqs[0])
+        self.assertEqual(freqs[psd.argmax()], signal_freq)
+        freqs_np, psd_np = elephant.spectral.welch_psd(data.magnitude.flatten(), fs=1/sampling_period, freq_res=freq_res)
+        self.assertTrue((freqs==freqs_np).all() and (psd==psd_np).all())
+
+        # check of scipy.signal.welch() parameters
+        params = {'window': 'hamming', 'nfft': 1024, 'detrend': 'linear',
+                  'return_onesided': False, 'scaling': 'spectrum'}
+        for key, val in params.items():
+            freqs, psd = elephant.spectral.welch_psd(data, len_seg=1000, overlap=0, **{key: val})
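+            # move the time axis to the last position, as expected by
+            # scipy.signal.welch()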
+            freqs_spsig, psd_spsig = spsig.welch(np.rollaxis(data, 0, len(data.shape)),
+                                                 fs=1/sampling_period, nperseg=1000, noverlap=0, **{key: val})
+            self.assertTrue((freqs==freqs_spsig).all() and (psd==psd_spsig).all())
+
+        # - generate multidimensional data for check of parameter `axis`
+        num_channel = 4
+        data_length = 5000
+        data_multidim = np.random.normal(size=(num_channel, data_length))
+        freqs, psd = elephant.spectral.welch_psd(data_multidim)
+        freqs_T, psd_T = elephant.spectral.welch_psd(data_multidim.T, axis=0)
+        self.assertTrue(np.all(freqs==freqs_T))
+        self.assertTrue(np.all(psd==psd_T.T))
+
+    def test_welch_psd_input_types(self):
+        # generate test data
+        sampling_period = 0.001
+        data = n.AnalogSignal(np.array(np.random.normal(size=5000)),
+                                   sampling_period=sampling_period*pq.s,
+                                   units='mV')
+
+        # outputs from AnalogSignal input are of Quantity type (standard usage)
+        freqs_neo, psd_neo = elephant.spectral.welch_psd(data)
+        self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity))
+        self.assertTrue(isinstance(psd_neo, pq.quantity.Quantity))
+
+        # outputs from Quantity array input are of Quantity type
+        freqs_pq, psd_pq = elephant.spectral.welch_psd(data.magnitude.flatten()*data.units, fs=1/sampling_period)
+        self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity))
+        self.assertTrue(isinstance(psd_pq, pq.quantity.Quantity))
+
+        # outputs from Numpy ndarray input are NOT of Quantity type
+        freqs_np, psd_np = elephant.spectral.welch_psd(data.magnitude.flatten(), fs=1/sampling_period)
+        self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity))
+        self.assertFalse(isinstance(psd_np, pq.quantity.Quantity))
+
+        # check if the results from different input types are identical
+        self.assertTrue((freqs_neo==freqs_pq).all() and (psd_neo==psd_pq).all())
+        self.assertTrue((freqs_neo==freqs_np).all() and (psd_neo==psd_np).all())
+
+    def test_welch_psd_multidim_input(self):
+        # generate multidimensional data
+        num_channel = 4
+        data_length = 5000
+        sampling_period = 0.001
+        noise = np.random.normal(size=(num_channel, data_length))
+        data_np = np.array(noise)
+        # Since the row-column order in AnalogSignal differs from the
+        # conventional one, `data_np` needs to be transposed when it is used
+        # to define an AnalogSignal
+        data_neo = n.AnalogSignal(data_np.T,
+                                       sampling_period=sampling_period*pq.s,
+                                       units='mV')
+        data_neo_1dim = n.AnalogSignal(data_np[0],
+                                       sampling_period=sampling_period*pq.s,
+                                       units='mV')
+
+        # check if the results from different input types are identical
+        freqs_np, psd_np = elephant.spectral.welch_psd(data_np,
+                                                     fs=1/sampling_period)
+        freqs_neo, psd_neo = elephant.spectral.welch_psd(data_neo)
+        freqs_neo_1dim, psd_neo_1dim = elephant.spectral.welch_psd(data_neo_1dim)
+        self.assertTrue(np.all(freqs_np==freqs_neo))
+        self.assertTrue(np.all(psd_np==psd_neo))
+        self.assertTrue(np.all(psd_neo_1dim==psd_neo[0]))
+
+
+class WelchCohereTestCase(unittest.TestCase):
+    def test_welch_cohere_errors(self):
+        # generate dummy data
+        x = n.AnalogSignal(np.zeros(5000), sampling_period=0.001*pq.s,
+            units='mV')
+        y = n.AnalogSignal(np.zeros(5000), sampling_period=0.001*pq.s,
+            units='mV')
+
+        # check for invalid parameter values
+        # - length of segments
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            len_seg=0)
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            len_seg=x.shape[0] * 2)
+        # - number of segments
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            num_seg=0)
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            num_seg=x.shape[0] * 2)
+        # - frequency resolution
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            freq_res=-1)
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            freq_res=x.sampling_rate/(x.shape[0]+1))
+        # - overlap
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            overlap=-1.0)
+        self.assertRaises(ValueError, elephant.spectral.welch_cohere, x, y,
+            overlap=1.1)
+
+    def test_welch_cohere_behavior(self):
+        # generate data by adding white noise and a sinusoid
+        data_length = 5000
+        sampling_period = 0.001
+        signal_freq = 100.0
+        noise1 = np.random.normal(size=data_length) * 0.01
+        noise2 = np.random.normal(size=data_length) * 0.01
+        signal1 = [np.cos(2*np.pi*signal_freq*t)
+                  for t in np.arange(0, data_length*sampling_period,
+                sampling_period)]
+        signal2 = [np.sin(2*np.pi*signal_freq*t)
+                   for t in np.arange(0, data_length*sampling_period,
+                sampling_period)]
+        x = n.AnalogSignal(np.array(signal1+noise1), units='mV',
+            sampling_period=sampling_period*pq.s)
+        y = n.AnalogSignal(np.array(signal2+noise2), units='mV',
+            sampling_period=sampling_period*pq.s)
+
+        # consistency between different ways of specifying segment length
+        freqs1, coherency1, phase_lag1 = elephant.spectral.welch_cohere(x, y,
+            len_seg=data_length//5, overlap=0)
+        freqs2, coherency2, phase_lag2 = elephant.spectral.welch_cohere(x, y,
+            num_seg=5, overlap=0)
+        self.assertTrue((coherency1==coherency2).all() and
+                        (phase_lag1==phase_lag2).all() and
+                        (freqs1==freqs2).all())
+
+        # frequency resolution and consistency with data
+        freq_res = 1.0 * pq.Hz
+        freqs, coherency, phase_lag = elephant.spectral.welch_cohere(x, y,
+            freq_res=freq_res)
+        self.assertAlmostEqual(freq_res, freqs[1]-freqs[0])
+        self.assertAlmostEqual(freqs[coherency.argmax()], signal_freq,
+            places=2)
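+        # x (cosine) leads y (sine) by a quarter cycle, so the phase lag at
+        # the peak-coherence frequency is expected to be pi/2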
+        self.assertAlmostEqual(phase_lag[coherency.argmax()], np.pi/2,
+            places=2)
+        freqs_np, coherency_np, phase_lag_np =\
+            elephant.spectral.welch_cohere(x.magnitude.flatten(), y.magnitude.flatten(),
+                fs=1/sampling_period, freq_res=freq_res)
+        self.assertTrue((freqs == freqs_np).all() and
+                        (coherency[:, 0] == coherency_np).all() and
+                        (phase_lag[:, 0] == phase_lag_np).all())
+
+        # - check the behavior of parameter `axis` using multidimensional data
+        num_channel = 4
+        data_length = 5000
+        x_multidim = np.random.normal(size=(num_channel, data_length))
+        y_multidim = np.random.normal(size=(num_channel, data_length))
+        freqs, coherency, phase_lag =\
+            elephant.spectral.welch_cohere(x_multidim, y_multidim)
+        freqs_T, coherency_T, phase_lag_T =\
+            elephant.spectral.welch_cohere(x_multidim.T, y_multidim.T, axis=0)
+        self.assertTrue(np.all(freqs==freqs_T))
+        self.assertTrue(np.all(coherency==coherency_T.T))
+        self.assertTrue(np.all(phase_lag==phase_lag_T.T))
+
+    def test_welch_cohere_input_types(self):
+        # generate test data
+        sampling_period = 0.001
+        x = n.AnalogSignal(np.array(np.random.normal(size=5000)),
+            sampling_period=sampling_period*pq.s,
+            units='mV')
+        y = n.AnalogSignal(np.array(np.random.normal(size=5000)),
+            sampling_period=sampling_period*pq.s,
+            units='mV')
+
+        # outputs from AnalogSignal input are of Quantity type
+        # (standard usage)
+        freqs_neo, coherency_neo, phase_lag_neo =\
+            elephant.spectral.welch_cohere(x, y)
+        self.assertTrue(isinstance(freqs_neo, pq.quantity.Quantity))
+        self.assertTrue(isinstance(phase_lag_neo, pq.quantity.Quantity))
+
+        # outputs from Quantity array input are of Quantity type
+        freqs_pq, coherency_pq, phase_lag_pq =\
+            elephant.spectral.welch_cohere(x.magnitude.flatten()*x.units,
+                y.magnitude.flatten()*y.units, fs=1/sampling_period)
+        self.assertTrue(isinstance(freqs_pq, pq.quantity.Quantity))
+        self.assertTrue(isinstance(phase_lag_pq, pq.quantity.Quantity))
+
+        # outputs from Numpy ndarray input are NOT of Quantity type
+        freqs_np, coherency_np, phase_lag_np =\
+            elephant.spectral.welch_cohere(x.magnitude.flatten(), y.magnitude.flatten(),
+                fs=1/sampling_period)
+        self.assertFalse(isinstance(freqs_np, pq.quantity.Quantity))
+        self.assertFalse(isinstance(phase_lag_np, pq.quantity.Quantity))
+
+        # check if the results from different input types are identical
+        self.assertTrue((freqs_neo==freqs_pq).all() and
+                        (coherency_neo[:, 0]==coherency_pq).all() and
+                        (phase_lag_neo[:, 0]==phase_lag_pq).all())
+        self.assertTrue((freqs_neo==freqs_np).all() and
+                        (coherency_neo[:, 0]==coherency_np).all() and
+                        (phase_lag_neo[:, 0]==phase_lag_np).all())
+
+    def test_welch_cohere_multidim_input(self):
+        # generate multidimensional data
+        num_channel = 4
+        data_length = 5000
+        sampling_period = 0.001
+        x_np = np.array(np.random.normal(size=(num_channel, data_length)))
+        y_np = np.array(np.random.normal(size=(num_channel, data_length)))
+        # Since the row-column order in AnalogSignal differs from the
+        # convention in NumPy/SciPy, `x_np` and `y_np` need to be transposed
+        # when they are used to define an AnalogSignal
+        x_neo = n.AnalogSignal(x_np.T, units='mV',
+            sampling_period=sampling_period*pq.s)
+        y_neo = n.AnalogSignal(y_np.T, units='mV',
+            sampling_period=sampling_period*pq.s)
+        x_neo_1dim = n.AnalogSignal(x_np[0], units='mV',
+            sampling_period=sampling_period*pq.s)
+        y_neo_1dim = n.AnalogSignal(y_np[0], units='mV',
+            sampling_period=sampling_period*pq.s)
+
+        # check if the results from different input types are identical
+        freqs_np, coherency_np, phase_lag_np =\
+            elephant.spectral.welch_cohere(x_np, y_np, fs=1/sampling_period)
+        freqs_neo, coherency_neo, phase_lag_neo =\
+            elephant.spectral.welch_cohere(x_neo, y_neo)
+        freqs_neo_1dim, coherency_neo_1dim, phase_lag_neo_1dim =\
+            elephant.spectral.welch_cohere(x_neo_1dim, y_neo_1dim)
+        self.assertTrue(np.all(freqs_np==freqs_neo))
+        self.assertTrue(np.all(coherency_np.T==coherency_neo))
+        self.assertTrue(np.all(phase_lag_np.T==phase_lag_neo))
+        self.assertTrue(np.all(coherency_neo_1dim[:, 0]==coherency_neo[:, 0]))
+        self.assertTrue(np.all(phase_lag_neo_1dim[:, 0]==phase_lag_neo[:, 0]))
+
+
+def suite():
+    suite = unittest.makeSuite(WelchPSDTestCase, 'test')
+    return suite
+
+
+if __name__ == "__main__":
+    runner = unittest.TextTestRunner(verbosity=2)
+    runner.run(suite())

+ 561 - 0
code/elephant/elephant/test/test_spike_train_correlation.py

@@ -0,0 +1,561 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the spike_train_correlation module.
+
+:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+
+import numpy as np
+from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
+import quantities as pq
+import neo
+import elephant.conversion as conv
+import elephant.spike_train_correlation as sc
+
+
+class covariance_TestCase(unittest.TestCase):
+
+    def setUp(self):
+        # These two arrays must be such that they do not have coincidences
+        # spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
+        self.test_array_1d_0 = [
+            1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
+        self.test_array_1d_1 = [
+            1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
+
+        # Build spike trains
+        self.st_0 = neo.SpikeTrain(
+            self.test_array_1d_0, units='ms', t_stop=50.)
+        self.st_1 = neo.SpikeTrain(
+            self.test_array_1d_1, units='ms', t_stop=50.)
+
+        # And binned counterparts
+        self.binned_st = conv.BinnedSpikeTrain(
+            [self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+
+    def test_covariance_binned(self):
+        '''
+        Test covariance between two binned spike trains.
+        '''
+
+        # Calculate clipped and unclipped
+        res_clipped = sc.covariance(
+            self.binned_st, binary=True)
+        res_unclipped = sc.covariance(
+            self.binned_st, binary=False)
+
+        # Check dimensions
+        self.assertEqual(len(res_clipped), 2)
+        self.assertEqual(len(res_unclipped), 2)
+
+        # Check result unclipped against result calculated from scratch for
+        # the off-diagonal element
+        mat = self.binned_st.to_array()
+        mean_0 = np.mean(mat[0])
+        mean_1 = np.mean(mat[1])
+        target_from_scratch = \
+            np.dot(mat[0] - mean_0, mat[1] - mean_1) / (len(mat[0]) - 1)
+
+        # Check result unclipped against result calculated by numpy.cov
+        target_numpy = np.cov(mat)
+
+        self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
+        self.assertAlmostEqual(res_unclipped[0][1], target_from_scratch)
+        self.assertAlmostEqual(res_unclipped[1][0], target_from_scratch)
+
+        # Check result clipped against result calculated from scratch for
+        # the off-diagonal element
+        mat = self.binned_st.to_bool_array()
+        mean_0 = np.mean(mat[0])
+        mean_1 = np.mean(mat[1])
+        target_from_scratch = \
+            np.dot(mat[0] - mean_0, mat[1] - mean_1) / (len(mat[0]) - 1)
+
+        # Check result clipped against result calculated by numpy.cov
+        target_numpy = np.cov(mat)
+
+        self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
+        self.assertAlmostEqual(res_clipped[0][1], target_from_scratch)
+        self.assertAlmostEqual(res_clipped[1][0], target_from_scratch)
+
+    def test_covariance_binned_same_spiketrains(self):
+        '''
+        Test if the covariance between two identical binned spike
+        trains evaluates to the expected 2x2 matrix.
+        '''
+        # Calculate correlation
+        binned_st = conv.BinnedSpikeTrain(
+            [self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+        target = sc.covariance(binned_st)
+
+        # Check dimensions
+        self.assertEqual(len(target), 2)
+        # Check result
+        assert_array_equal(target[0][0], target[1][1])
+
+    def test_covariance_binned_short_input(self):
+        '''
+        Test if input list of only one binned spike train yields correct result
+        that matches numpy.cov (covariance with itself)
+        '''
+        # Calculate correlation
+        binned_st = conv.BinnedSpikeTrain(
+            self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+        target = sc.covariance(binned_st)
+
+        # Check result against result calculated by numpy.cov
+        mat = binned_st.to_bool_array()
+        target_numpy = np.cov(mat)
+
+        # Check result and dimensionality of result
+        self.assertEqual(target.ndim, target_numpy.ndim)
+        self.assertAlmostEqual(target, target_numpy)
+
+
+class corrcoeff_TestCase(unittest.TestCase):
+
+    def setUp(self):
+        # These two arrays must be such that they do not have coincidences
+        # spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
+        self.test_array_1d_0 = [
+            1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
+        self.test_array_1d_1 = [
+            1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
+
+        # Build spike trains
+        self.st_0 = neo.SpikeTrain(
+            self.test_array_1d_0, units='ms', t_stop=50.)
+        self.st_1 = neo.SpikeTrain(
+            self.test_array_1d_1, units='ms', t_stop=50.)
+
+        # And binned counterparts
+        self.binned_st = conv.BinnedSpikeTrain(
+            [self.st_0, self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+
+    def test_corrcoef_binned(self):
+        '''
+        Test the correlation coefficient between two binned spike trains.
+        '''
+
+        # Calculate clipped and unclipped
+        res_clipped = sc.corrcoef(
+            self.binned_st, binary=True)
+        res_unclipped = sc.corrcoef(
+            self.binned_st, binary=False)
+
+        # Check dimensions
+        self.assertEqual(len(res_clipped), 2)
+        self.assertEqual(len(res_unclipped), 2)
+
+        # Check result unclipped against result calculated from scratch for
+        # the off-diagonal element
+        mat = self.binned_st.to_array()
+        mean_0 = np.mean(mat[0])
+        mean_1 = np.mean(mat[1])
+        target_from_scratch = \
+            np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
+            np.sqrt(
+                np.dot(mat[0] - mean_0, mat[0] - mean_0) *
+                np.dot(mat[1] - mean_1, mat[1] - mean_1))
+
+        # Check result unclipped against result calculated by numpy.corrcoef
+        target_numpy = np.corrcoef(mat)
+
+        self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
+        self.assertAlmostEqual(res_unclipped[0][1], target_from_scratch)
+        self.assertAlmostEqual(res_unclipped[1][0], target_from_scratch)
+
+        # Check result clipped against result calculated from scratch for
+        # the off-diagonal element
+        mat = self.binned_st.to_bool_array()
+        mean_0 = np.mean(mat[0])
+        mean_1 = np.mean(mat[1])
+        target_from_scratch = \
+            np.dot(mat[0] - mean_0, mat[1] - mean_1) / \
+            np.sqrt(
+                np.dot(mat[0] - mean_0, mat[0] - mean_0) *
+                np.dot(mat[1] - mean_1, mat[1] - mean_1))
+
+        # Check result clipped against result calculated by numpy.corrcoef
+        target_numpy = np.corrcoef(mat)
+
+        self.assertAlmostEqual(target_from_scratch, target_numpy[0][1])
+        self.assertAlmostEqual(res_clipped[0][1], target_from_scratch)
+        self.assertAlmostEqual(res_clipped[1][0], target_from_scratch)
+
+    def test_corrcoef_binned_same_spiketrains(self):
+        '''
+        Test if the correlation coefficient between two identical binned spike
+        trains evaluates to a 2x2 matrix of ones.
+        '''
+        # Calculate correlation
+        binned_st = conv.BinnedSpikeTrain(
+            [self.st_0, self.st_0], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+        target = sc.corrcoef(binned_st)
+
+        # Check dimensions
+        self.assertEqual(len(target), 2)
+        # Check result
+        assert_array_equal(target, 1.)
+
+    def test_corrcoef_binned_short_input(self):
+        '''
+        Test if input list of one binned spike train yields 1.0.
+        '''
+        # Calculate correlation
+        binned_st = conv.BinnedSpikeTrain(
+            self.st_0, t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+        target = sc.corrcoef(binned_st)
+
+        # Check result and dimensionality of result
+        self.assertEqual(target.ndim, 0)
+        self.assertEqual(target, 1.)
+
+
+class cross_correlation_histogram_TestCase(unittest.TestCase):
+
+    def setUp(self):
+        # These two arrays must be such that they do not have coincidences
+        # spanning across two neighbor bins assuming ms bins [0,1),[1,2),...
+        self.test_array_1d_1 = [
+            1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
+        self.test_array_1d_2 = [
+            1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
+
+        # Build spike trains
+        self.st_1 = neo.SpikeTrain(
+            self.test_array_1d_1, units='ms', t_stop=50.)
+        self.st_2 = neo.SpikeTrain(
+            self.test_array_1d_2, units='ms', t_stop=50.)
+
+        # And binned counterparts
+        self.binned_st1 = conv.BinnedSpikeTrain(
+            [self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+        self.binned_st2 = conv.BinnedSpikeTrain(
+            [self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+        # Binned sts to check errors raising
+        self.st_check_binsize = conv.BinnedSpikeTrain(
+            [self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=5 * pq.ms)
+        self.st_check_t_start = conv.BinnedSpikeTrain(
+            [self.st_1], t_start=1 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+        self.st_check_t_stop = conv.BinnedSpikeTrain(
+            [self.st_1], t_start=0 * pq.ms, t_stop=40. * pq.ms,
+            binsize=1 * pq.ms)
+        self.st_check_dimension = conv.BinnedSpikeTrain(
+            [self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+
+    def test_cross_correlation_histogram(self):
+        '''
+        Test generic result of a cross-correlation histogram between two binned
+        spike trains.
+        '''
+        # Calculate CCH using Elephant (normal and binary version) with
+        # mode equal to 'full' (whole spike trains are correlated)
+        cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full',
+            binary=True)
+        cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full', binary=False)
+
+        cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full',
+            binary=True, method='memory')
+        cch_unclipped_mem, bin_ids_unclipped_mem = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full',
+            binary=False, method='memory')
+        # Check consistency between the two methods
+        assert_array_equal(
+            np.squeeze(cch_clipped.magnitude), np.squeeze(
+                cch_clipped_mem.magnitude))
+        assert_array_equal(
+            np.squeeze(cch_clipped.times), np.squeeze(
+                cch_clipped_mem.times))
+        assert_array_equal(
+            np.squeeze(cch_unclipped.magnitude), np.squeeze(
+                cch_unclipped_mem.magnitude))
+        assert_array_equal(
+            np.squeeze(cch_unclipped.times), np.squeeze(
+                cch_unclipped_mem.times))
+        assert_array_almost_equal(bin_ids_clipped, bin_ids_clipped_mem)
+        assert_array_almost_equal(bin_ids_unclipped, bin_ids_unclipped_mem)
+
+        # Check normal correlation. Note: use numpy.correlate to verify the
+        # result. The numpy convention for input array 1 and input array 2 is
+        # swapped compared to Elephant!
+        mat1 = self.binned_st1.to_array()[0]
+        mat2 = self.binned_st2.to_array()[0]
+        target_numpy = np.correlate(mat2, mat1, mode='full')
+        assert_array_equal(
+            target_numpy, np.squeeze(cch_unclipped.magnitude))
+
+        # Check correlation using binary spike trains
+        mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
+        mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
+        target_numpy = np.correlate(mat2, mat1, mode='full')
+        assert_array_equal(
+            target_numpy, np.squeeze(cch_clipped.magnitude))
+
+        # Check the time axis and bin IDs of the resulting AnalogSignal
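+        # (each lag time is expected to be the integer bin ID shifted by half
+        # a bin and converted to time units via the binsize)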
+        assert_array_almost_equal(
+            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
+            cch_unclipped.times)
+        assert_array_almost_equal(
+            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
+            cch_clipped.times)
+
+        # Calculate CCH using Elephant (normal and binary version) with
+        # mode equal to 'valid' (only completely overlapping intervals of the
+        # spike trains are correlated)
+        cch_clipped, bin_ids_clipped = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='valid',
+            binary=True)
+        cch_unclipped, bin_ids_unclipped = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='valid',
+            binary=False)
+        cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='valid',
+            binary=True, method='memory')
+        cch_unclipped_mem, bin_ids_unclipped_mem = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='valid',
+            binary=False, method='memory')
+
+        # Check consistency between the two methods
+        assert_array_equal(
+            np.squeeze(cch_clipped.magnitude), np.squeeze(
+                cch_clipped_mem.magnitude))
+        assert_array_equal(
+            np.squeeze(cch_clipped.times), np.squeeze(
+                cch_clipped_mem.times))
+        assert_array_equal(
+            np.squeeze(cch_unclipped.magnitude), np.squeeze(
+                cch_unclipped_mem.magnitude))
+        assert_array_equal(
+            np.squeeze(cch_unclipped.times), np.squeeze(
+                cch_unclipped_mem.times))
+        assert_array_equal(bin_ids_clipped, bin_ids_clipped_mem)
+        assert_array_equal(bin_ids_unclipped, bin_ids_unclipped_mem)
+
+        # Check normal correlation. Note: use numpy.correlate to verify the
+        # result. The numpy convention for input array 1 and input array 2 is
+        # swapped compared to Elephant!
+        mat1 = self.binned_st1.to_array()[0]
+        mat2 = self.binned_st2.to_array()[0]
+        target_numpy = np.correlate(mat2, mat1, mode='valid')
+        assert_array_equal(
+            target_numpy, np.squeeze(cch_unclipped.magnitude))
+
+        # Check correlation using binary spike trains
+        mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
+        mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
+        target_numpy = np.correlate(mat2, mat1, mode='valid')
+        assert_array_equal(
+            target_numpy, np.squeeze(cch_clipped.magnitude))
+
+        # Check the time axis and bin IDs of the resulting AnalogSignal
+        assert_array_equal(
+            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
+            cch_unclipped.times)
+        assert_array_equal(
+            (bin_ids_clipped - 0.5) * self.binned_st1.binsize,
+            cch_clipped.times)
+
+        # Check for wrong window parameter setting
+        self.assertRaises(
+            KeyError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window='dsaij')
+        self.assertRaises(
+            KeyError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window='dsaij', method='memory')
+
+    def test_raising_error_wrong_inputs(self):
+        '''Check that an exception is thrown if the two spike trains do not
+        fulfill the requirements of the function'''
+        # Check that an error is raised if the bin sizes differ
+        self.assertRaises(
+            AssertionError,
+            sc.cross_correlation_histogram, self.binned_st1,
+            self.st_check_binsize)
+        # Check that an error is raised for different t_start or t_stop
+        self.assertRaises(
+            AssertionError, sc.cross_correlation_histogram,
+            self.st_check_t_start, self.binned_st2)
+        self.assertRaises(
+            AssertionError, sc.cross_correlation_histogram,
+            self.st_check_t_stop, self.binned_st2)
+        # Check that an error is raised if an input is not one-dimensional
+        self.assertRaises(
+            AssertionError, sc.cross_correlation_histogram,
+            self.st_check_dimension, self.binned_st2)
+        self.assertRaises(
+            AssertionError, sc.cross_correlation_histogram,
+            self.binned_st2, self.st_check_dimension)
+
+    def test_window(self):
+        '''Test if the window parameter is correctly interpreted.'''
+        cch_win, bin_ids = sc.cch(
+            self.binned_st1, self.binned_st2, window=[-30, 30])
+        cch_win_mem, bin_ids_mem = sc.cch(
+            self.binned_st1, self.binned_st2, window=[-30, 30],
+            method='memory')
+
+        assert_array_equal(bin_ids, np.arange(-30, 31, 1))
+        assert_array_equal(
+            (bin_ids - 0.5) * self.binned_st1.binsize, cch_win.times)
+
+        assert_array_equal(bin_ids_mem, np.arange(-30, 31, 1))
+        assert_array_equal(
+            (bin_ids_mem - 0.5) * self.binned_st1.binsize, cch_win.times)
+
+        assert_array_equal(cch_win, cch_win_mem)
+        cch_unclipped, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full', binary=False)
+        assert_array_equal(cch_win, cch_unclipped[19:80])
+
+        cch_win, bin_ids = sc.cch(
+            self.binned_st1, self.binned_st2, window=[-25*pq.ms, 25*pq.ms])
+        cch_win_mem, bin_ids_mem = sc.cch(
+            self.binned_st1, self.binned_st2, window=[-25*pq.ms, 25*pq.ms],
+            method='memory')
+
+        assert_array_equal(bin_ids, np.arange(-25, 26, 1))
+        assert_array_equal(
+            (bin_ids - 0.5) * self.binned_st1.binsize, cch_win.times)
+
+        assert_array_equal(bin_ids_mem, np.arange(-25, 26, 1))
+        assert_array_equal(
+            (bin_ids_mem - 0.5) * self.binned_st1.binsize, cch_win.times)
+
+        assert_array_equal(cch_win, cch_win_mem)
+
+        _, bin_ids = sc.cch(
+            self.binned_st1, self.binned_st2, window=[20, 30])
+        _, bin_ids_mem = sc.cch(
+            self.binned_st1, self.binned_st2, window=[20, 30], method='memory')
+
+        assert_array_equal(bin_ids, np.arange(20, 31, 1))
+        assert_array_equal(bin_ids_mem, np.arange(20, 31, 1))
+
+        _, bin_ids = sc.cch(
+            self.binned_st1, self.binned_st2, window=[-30, -20])
+
+        _, bin_ids_mem = sc.cch(
+            self.binned_st1, self.binned_st2, window=[-30, -20],
+            method='memory')
+
+        assert_array_equal(bin_ids, np.arange(-30, -19, 1))
+        assert_array_equal(bin_ids_mem, np.arange(-30, -19, 1))
+
+        # Check for invalid assignments to the window parameter
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-60, 50])
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-60, 50], method='memory')
+
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-50, 60])
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-50, 60], method='memory')
+
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-25.5*pq.ms, 25*pq.ms])
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-25.5*pq.ms, 25*pq.ms], method='memory')
+
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-25*pq.ms, 25.5*pq.ms])
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-25*pq.ms, 25.5*pq.ms], method='memory')
+
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-60*pq.ms, 50*pq.ms])
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-60*pq.ms, 50*pq.ms], method='memory')
+
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-50*pq.ms, 60*pq.ms])
+        self.assertRaises(
+            ValueError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-50*pq.ms, 60*pq.ms], method='memory')
+
+    def test_border_correction(self):
+        '''Test if the border correction for bins at the edges is correctly
+        performed'''
+        cch_corrected, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full',
+            border_correction=True, binary=False, kernel=None)
+        cch_corrected_mem, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full',
+            border_correction=True, binary=False, kernel=None, method='memory')
+        cch, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full',
+            border_correction=False, binary=False, kernel=None)
+        cch_mem, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, window='full',
+            border_correction=False, binary=False, kernel=None,
+            method='memory')
+
+        self.assertNotEqual(cch.all(), cch_corrected.all())
+        self.assertNotEqual(cch_mem.all(), cch_corrected_mem.all())
+
+    def test_kernel(self):
+        '''Test if the smoothing kernel is correctly defined, and whether it
+        is applied properly.'''
+        smoothed_cch, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, kernel=np.ones(3))
+        smoothed_cch_mem, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, kernel=np.ones(3),
+            method='memory')
+
+        cch, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, kernel=None)
+        cch_mem, _ = sc.cross_correlation_histogram(
+            self.binned_st1, self.binned_st2, kernel=None, method='memory')
+
+        self.assertFalse(np.all(smoothed_cch.magnitude == cch.magnitude))
+        self.assertFalse(
+            np.all(smoothed_cch_mem.magnitude == cch_mem.magnitude))
+
+        self.assertRaises(
+            ValueError, sc.cch, self.binned_st1, self.binned_st2,
+            kernel=np.ones(100))
+        self.assertRaises(
+            ValueError, sc.cch, self.binned_st1, self.binned_st2,
+            kernel=np.ones(100), method='memory')
+
+        self.assertRaises(
+            ValueError, sc.cch, self.binned_st1, self.binned_st2, kernel='BOX')
+        self.assertRaises(
+            ValueError, sc.cch, self.binned_st1, self.binned_st2, kernel='BOX',
+            method='memory')
+
+    def test_exist_alias(self):
+        '''
+        Test if alias cch still exists.
+        '''
+        self.assertEqual(sc.cross_correlation_histogram, sc.cch)
+
+if __name__ == '__main__':
+    unittest.main()

+ 520 - 0
code/elephant/elephant/test/test_spike_train_dissimilarity.py

@@ -0,0 +1,520 @@
+# -*- coding: utf-8 -*-
+"""
+Tests for the spike train dissimilarity measures module.
+
+:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+from neo import SpikeTrain
+import numpy as np
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+import scipy.integrate as spint
+from quantities import ms, s, Hz
+import elephant.kernels as kernels
+import elephant.spike_train_generation as stg
+import elephant.spike_train_dissimilarity as stds
+
+class TimeScaleDependSpikeTrainDissimMeasures_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.st00 = SpikeTrain([], units='ms', t_stop=1000.0)
+        self.st01 = SpikeTrain([1], units='ms', t_stop=1000.0)
+        self.st02 = SpikeTrain([2], units='ms', t_stop=1000.0)
+        self.st03 = SpikeTrain([2.9], units='ms', t_stop=1000.0)
+        self.st04 = SpikeTrain([3.1], units='ms', t_stop=1000.0)
+        self.st05 = SpikeTrain([5], units='ms', t_stop=1000.0)
+        self.st06 = SpikeTrain([500], units='ms', t_stop=1000.0)
+        self.st07 = SpikeTrain([12, 32], units='ms', t_stop=1000.0)
+        self.st08 = SpikeTrain([32, 52], units='ms', t_stop=1000.0)
+        self.st09 = SpikeTrain([42], units='ms', t_stop=1000.0)
+        self.st10 = SpikeTrain([18, 60], units='ms', t_stop=1000.0)
+        self.st11 = SpikeTrain([10, 20, 30, 40], units='ms', t_stop=1000.0)
+        self.st12 = SpikeTrain([40, 30, 20, 10], units='ms', t_stop=1000.0)
+        self.st13 = SpikeTrain([15, 25, 35, 45], units='ms', t_stop=1000.0)
+        self.st14 = SpikeTrain([10, 20, 30, 40, 50], units='ms', t_stop=1000.0)
+        self.st15 = SpikeTrain([0.01, 0.02, 0.03, 0.04, 0.05],
+                               units='s', t_stop=1000.0)
+        self.st16 = SpikeTrain([12, 16, 28, 30, 42], units='ms', t_stop=1000.0)
+        self.st21 = stg.homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms)
+        self.st22 = stg.homogeneous_poisson_process(40*Hz, 0*ms, 1000*ms)
+        self.st23 = stg.homogeneous_poisson_process(30*Hz, 0*ms, 1000*ms)
+        self.rd_st_list = [self.st21, self.st22, self.st23]
+        self.st31 = SpikeTrain([12.0], units='ms', t_stop=1000.0)
+        self.st32 = SpikeTrain([12.0, 12.0], units='ms', t_stop=1000.0)
+        self.st33 = SpikeTrain([20.0], units='ms', t_stop=1000.0)
+        self.st34 = SpikeTrain([20.0, 20.0], units='ms', t_stop=1000.0)
+        self.array1 = np.arange(1, 10)
+        self.array2 = np.arange(1.2, 10)
+        self.qarray1 = self.array1 * Hz
+        self.qarray2 = self.array2 * Hz
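+        # pairs of time scales tau and Victor-Purpura cost parameters q,
+        # defined such that q = 1 / tau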
+        self.tau0 = 0.0 * ms
+        self.q0 = np.inf / ms
+        self.tau1 = 0.000000001 * ms
+        self.q1 = 1.0 / self.tau1
+        self.tau2 = 1.0 * ms
+        self.q2 = 1.0 / self.tau2
+        self.tau3 = 10.0 * ms
+        self.q3 = 1.0 / self.tau3
+        self.tau4 = 100.0 * ms
+        self.q4 = 1.0 / self.tau4
+        self.tau5 = 1000000000.0 * ms
+        self.q5 = 1.0 / self.tau5
+        self.tau6 = np.inf * ms
+        self.q6 = 0.0 / ms
+        self.tau7 = 0.01 * s
+        self.q7 = 1.0 / self.tau7
+        self.t = np.linspace(0, 200, 20000001) * ms
+
+    def test_wrong_input(self):
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.array1, self.array2], self.q3)
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.qarray1, self.qarray2], self.q3)
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.qarray1, self.qarray2], 5.0 * ms)
+
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.array1, self.array2], self.q3,
+                          algorithm='intuitive')
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.qarray1, self.qarray2], self.q3,
+                          algorithm='intuitive')
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.qarray1, self.qarray2], 5.0 * ms,
+                          algorithm='intuitive')
+
+        self.assertRaises(TypeError, stds.van_rossum_dist,
+                          [self.array1, self.array2], self.tau3)
+        self.assertRaises(TypeError, stds.van_rossum_dist,
+                          [self.qarray1, self.qarray2], self.tau3)
+        self.assertRaises(TypeError, stds.van_rossum_dist,
+                          [self.qarray1, self.qarray2], 5.0 * Hz)
+
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.st11, self.st13], self.tau2)
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.st11, self.st13], 5.0)
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.st11, self.st13], self.tau2,
+                          algorithm='intuitive')
+        self.assertRaises(TypeError, stds.victor_purpura_dist,
+                          [self.st11, self.st13], 5.0,
+                          algorithm='intuitive')
+        self.assertRaises(TypeError, stds.van_rossum_dist,
+                          [self.st11, self.st13], self.q4)
+        self.assertRaises(TypeError, stds.van_rossum_dist,
+                          [self.st11, self.st13], 5.0)
+
+        self.assertRaises(NotImplementedError, stds.victor_purpura_dist,
+                          [self.st01, self.st02], self.q3,
+                          kernel=kernels.Kernel(2.0 / self.q3))
+        self.assertRaises(NotImplementedError, stds.victor_purpura_dist,
+                          [self.st01, self.st02], self.q3,
+                          kernel=kernels.SymmetricKernel(2.0 / self.q3))
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st02], self.q1,
+                             kernel=kernels.TriangularKernel(
+                                 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1],
+                         stds.victor_purpura_dist(
+                             [self.st01, self.st02], self.q3,
+                             kernel=kernels.TriangularKernel(
+                                 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1])
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st02],
+                             kernel=kernels.TriangularKernel(
+                                 2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0)
+        self.assertNotEqual(stds.victor_purpura_dist(
+                                [self.st01, self.st02],
+                                kernel=kernels.AlphaKernel(
+                                   2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0)
+
+        self.assertRaises(NameError, stds.victor_purpura_dist,
+                          [self.st11, self.st13], self.q2, algorithm='slow')
+
+    def test_victor_purpura_distance_fast(self):
+        # Tests of distances of simplest spike trains:
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st00, self.st00], self.q2)[0, 1], 0.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st00, self.st01], self.q2)[0, 1], 1.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st00], self.q2)[0, 1], 1.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st01], self.q2)[0, 1], 0.0)
+        # Tests of distances under elementary spike operations
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st02], self.q2)[0, 1], 1.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st03], self.q2)[0, 1], 1.9)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st04], self.q2)[0, 1], 2.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st05], self.q2)[0, 1], 2.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st00, self.st07], self.q2)[0, 1], 2.0)
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st08], self.q4)[0, 1], 0.4)
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st10], self.q3)[0, 1], 0.6 + 2)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st11, self.st14], self.q2)[0, 1], 1)
+        # Tests on timescales
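+        # (for very large q, shifting a spike is never cheaper than deleting
+        # and re-inserting it, so the distance counts non-coincident spikes;
+        # for q -> 0 only the difference in spike counts contributes)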
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st11, self.st14], self.q1)[0, 1],
+                         stds.victor_purpura_dist(
+                             [self.st11, self.st14], self.q5)[0, 1])
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q0)[0, 1], 6.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q1)[0, 1], 6.0)
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q5)[0, 1], 2.0, 5)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q6)[0, 1], 2.0)
+        # Tests on unordered spiketrains
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st11, self.st13], self.q4)[0, 1],
+                         stds.victor_purpura_dist(
+                             [self.st12, self.st13], self.q4)[0, 1])
+        self.assertNotEqual(stds.victor_purpura_dist(
+                                [self.st11, self.st13], self.q4,
+                                sort=False)[0, 1],
+                            stds.victor_purpura_dist(
+                                [self.st12, self.st13], self.q4,
+                                sort=False)[0, 1])
+        # Tests on metric properties with random spike trains
+        # (the symmetry axiom is checked by an explicit calculation with
+        # swapped inputs, since the returned dist_matrix is symmetric by
+        # construction)
+        dist_matrix = stds.victor_purpura_dist(
+                              [self.st21, self.st22, self.st23], self.q3)
+        for i in range(3):
+            for j in range(3):
+                self.assertGreaterEqual(dist_matrix[i, j], 0)
+                if dist_matrix[i, j] == 0:
+                    assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
+        assert_array_equal(stds.victor_purpura_dist(
+                               [self.st21, self.st22], self.q3), 
+                           stds.victor_purpura_dist(
+                               [self.st22, self.st21], self.q3))
+        self.assertLessEqual(dist_matrix[0, 1],
+                             dist_matrix[0, 2] + dist_matrix[1, 2])
+        self.assertLessEqual(dist_matrix[0, 2],
+                             dist_matrix[1, 2] + dist_matrix[0, 1])
+        self.assertLessEqual(dist_matrix[1, 2],
+                             dist_matrix[0, 1] + dist_matrix[0, 2])
+        # Tests on proper unit conversion
+        self.assertAlmostEqual(
+              stds.victor_purpura_dist([self.st14, self.st16], self.q3)[0, 1],
+              stds.victor_purpura_dist([self.st15, self.st16], self.q3)[0, 1])
+        self.assertAlmostEqual(
+              stds.victor_purpura_dist([self.st16, self.st14], self.q3)[0, 1],
+              stds.victor_purpura_dist([self.st16, self.st15], self.q3)[0, 1])
+        self.assertEqual(
+              stds.victor_purpura_dist([self.st01, self.st05], self.q3)[0, 1],
+              stds.victor_purpura_dist([self.st01, self.st05], self.q7)[0, 1])
+        # Tests on algorithmic behaviour for equal spike times
+        self.assertEqual(
+              stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1],
+              0.8 + 1.0)
+        self.assertEqual(
+              stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1],
+              stds.victor_purpura_dist([self.st32, self.st33], self.q3)[0, 1])
+        self.assertEqual(
+              stds.victor_purpura_dist(
+                  [self.st31, self.st33], self.q3)[0, 1] * 2.0,
+              stds.victor_purpura_dist(
+                  [self.st32, self.st34], self.q3)[0, 1])
+        # Tests on spike train list lengths smaller than 2
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st21], self.q3)[0, 0], 0)
+        self.assertEqual(len(stds.victor_purpura_dist([], self.q3)), 0)
+
+    def test_victor_purpura_distance_intuitive(self):
+        # Tests of distances of simplest spike trains
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st00, self.st00], self.q2,
+                             algorithm='intuitive')[0, 1], 0.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st00, self.st01], self.q2,
+                             algorithm='intuitive')[0, 1], 1.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st00], self.q2,
+                             algorithm='intuitive')[0, 1], 1.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st01], self.q2,
+                             algorithm='intuitive')[0, 1], 0.0)
+        # Tests of distances under elementary spike operations
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st02], self.q2,
+                             algorithm='intuitive')[0, 1], 1.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st03], self.q2,
+                             algorithm='intuitive')[0, 1], 1.9)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st04], self.q2,
+                             algorithm='intuitive')[0, 1], 2.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st05], self.q2,
+                             algorithm='intuitive')[0, 1], 2.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st00, self.st07], self.q2,
+                             algorithm='intuitive')[0, 1], 2.0)
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st08], self.q4,
+                             algorithm='intuitive')[0, 1], 0.4)
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st10], self.q3,
+                             algorithm='intuitive')[0, 1], 2.6)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st11, self.st14], self.q2,
+                             algorithm='intuitive')[0, 1], 1)
+        # Tests on timescales
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st11, self.st14], self.q1,
+                             algorithm='intuitive')[0, 1],
+                         stds.victor_purpura_dist(
+                             [self.st11, self.st14], self.q5,
+                             algorithm='intuitive')[0, 1])
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q0,
+                             algorithm='intuitive')[0, 1], 6.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q1,
+                             algorithm='intuitive')[0, 1], 6.0)
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q5,
+                             algorithm='intuitive')[0, 1], 2.0, 5)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st07, self.st11], self.q6,
+                             algorithm='intuitive')[0, 1], 2.0)
+        # Tests on unordered spiketrains
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st11, self.st13], self.q4,
+                             algorithm='intuitive')[0, 1],
+                         stds.victor_purpura_dist(
+                             [self.st12, self.st13], self.q4,
+                             algorithm='intuitive')[0, 1])
+        self.assertNotEqual(stds.victor_purpura_dist(
+                                [self.st11, self.st13], self.q4,
+                                sort=False, algorithm='intuitive')[0, 1],
+                            stds.victor_purpura_dist(
+                                [self.st12, self.st13], self.q4,
+                                sort=False, algorithm='intuitive')[0, 1])
+        # Tests on metric properties with random spike trains
+        # (the symmetry axiom is checked by an explicit calculation with
+        # swapped inputs, since the returned dist_matrix is symmetric by
+        # construction)
+        dist_matrix = stds.victor_purpura_dist(
+                          [self.st21, self.st22, self.st23],
+                          self.q3, algorithm='intuitive')
+        for i in range(3):
+            for j in range(3):
+                self.assertGreaterEqual(dist_matrix[i, j], 0)
+                if dist_matrix[i, j] == 0:
+                    assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
+        assert_array_equal(stds.victor_purpura_dist(
+                               [self.st21, self.st22], self.q3,
+                               algorithm='intuitive'),
+                           stds.victor_purpura_dist(
+                               [self.st22, self.st21], self.q3,
+                               algorithm='intuitive'))
+        self.assertLessEqual(dist_matrix[0, 1],
+                             dist_matrix[0, 2] + dist_matrix[1, 2])
+        self.assertLessEqual(dist_matrix[0, 2],
+                             dist_matrix[1, 2] + dist_matrix[0, 1])
+        self.assertLessEqual(dist_matrix[1, 2],
+                             dist_matrix[0, 1] + dist_matrix[0, 2])
+        # Tests on proper unit conversion
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                                   [self.st14, self.st16], self.q3,
+                                   algorithm='intuitive')[0, 1],
+                               stds.victor_purpura_dist(
+                                   [self.st15, self.st16], self.q3,
+                                   algorithm='intuitive')[0, 1])
+        self.assertAlmostEqual(stds.victor_purpura_dist(
+                                   [self.st16, self.st14], self.q3,
+                                   algorithm='intuitive')[0, 1],
+                               stds.victor_purpura_dist(
+                                   [self.st16, self.st15], self.q3,
+                                   algorithm='intuitive')[0, 1])
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st01, self.st05], self.q3,
+                             algorithm='intuitive')[0, 1],
+                         stds.victor_purpura_dist(
+                             [self.st01, self.st05], self.q7,
+                             algorithm='intuitive')[0, 1])
+        # Tests on algorithmic behaviour for equal spike times
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st31, self.st34], self.q3,
+                             algorithm='intuitive')[0, 1],
+                         0.8 + 1.0)
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st31, self.st34], self.q3,
+                             algorithm='intuitive')[0, 1],
+                         stds.victor_purpura_dist(
+                             [self.st32, self.st33], self.q3,
+                             algorithm='intuitive')[0, 1])
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st31, self.st33], self.q3,
+                             algorithm='intuitive')[0, 1] * 2.0,
+                         stds.victor_purpura_dist(
+                             [self.st32, self.st34], self.q3,
+                             algorithm='intuitive')[0, 1])
+        # Tests on spike train list lengths smaller than 2
+        self.assertEqual(stds.victor_purpura_dist(
+                             [self.st21], self.q3,
+                             algorithm='intuitive')[0, 0], 0)
+        self.assertEqual(len(stds.victor_purpura_dist(
+                             [], self.q3, algorithm='intuitive')), 0)
+
+    def test_victor_purpura_algorithm_comparison(self):
+        assert_array_almost_equal(
+                    stds.victor_purpura_dist([self.st21, self.st22, self.st23],
+                                             self.q3), 
+                    stds.victor_purpura_dist([self.st21, self.st22, self.st23],
+                                             self.q3, algorithm='intuitive'))
+
+    def test_van_rossum_distance(self):
+        # Tests of distances of simplest spike trains
+        self.assertEqual(stds.van_rossum_dist(
+                             [self.st00, self.st00], self.tau2)[0, 1], 0.0)
+        self.assertEqual(stds.van_rossum_dist(
+                             [self.st00, self.st01], self.tau2)[0, 1], 1.0)
+        self.assertEqual(stds.van_rossum_dist(
+                             [self.st01, self.st00], self.tau2)[0, 1], 1.0)
+        self.assertEqual(stds.van_rossum_dist(
+                             [self.st01, self.st01], self.tau2)[0, 1], 0.0)
+        # Tests of distances under elementary spike operations
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                                   [self.st01, self.st02], self.tau2)[0, 1],
+                               float(np.sqrt(2*(1.0-np.exp(-np.absolute(
+                                         ((self.st01[0]-self.st02[0]) /
+                                             self.tau2).simplified))))))
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                                   [self.st01, self.st05], self.tau2)[0, 1],
+                               float(np.sqrt(2*(1.0-np.exp(-np.absolute(
+                                         ((self.st01[0]-self.st05[0]) /
+                                             self.tau2).simplified))))))
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                                   [self.st01, self.st05], self.tau2)[0, 1],
+                               np.sqrt(2.0), 1)
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                                   [self.st01, self.st06], self.tau2)[0, 1],
+                               np.sqrt(2.0), 20)
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                                   [self.st00, self.st07], self.tau1)[0, 1],
+                               np.sqrt(0 + 2))
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                                   [self.st07, self.st08], self.tau4)[0, 1],
+                               float(np.sqrt(2*(1.0-np.exp(-np.absolute(
+                                         ((self.st07[0]-self.st08[-1]) /
+                                             self.tau4).simplified))))))
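+        # Reference value by direct numerical integration: the distance is
+        # sqrt(2 * integral((f - g)^2 dt) / tau), where f and g are the two
+        # spike trains convolved with a causal exponential kernel exp(-t/tau).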
+        f_minus_g_squared = (
+               (self.t > self.st08[0]) * np.exp(
+                            -((self.t-self.st08[0])/self.tau3).simplified) +
+               (self.t > self.st08[1]) * np.exp(
+                            -((self.t-self.st08[1])/self.tau3).simplified) -
+               (self.t > self.st09[0]) * np.exp(
+                            -((self.t-self.st09[0])/self.tau3).simplified))**2
+        distance = np.sqrt(2.0 * spint.cumtrapz(
+                           y=f_minus_g_squared, x=self.t.magnitude)[-1] /
+                           self.tau3.rescale(self.t.units).magnitude)
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                       [self.st08, self.st09], self.tau3)[0, 1], distance, 5)
+        self.assertAlmostEqual(stds.van_rossum_dist(
+                             [self.st11, self.st14], self.tau2)[0, 1], 1)
+        # Tests on timescales
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st11, self.st14], self.tau1)[0, 1],
+                stds.van_rossum_dist([self.st11, self.st14], self.tau5)[0, 1])
+
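+        # Limit behaviour: for tau -> 0 every spike contributes independently,
+        # so the distance approaches sqrt(n1 + n2); for tau -> infinity only
+        # the difference in spike counts |n1 - n2| remains.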
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st11], self.tau0)[0, 1],
+                np.sqrt(len(self.st07) + len(self.st11)))
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st14], self.tau0)[0, 1],
+                np.sqrt(len(self.st07) + len(self.st14)))
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st11], self.tau1)[0, 1],
+                np.sqrt(len(self.st07) + len(self.st11)))
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st14], self.tau1)[0, 1],
+                np.sqrt(len(self.st07) + len(self.st14)))
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st11], self.tau5)[0, 1],
+                np.absolute(len(self.st07) - len(self.st11)))
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st14], self.tau5)[0, 1],
+                np.absolute(len(self.st07) - len(self.st14)))
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st11], self.tau6)[0, 1],
+                np.absolute(len(self.st07) - len(self.st11)))
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st07, self.st14], self.tau6)[0, 1],
+                np.absolute(len(self.st07) - len(self.st14)))
+        # Tests on unordered spiketrains
+        self.assertEqual(
+                stds.van_rossum_dist([self.st11, self.st13], self.tau4)[0, 1],
+                stds.van_rossum_dist([self.st12, self.st13], self.tau4)[0, 1])
+        self.assertNotEqual(
+                stds.van_rossum_dist([self.st11, self.st13],
+                                     self.tau4, sort=False)[0, 1],
+                stds.van_rossum_dist([self.st12, self.st13],
+                                     self.tau4, sort=False)[0, 1])
+        # Tests on metric properties with random spike trains
+        # (the symmetry axiom is checked by an explicit calculation with
+        # swapped inputs, since the returned dist_matrix is symmetric by
+        # construction)
+        dist_matrix = stds.van_rossum_dist(
+                          [self.st21, self.st22, self.st23], self.tau3)
+        for i in range(3):
+            for j in range(3):
+                self.assertGreaterEqual(dist_matrix[i, j], 0)
+                if dist_matrix[i, j] == 0:
+                    assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
+        assert_array_equal(
+                stds.van_rossum_dist([self.st21, self.st22], self.tau3),
+                stds.van_rossum_dist([self.st22, self.st21], self.tau3))
+        self.assertLessEqual(dist_matrix[0, 1],
+                             dist_matrix[0, 2] + dist_matrix[1, 2])
+        self.assertLessEqual(dist_matrix[0, 2],
+                             dist_matrix[1, 2] + dist_matrix[0, 1])
+        self.assertLessEqual(dist_matrix[1, 2],
+                             dist_matrix[0, 1] + dist_matrix[0, 2])
+        # Tests on proper unit conversion
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st14, self.st16], self.tau3)[0, 1],
+                stds.van_rossum_dist([self.st15, self.st16], self.tau3)[0, 1])
+        self.assertAlmostEqual(
+                stds.van_rossum_dist([self.st16, self.st14], self.tau3)[0, 1],
+                stds.van_rossum_dist([self.st16, self.st15], self.tau3)[0, 1])
+        self.assertEqual(
+                stds.van_rossum_dist([self.st01, self.st05], self.tau3)[0, 1],
+                stds.van_rossum_dist([self.st01, self.st05], self.tau7)[0, 1])
+        # Tests on algorithmic behaviour for equal spike times
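+        # (the expected value is again obtained by numerically integrating
+        # the squared difference of the exponentially filtered spike trains)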
+        f_minus_g_squared = (
+               (self.t > self.st31[0]) * np.exp(
+                            -((self.t-self.st31[0])/self.tau3).simplified) -
+               (self.t > self.st34[0]) * np.exp(
+                            -((self.t-self.st34[0])/self.tau3).simplified) -
+               (self.t > self.st34[1]) * np.exp(
+                            -((self.t-self.st34[1])/self.tau3).simplified))**2
+        distance = np.sqrt(2.0 * spint.cumtrapz(
+                           y=f_minus_g_squared, x=self.t.magnitude)[-1] /
+                           self.tau3.rescale(self.t.units).magnitude)
+        self.assertAlmostEqual(stds.van_rossum_dist([self.st31, self.st34],
+                                                    self.tau3)[0, 1],
+                               distance, 5)
+        self.assertEqual(stds.van_rossum_dist([self.st31, self.st34],
+                                              self.tau3)[0, 1],
+                         stds.van_rossum_dist([self.st32, self.st33],
+                                              self.tau3)[0, 1])
+        self.assertEqual(stds.van_rossum_dist([self.st31, self.st33],
+                                              self.tau3)[0, 1] * 2.0,
+                         stds.van_rossum_dist([self.st32, self.st34],
+                                              self.tau3)[0, 1])
+        # Tests on spike train list lengths smaller than 2
+        self.assertEqual(stds.van_rossum_dist([self.st21], self.tau3)[0, 0], 0)
+        self.assertEqual(len(stds.van_rossum_dist([], self.tau3)), 0)
+
+if __name__ == '__main__':
+    unittest.main()

+ 580 - 0
code/elephant/elephant/test/test_spike_train_generation.py

@@ -0,0 +1,580 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the spike_train_generation module.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+from __future__ import division
+import unittest
+import os
+import warnings
+
+import neo
+import numpy as np
+from numpy.testing.utils import assert_array_almost_equal
+from scipy.stats import kstest, expon
+from quantities import ms, second, Hz, kHz, mV, dimensionless
+import elephant.spike_train_generation as stgen
+from elephant.statistics import isi
+
+
+def pdiff(a, b):
+    """Difference between a and b as a fraction of a
+
+    i.e. abs((a - b)/a)
+    """
+    return abs((a - b)/a)
+
+
+class AnalogSignalThresholdDetectionTestCase(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_threshold_detection(self):
+        # Test whether spikes are extracted at the correct times from
+        # an analog signal.
+
+        # Load membrane potential simulated using Brian2
+        # according to make_spike_extraction_test_data.py.
+        curr_dir = os.path.dirname(os.path.realpath(__file__))
+        npz_file_loc = os.path.join(curr_dir,'spike_extraction_test_data.npz')
+        iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
+        data = iom2.read()
+        vm = data[0].segments[0].analogsignals[0]
+        spike_train = stgen.threshold_detection(vm)
+        try:
+            len(spike_train)
+        except TypeError: # Handles an error in Neo related to some zero length
+                          # spike trains being treated as unsized objects.
+            warnings.warn(("The spike train may be an unsized object. This may be related "
+                            "to an issue in Neo with some zero-length SpikeTrain objects. "
+                            "Bypassing this by creating an empty SpikeTrain object."))
+            spike_train = neo.core.SpikeTrain([],t_start=spike_train.t_start,
+                                                 t_stop=spike_train.t_stop,
+                                                 units=spike_train.units)
+
+        # Correct values determined previously.
+        true_spike_train = [0.0123, 0.0354, 0.0712, 0.1191,
+                            0.1694, 0.22, 0.2711] * second
+
+        # Does threshold_detection give the correct number of spikes?
+        self.assertEqual(len(spike_train), len(true_spike_train))
+        # Does threshold_detection give the correct times for the spikes?
+        try:
+            assert_array_almost_equal(spike_train, true_spike_train)
+        except AttributeError:
+            self.assertTrue(np.array_equal(spike_train, true_spike_train))
+
+
+class AnalogSignalPeakDetectionTestCase(unittest.TestCase):
+
+    def setUp(self):
+        curr_dir = os.path.dirname(os.path.realpath(__file__))
+        npz_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.npz')
+        iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
+        data = iom2.read()
+        self.vm = data[0].segments[0].analogsignals[0]
+        self.true_time_stamps = [0.0124,  0.0354,  0.0713,  0.1192,  0.1695,
+                                 0.2201,  0.2711] * second
+
+    def test_peak_detection_time_stamps(self):
+        # Test with default arguments
+        result = stgen.peak_detection(self.vm)
+        self.assertEqual(len(self.true_time_stamps), len(result))
+        self.assertIsInstance(result, neo.core.SpikeTrain)
+
+        try:
+            assert_array_almost_equal(result, self.true_time_stamps)
+        except AttributeError:
+            self.assertTrue(np.array_equal(result, self.true_time_stamps))
+
+    def test_peak_detection_threshold(self):
+        # Test for empty SpikeTrain when threshold is too high
+        result = stgen.peak_detection(self.vm, threshold=30 * mV)
+        self.assertEqual(len(result), 0)
+
+class AnalogSignalSpikeExtractionTestCase(unittest.TestCase):
+    
+    def setUp(self):
+        curr_dir = os.path.dirname(os.path.realpath(__file__))
+        npz_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.npz')
+        iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
+        data = iom2.read()
+        self.vm = data[0].segments[0].analogsignals[0]
+        self.first_spike = np.array([-0.04084546, -0.03892033, -0.03664779,
+                                     -0.03392689, -0.03061474, -0.02650277,
+                                     -0.0212756, -0.01443531, -0.00515365,
+                                     0.00803962, 0.02797951, -0.07,
+                                     -0.06974495, -0.06950466, -0.06927778,
+                                     -0.06906314, -0.06885969, -0.06866651,
+                                     -0.06848277, -0.06830773, -0.06814071,
+                                     -0.06798113, -0.06782843, -0.06768213,
+                                     -0.06754178, -0.06740699, -0.06727737,
+                                     -0.06715259, -0.06703235, -0.06691635])
+    
+    def test_spike_extraction_waveform(self):
+        spike_train = stgen.spike_extraction(self.vm.reshape(-1),
+                                             extr_interval = (-1*ms, 2*ms))
+        try:
+            assert_array_almost_equal(spike_train.waveforms[0][0].magnitude.reshape(-1),
+                                      self.first_spike)
+        except AttributeError:
+            self.assertTrue(
+                np.array_equal(spike_train.waveforms[0][0].magnitude,
+                               self.first_spike))
+
+        
+
+class HomogeneousPoissonProcessTestCase(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_statistics(self):
+        # This is a statistical test that has a non-zero chance of failure
+        # during normal operation. Thus, we set the random seed to a value that
+        # creates a realization passing the test.
+        np.random.seed(seed=12345)
+        
+        for rate in [123.0*Hz, 0.123*kHz]:
+            for t_stop in [2345*ms, 2.345*second]:
+                spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
+                intervals = isi(spiketrain)
+
+                expected_spike_count = int((rate * t_stop).simplified)
+                self.assertLess(pdiff(expected_spike_count, spiketrain.size), 0.2)  # should fail about 1 time in 1000
+
+                expected_mean_isi = (1/rate)
+                self.assertLess(pdiff(expected_mean_isi, intervals.mean()), 0.2)
+
+                expected_first_spike = 0*ms
+                self.assertLess(spiketrain[0] - expected_first_spike, 7*expected_mean_isi)
+
+                expected_last_spike = t_stop
+                self.assertLess(expected_last_spike - spiketrain[-1], 7*expected_mean_isi)
+
+                # Kolmogorov-Smirnov test
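+                # (ISIs of a homogeneous Poisson process are exponentially
+                # distributed with scale 1/rate)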
+                D, p = kstest(intervals.rescale(t_stop.units),
+                              "expon",
+                              args=(0, expected_mean_isi.rescale(t_stop.units)),  # args are (loc, scale)
+                              alternative='two-sided')
+                self.assertGreater(p, 0.001)
+                self.assertLess(D, 0.12)
+
+    def test_low_rates(self):
+        spiketrain = stgen.homogeneous_poisson_process(0*Hz, t_stop=1000*ms)
+        self.assertEqual(spiketrain.size, 0)
+        # not really a test, just making sure that all code paths are covered
+        for i in range(10):
+            spiketrain = stgen.homogeneous_poisson_process(1*Hz, t_stop=1000*ms)
+
+    def test_buffer_overrun(self):
+        np.random.seed(6085)  # this seed should produce a buffer overrun
+        t_stop=1000*ms
+        rate = 10*Hz
+        spiketrain = stgen.homogeneous_poisson_process(rate, t_stop=t_stop)
+        expected_last_spike = t_stop
+        expected_mean_isi = (1/rate).rescale(ms)
+        self.assertLess(expected_last_spike - spiketrain[-1], 4*expected_mean_isi)
+
+
+class HomogeneousGammaProcessTestCase(unittest.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_statistics(self):
+        # This is a statistical test that has a non-zero chance of failure
+        # during normal operation. Thus, we set the random seed to a value that
+        # creates a realization passing the test.
+        np.random.seed(seed=12345)
+
+        a = 3.0
+        for b in (67.0*Hz, 0.067*kHz):
+            for t_stop in (2345*ms, 2.345*second):
+                spiketrain = stgen.homogeneous_gamma_process(a, b, t_stop=t_stop)
+                intervals = isi(spiketrain)
+
+                expected_spike_count = int((b/a * t_stop).simplified)
+                self.assertLess(pdiff(expected_spike_count, spiketrain.size), 0.25)  # should fail about 1 time in 1000
+
+                expected_mean_isi = (a/b).rescale(ms)
+                self.assertLess(pdiff(expected_mean_isi, intervals.mean()), 0.3)
+
+                expected_first_spike = 0*ms
+                self.assertLess(spiketrain[0] - expected_first_spike, 4*expected_mean_isi)
+
+                expected_last_spike = t_stop
+                self.assertLess(expected_last_spike - spiketrain[-1], 4*expected_mean_isi)
+
+                # Kolmogorov-Smirnov test
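+                # (ISIs of a gamma renewal process with shape a and rate b
+                # are gamma-distributed with shape a and scale 1/b)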
+                D, p = kstest(intervals.rescale(t_stop.units),
+                              "gamma",
+                              args=(a, 0, (1/b).rescale(t_stop.units)),  # args are (a, loc, scale)
+                              alternative='two-sided')
+                self.assertGreater(p, 0.001)
+                self.assertLess(D, 0.25)
+
+
+class _n_poisson_TestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.n = 4
+        self.rate = 10*Hz
+        self.rates = range(1, self.n + 1)*Hz
+        self.t_stop = 10000*ms
+
+    def test_poisson(self):
+
+        # Check the output types for a single input rate and n neurons
+        pp = stgen._n_poisson(rate=self.rate, t_stop=self.t_stop, n=self.n)
+        self.assertIsInstance(pp, list)
+        self.assertIsInstance(pp[0], neo.core.spiketrain.SpikeTrain)
+        self.assertEqual(pp[0].simplified.units, 1000*ms)
+        self.assertEqual(len(pp), self.n)
+
+        # Check the output types for input list of rates
+        pp = stgen._n_poisson(rate=self.rates, t_stop=self.t_stop)
+        self.assertIsInstance(pp, list)
+        self.assertIsInstance(pp[0], neo.core.spiketrain.SpikeTrain)
+        self.assertEqual(pp[0].simplified.units, 1000*ms)
+        self.assertEqual(len(pp), self.n)
+
+    def test_poisson_error(self):
+
+        # Dimensionless rate
+        self.assertRaises(
+            ValueError, stgen._n_poisson, rate=5, t_stop=self.t_stop)
+        # Negative rate
+        self.assertRaises(
+            ValueError, stgen._n_poisson, rate=-5*Hz, t_stop=self.t_stop)
+        # Negative value when rate is a list
+        self.assertRaises(
+            ValueError, stgen._n_poisson, rate=[-5, 3]*Hz, t_stop=self.t_stop)
+        # Negative n
+        self.assertRaises(
+            ValueError, stgen._n_poisson, rate=self.rate, t_stop=self.t_stop,
+            n=-1)
+        # t_start>t_stop
+        self.assertRaises(
+            ValueError, stgen._n_poisson, rate=self.rate, t_start=4*ms,
+            t_stop=3*ms, n=3)
+
+
+class singleinteractionprocess_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.n = 4
+        self.rate = 10*Hz
+        self.rates = range(1, self.n + 1)*Hz
+        self.t_stop = 10000*ms
+        self.rate_c = 1*Hz
+
+    def test_sip(self):
+
+        # Generate an example SIP mode
+        sip, coinc = stgen.single_interaction_process(
+            n=self.n, t_stop=self.t_stop, rate=self.rate,
+            rate_c=self.rate_c, return_coinc=True)
+
+        # Check the output types
+        self.assertEqual(type(sip), list)
+        self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
+        self.assertEqual(type(coinc[0]), neo.core.spiketrain.SpikeTrain)
+        self.assertEqual(sip[0].simplified.units,  1000*ms)
+        self.assertEqual(coinc[0].simplified.units,  1000*ms)
+
+        # Check the output length
+        self.assertEqual(len(sip), self.n)
+        self.assertEqual(
+            len(coinc[0]), (self.rate_c*self.t_stop).rescale(dimensionless))
+
+        # Generate an example SIP mode giving a list of rates as input
+        sip, coinc = stgen.single_interaction_process(
+            t_stop=self.t_stop, rate=self.rates,
+            rate_c=self.rate_c, return_coinc=True)
+
+        # Check the output types
+        self.assertEqual(type(sip), list)
+        self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
+        self.assertEqual(type(coinc[0]), neo.core.spiketrain.SpikeTrain)
+        self.assertEqual(sip[0].simplified.units,  1000*ms)
+        self.assertEqual(coinc[0].simplified.units,  1000*ms)
+
+        # Check the output length
+        self.assertEqual(len(sip), self.n)
+        self.assertEqual(
+            len(coinc[0]), (self.rate_c*self.t_stop).rescale(dimensionless))
+
+        # Generate an example SIP mode with a stochastic number of coincidences
+        sip = stgen.single_interaction_process(
+            n=self.n, t_stop=self.t_stop, rate=self.rate,
+            rate_c=self.rate_c, coincidences='stochastic', return_coinc=False)
+
+        # Check the output types
+        self.assertEqual(type(sip), list)
+        self.assertEqual(type(sip[0]), neo.core.spiketrain.SpikeTrain)
+        self.assertEqual(sip[0].simplified.units,  1000*ms)
+
+    def test_sip_error(self):
+        # Negative rate
+        self.assertRaises(
+            ValueError, stgen.single_interaction_process, n=self.n, rate=-5*Hz,
+            rate_c=self.rate_c, t_stop=self.t_stop)
+        # Negative coincidence rate
+        self.assertRaises(
+            ValueError, stgen.single_interaction_process, n=self.n,
+            rate=self.rate, rate_c=-3*Hz, t_stop=self.t_stop)
+        # Negative value when rate is a list
+        self.assertRaises(
+            ValueError, stgen.single_interaction_process, n=self.n,
+            rate=[-5, 3, 4, 2]*Hz, rate_c=self.rate_c, t_stop=self.t_stop)
+        # Negative n
+        self.assertRaises(
+            ValueError, stgen.single_interaction_process, n=-1,
+            rate=self.rate, rate_c=self.rate_c, t_stop=self.t_stop)
+        # Rate_c < rate
+        self.assertRaises(
+            ValueError, stgen.single_interaction_process, n=self.n,
+            rate=self.rate, rate_c=self.rate + 1*Hz, t_stop=self.t_stop)
+
+
+class cppTestCase(unittest.TestCase):
+    def test_cpp_hom(self):
+        # testing output with generic inputs
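+        # A is the amplitude distribution of the compound Poisson process:
+        # A[k] gives the probability that a generated event appears
+        # synchronously in k of the output spike trains.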
+        A = [0, .9, .1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = 3 * Hz
+        cpp_hom = stgen.cpp(rate, A, t_stop, t_start=t_start)
+        # testing the output formats
+        self.assertEqual(
+            [type(train) for train in cpp_hom], [neo.SpikeTrain]*len(cpp_hom))
+        self.assertEqual(cpp_hom[0].simplified.units, 1000 * ms)
+        self.assertEqual(type(cpp_hom), list)
+        # testing quantities format of the output
+        self.assertEqual(
+            [train.simplified.units for train in cpp_hom], [1000 * ms]*len(
+                cpp_hom))
+        # testing output t_start t_stop
+        for st in cpp_hom:
+            self.assertEqual(st.t_stop, t_stop)
+            self.assertEqual(st.t_start, t_start)
+        self.assertEqual(len(cpp_hom), len(A) - 1)
+
+        # testing the units
+        A = [0, 0.9, 0.1]
+        t_stop = 10000*ms
+        t_start = 5 * 1000 * ms
+        rate = 3 * Hz
+        cpp_unit = stgen.cpp(rate, A, t_stop, t_start=t_start)
+
+        self.assertEqual(cpp_unit[0].units, t_stop.units)
+        self.assertEqual(cpp_unit[0].t_stop.units, t_stop.units)
+        self.assertEqual(cpp_unit[0].t_start.units, t_stop.units)
+
+        # testing output without copying any spikes
+        A = [1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = 3 * Hz
+        cpp_hom_empty = stgen.cpp(rate, A, t_stop, t_start=t_start)
+
+        self.assertEqual(
+            [len(train) for train in cpp_hom_empty], [0]*len(cpp_hom_empty))
+
+        # testing output with rate equal to 0
+        A = [0, .9, .1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = 0 * Hz
+        cpp_hom_empty_r = stgen.cpp(rate, A, t_stop, t_start=t_start)
+        self.assertEqual(
+            [len(train) for train in cpp_hom_empty_r], [0]*len(
+                cpp_hom_empty_r))
+
+        # testing output with same spike trains in output
+        A = [0, 0, 1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = 3 * Hz
+        cpp_hom_eq = stgen.cpp(rate, A, t_stop, t_start=t_start)
+
+        self.assertTrue(
+            np.allclose(cpp_hom_eq[0].magnitude, cpp_hom_eq[1].magnitude))
+
+    def test_cpp_hom_errors(self):
+        # testing raises of ValueError (wrong inputs)
+        # testing empty amplitude
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[], t_stop=10*1000 * ms, rate=3*Hz)
+
+        # testing sum of amplitude>1
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[1, 1, 1], t_stop=10*1000 * ms, rate=3*Hz)
+        # testing negative value in the amplitude
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[-1, 1, 1], t_stop=10*1000 * ms,
+            rate=3*Hz)
+        # test negative rate
+        self.assertRaises(
+            AssertionError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
+            rate=-3*Hz)
+        # test wrong unit for rate
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
+            rate=3*1000 * ms)
+
+        # testing raises for missing input units
+        # Testing missing unit to t_stop
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10, rate=3*Hz)
+        # Testing missing unit to t_start
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms, rate=3*Hz,
+            t_start=3)
+        # testing rate missing unit
+        self.assertRaises(
+            AttributeError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
+            rate=3)
+
+    def test_cpp_het(self):
+        # testing output with generic inputs
+        A = [0, .9, .1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = [3, 4] * Hz
+        cpp_het = stgen.cpp(rate, A, t_stop, t_start=t_start)
+        # testing the output formats
+        self.assertEqual(
+            [type(train) for train in cpp_het], [neo.SpikeTrain]*len(cpp_het))
+        self.assertEqual(cpp_het[0].simplified.units, 1000 * ms)
+        self.assertEqual(type(cpp_het), list)
+        # testing units
+        self.assertEqual(
+            [train.simplified.units for train in cpp_het], [1000 * ms]*len(
+                cpp_het))
+        # testing output t_start and t_stop
+        for st in cpp_het:
+            self.assertEqual(st.t_stop, t_stop)
+            self.assertEqual(st.t_start, t_start)
+        # testing the number of output spiketrains
+        self.assertEqual(len(cpp_het), len(A) - 1)
+        self.assertEqual(len(cpp_het), len(rate))
+
+        # testing the units
+        A = [0, 0.9, 0.1]
+        t_stop = 10000*ms
+        t_start = 5 * 1000 * ms
+        rate = [3, 4] * Hz
+        cpp_unit = stgen.cpp(rate, A, t_stop, t_start=t_start)
+
+        self.assertEqual(cpp_unit[0].units, t_stop.units)
+        self.assertEqual(cpp_unit[0].t_stop.units, t_stop.units)
+        self.assertEqual(cpp_unit[0].t_start.units, t_stop.units)
+        # testing without copying any spikes
+        A = [1, 0, 0]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = [3, 4] * Hz
+        cpp_het_empty = stgen.cpp(rate, A, t_stop, t_start=t_start)
+
+        self.assertEqual(len(cpp_het_empty[0]), 0)
+
+        # testing output with rate equal to 0
+        A = [0, .9, .1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = [0, 0] * Hz
+        cpp_het_empty_r = stgen.cpp(rate, A, t_stop, t_start=t_start)
+        self.assertEqual(
+            [len(train) for train in cpp_het_empty_r], [0]*len(
+                cpp_het_empty_r))
+
+        # testing completely sync spiketrains
+        A = [0, 0, 1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = [3, 3] * Hz
+        cpp_het_eq = stgen.cpp(rate, A, t_stop, t_start=t_start)
+
+        self.assertTrue(np.allclose(
+            cpp_het_eq[0].magnitude, cpp_het_eq[1].magnitude))
+
+    def test_cpp_het_err(self):
+        # testing raises of ValueError (wrong inputs)
+        # testing empty amplitude
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[], t_stop=10*1000 * ms, rate=[3, 4]*Hz)
+        # testing sum amplitude>1
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[1, 1, 1], t_stop=10*1000 * ms,
+            rate=[3, 4]*Hz)
+        # testing amplitude negative value
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[-1, 1, 1], t_stop=10*1000 * ms,
+            rate=[3, 4]*Hz)
+        # testing negative rate
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
+            rate=[-3, 4]*Hz)
+        # testing empty rate
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms, rate=[]*Hz)
+        # testing empty amplitude
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[], t_stop=10*1000 * ms, rate=[3, 4]*Hz)
+        # testing different len(A)-1 and len(rate)
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1], t_stop=10*1000 * ms, rate=[3, 4]*Hz)
+        # testing rate with different unit from Hz
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1], t_stop=10*1000 * ms,
+            rate=[3, 4]*1000 * ms)
+        # Testing analytical constraint between amplitude and rate
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 0, 1], t_stop=10*1000 * ms,
+            rate=[3, 4]*Hz, t_start=3)
+
+        # testing raises for missing input units
+        # Testing missing unit to t_stop
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10, rate=[3, 4]*Hz)
+        # Testing missing unit to t_start
+        self.assertRaises(
+            ValueError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
+            rate=[3, 4]*Hz, t_start=3)
+        # Testing missing unit to rate
+        self.assertRaises(
+            AttributeError, stgen.cpp, A=[0, 1, 0], t_stop=10*1000 * ms,
+            rate=[3, 4])
+
+    def test_cpp_jittered(self):
+        # testing output with generic inputs
+        A = [0, .9, .1]
+        t_stop = 10 * 1000 * ms
+        t_start = 5 * 1000 * ms
+        rate = 3 * Hz
+        cpp_shift = stgen.cpp(
+            rate, A, t_stop, t_start=t_start, shift=3*ms)
+        # testing the output formats
+        self.assertEqual(
+            [type(train) for train in cpp_shift], [neo.SpikeTrain]*len(
+                cpp_shift))
+        self.assertEqual(cpp_shift[0].simplified.units, 1000 * ms)
+        self.assertEqual(type(cpp_shift), list)
+        # testing quantities format of the output
+        self.assertEqual(
+            [train.simplified.units for train in cpp_shift],
+            [1000 * ms]*len(cpp_shift))
+        # testing output t_start t_stop
+        for st in cpp_shift:
+            self.assertEqual(st.t_stop, t_stop)
+            self.assertEqual(st.t_start, t_start)
+        self.assertEqual(len(cpp_shift), len(A) - 1)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 319 - 0
code/elephant/elephant/test/test_spike_train_surrogates.py

@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the spike_train_surrogates module.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+import elephant.spike_train_surrogates as surr
+import numpy as np
+import quantities as pq
+import neo
+
+np.random.seed(0)
+
+
+class SurrogatesTestCase(unittest.TestCase):
+
+    def test_dither_spikes_output_format(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        dither = 10 * pq.ms
+        surrs = surr.dither_spikes(st, dither=dither, n=nr_surr)
+
+        self.assertIsInstance(surrs, list)
+        self.assertEqual(len(surrs), nr_surr)
+
+        for surrog in surrs:
+            self.assertIsInstance(surrog, neo.SpikeTrain)
+            self.assertEqual(surrog.units, st.units)
+            self.assertEqual(surrog.t_start, st.t_start)
+            self.assertEqual(surrog.t_stop, st.t_stop)
+            self.assertEqual(len(surrog), len(st))
+
+    def test_dither_spikes_empty_train(self):
+
+        st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
+
+        dither = 10 * pq.ms
+        surrog = surr.dither_spikes(st, dither=dither, n=1)[0]
+        self.assertEqual(len(surrog), 0)
+
+    def test_dither_spikes_output_decimals(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        dither = 10 * pq.ms
+        np.random.seed(42)
+        surrs = surr.dither_spikes(st, dither=dither, decimals=3, n=nr_surr)
+
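+        # Re-seeding with the same value is assumed to reproduce the random
+        # draws used inside dither_spikes, so the number of spikes receiving
+        # a zero dither (and hence staying in place) can be predicted.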
+        np.random.seed(42)
+        dither_values = np.random.random_sample((nr_surr, len(st)))
+        expected_non_dithered = np.sum(dither_values==0)
+
+        observed_non_dithered = 0
+        for surrog in surrs:
+            for i in range(len(surrog)):
+                if surrog[i] - int(surrog[i]) * pq.ms == surrog[i] - surrog[i]:
+                    observed_non_dithered += 1
+
+        self.assertEqual(observed_non_dithered, expected_non_dithered)
+
+    def test_dither_spikes_false_edges(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        dither = 10 * pq.ms
+        surrs = surr.dither_spikes(st, dither=dither, n=nr_surr, edges=False)
+
+        for surrog in surrs:
+            for i in range(len(surrog)):
+                self.assertLessEqual(surrog[i], st.t_stop)
+
+    def test_randomise_spikes_output_format(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        surrs = surr.randomise_spikes(st, n=nr_surr)
+
+        self.assertIsInstance(surrs, list)
+        self.assertEqual(len(surrs), nr_surr)
+
+        for surrog in surrs:
+            self.assertIsInstance(surrog, neo.SpikeTrain)
+            self.assertEqual(surrog.units, st.units)
+            self.assertEqual(surrog.t_start, st.t_start)
+            self.assertEqual(surrog.t_stop, st.t_stop)
+            self.assertEqual(len(surrog), len(st))
+
+    def test_randomise_spikes_empty_train(self):
+
+        st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
+
+        surrog = surr.randomise_spikes(st, n=1)[0]
+        self.assertEqual(len(surrog), 0)
+
+    def test_randomise_spikes_output_decimals(self):
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        surrs = surr.randomise_spikes(st, n=nr_surr, decimals=3)
+
+        for surrog in surrs:
+            for i in range(len(surrog)):
+                self.assertNotEqual(surrog[i] - int(surrog[i]) * pq.ms,
+                                    surrog[i] - surrog[i])
+
+    def test_shuffle_isis_output_format(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        surrs = surr.shuffle_isis(st, n=nr_surr)
+
+        self.assertIsInstance(surrs, list)
+        self.assertEqual(len(surrs), nr_surr)
+
+        for surrog in surrs:
+            self.assertIsInstance(surrog, neo.SpikeTrain)
+            self.assertEqual(surrog.units, st.units)
+            self.assertEqual(surrog.t_start, st.t_start)
+            self.assertEqual(surrog.t_stop, st.t_stop)
+            self.assertEqual(len(surrog), len(st))
+
+    def test_shuffle_isis_empty_train(self):
+
+        st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
+
+        surrog = surr.shuffle_isis(st, n=1)[0]
+        self.assertEqual(len(surrog), 0)
+
+    def test_shuffle_isis_same_isis(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        surrog = surr.shuffle_isis(st, n=1)[0]
+
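+        # Shuffling permutes the ISIs (including the interval from t_start to
+        # the first spike), so the sorted ISI sets must coincide.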
+        st_pq = st.view(pq.Quantity)
+        surr_pq = surrog.view(pq.Quantity)
+
+        isi0_orig = st[0] - st.t_start
+        ISIs_orig = np.sort([isi0_orig] + [isi for isi in np.diff(st_pq)])
+
+        isi0_surr = surrog[0] - surrog.t_start
+        ISIs_surr = np.sort([isi0_surr] + [isi for isi in np.diff(surr_pq)])
+
+        self.assertTrue(np.all(ISIs_orig == ISIs_surr))
+
+    def test_shuffle_isis_output_decimals(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        surrog = surr.shuffle_isis(st, n=1, decimals=95)[0]
+
+        st_pq = st.view(pq.Quantity)
+        surr_pq = surrog.view(pq.Quantity)
+
+        isi0_orig = st[0] - st.t_start
+        ISIs_orig = np.sort([isi0_orig] + [isi for isi in np.diff(st_pq)])
+
+        isi0_surr = surrog[0] - surrog.t_start
+        ISIs_surr = np.sort([isi0_surr] + [isi for isi in np.diff(surr_pq)])
+
+        self.assertTrue(np.all(ISIs_orig == ISIs_surr))
+
+    def test_dither_spike_train_output_format(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        shift = 10 * pq.ms
+        surrs = surr.dither_spike_train(st, shift=shift, n=nr_surr)
+
+        self.assertIsInstance(surrs, list)
+        self.assertEqual(len(surrs), nr_surr)
+
+        for surrog in surrs:
+            self.assertIsInstance(surrog, neo.SpikeTrain)
+            self.assertEqual(surrog.units, st.units)
+            self.assertEqual(surrog.t_start, st.t_start)
+            self.assertEqual(surrog.t_stop, st.t_stop)
+            self.assertEqual(len(surrog), len(st))
+
+    def test_dither_spike_train_empty_train(self):
+
+        st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
+
+        shift = 10 * pq.ms
+        surrog = surr.dither_spike_train(st, shift=shift, n=1)[0]
+        self.assertEqual(len(surrog), 0)
+
+    def test_dither_spike_train_output_decimals(self):
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        shift = 10 * pq.ms
+        surrs = surr.dither_spike_train(st, shift=shift, n=nr_surr, decimals=3)
+
+        for surrog in surrs:
+            for i in range(len(surrog)):
+                self.assertNotEqual(surrog[i] - int(surrog[i]) * pq.ms,
+                                    surrog[i] - surrog[i])
+
+    def test_dither_spike_train_false_edges(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        shift = 10 * pq.ms
+        surrs = surr.dither_spike_train(
+            st, shift=shift, n=nr_surr, edges=False)
+
+        for surrog in surrs:
+            for i in range(len(surrog)):
+                self.assertLessEqual(surrog[i], st.t_stop)
+
+    def test_jitter_spikes_output_format(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        nr_surr = 2
+        binsize = 100 * pq.ms
+        surrs = surr.jitter_spikes(st, binsize=binsize, n=nr_surr)
+
+        self.assertIsInstance(surrs, list)
+        self.assertEqual(len(surrs), nr_surr)
+
+        for surrog in surrs:
+            self.assertIsInstance(surrog, neo.SpikeTrain)
+            self.assertEqual(surrog.units, st.units)
+            self.assertEqual(surrog.t_start, st.t_start)
+            self.assertEqual(surrog.t_stop, st.t_stop)
+            self.assertEqual(len(surrog), len(st))
+
+    def test_jitter_spikes_empty_train(self):
+
+        st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
+
+        binsize = 75 * pq.ms
+        surrog = surr.jitter_spikes(st, binsize=binsize, n=1)[0]
+        self.assertEqual(len(surrog), 0)
+
+    def test_jitter_spikes_same_bins(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+
+        binsize = 100 * pq.ms
+        surrog = surr.jitter_spikes(st, binsize=binsize, n=1)[0]
+
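+        # jitter_spikes moves each spike only within its own time bin, so the
+        # bin index of every spike must be unchanged.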
+        bin_ids_orig = np.array((st.view(pq.Quantity) / binsize).rescale(
+            pq.dimensionless).magnitude, dtype=int)
+        bin_ids_surr = np.array((surrog.view(pq.Quantity) / binsize).rescale(
+            pq.dimensionless).magnitude, dtype=int)
+        self.assertTrue(np.all(bin_ids_orig == bin_ids_surr))
+
+        # Regression check for a bug in which the original and surrogate
+        # trains ended up with different numbers of spikes
+        self.assertEqual(len(st), len(surrog))
+
+    def test_jitter_spikes_unequal_binsize(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 480] * pq.ms, t_stop=500 * pq.ms)
+
+        binsize = 75 * pq.ms
+        surrog = surr.jitter_spikes(st, binsize=binsize, n=1)[0]
+
+        bin_ids_orig = np.array((st.view(pq.Quantity) / binsize).rescale(
+            pq.dimensionless).magnitude, dtype=int)
+        bin_ids_surr = np.array((surrog.view(pq.Quantity) / binsize).rescale(
+            pq.dimensionless).magnitude, dtype=int)
+
+        self.assertTrue(np.all(bin_ids_orig == bin_ids_surr))
+
+    def test_surr_method(self):
+
+        st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
+        nr_surr = 2
+        surrs = surr.surrogates(st, dt=3 * pq.ms, n=nr_surr,
+                                surr_method='shuffle_isis', edges=False)
+
+        self.assertRaises(ValueError, surr.surrogates, st, n=1,
+                          surr_method='spike_shifting',
+                          dt=None, decimals=None, edges=True)
+        self.assertTrue(len(surrs) == nr_surr)
+
+        nr_surr2 = 4
+        surrs2 = surr.surrogates(st, dt=5 * pq.ms, n=nr_surr2,
+                                 surr_method='dither_spike_train', edges=True)
+
+        for surrog in surrs:
+            self.assertTrue(isinstance(surrog, neo.SpikeTrain))
+            self.assertEqual(surrog.units, st.units)
+            self.assertEqual(surrog.t_start, st.t_start)
+            self.assertEqual(surrog.t_stop, st.t_stop)
+            self.assertEqual(len(surrog), len(st))
+        self.assertEqual(len(surrs), nr_surr)
+
+        for surrog in surrs2:
+            self.assertIsInstance(surrog, neo.SpikeTrain)
+            self.assertEqual(surrog.units, st.units)
+            self.assertEqual(surrog.t_start, st.t_start)
+            self.assertEqual(surrog.t_stop, st.t_stop)
+            self.assertEqual(len(surrog), len(st))
+        self.assertEqual(len(surrs2), nr_surr2)
+
+
+def suite():
+    suite = unittest.makeSuite(SurrogatesTestCase, 'test')
+    return suite
+
+if __name__ == "__main__":
+    runner = unittest.TextTestRunner(verbosity=2)
+    runner.run(suite())

+ 414 - 0
code/elephant/elephant/test/test_sta.py

@@ -0,0 +1,414 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the sta module.
+
+:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+import math
+import numpy as np
+import scipy
+from numpy.testing import assert_array_equal, assert_array_almost_equal
+import neo
+from neo import AnalogSignal, SpikeTrain
+from elephant.conversion import BinnedSpikeTrain
+import quantities as pq
+from quantities import ms, mV, Hz
+import elephant.sta as sta
+import warnings
+
+class sta_TestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.asiga0 = AnalogSignal(np.array([
+            np.sin(np.arange(0, 20 * math.pi, 0.1))]).T, 
+            units='mV', sampling_rate=10 / ms)
+        self.asiga1 = AnalogSignal(np.array([
+            np.sin(np.arange(0, 20 * math.pi, 0.1)), 
+            np.cos(np.arange(0, 20 * math.pi, 0.1))]).T, 
+            units='mV', sampling_rate=10 / ms)
+        self.asiga2 = AnalogSignal(np.array([
+            np.sin(np.arange(0, 20 * math.pi, 0.1)), 
+            np.cos(np.arange(0, 20 * math.pi, 0.1)), 
+            np.tan(np.arange(0, 20 * math.pi, 0.1))]).T, 
+            units='mV', sampling_rate=10 / ms)
+        self.st0 = SpikeTrain(
+            [9 * math.pi, 10 * math.pi, 11 * math.pi, 12 * math.pi], 
+            units='ms', t_stop=self.asiga0.t_stop)
+        self.lst = [SpikeTrain(
+            [9 * math.pi, 10 * math.pi, 11 * math.pi, 12 * math.pi], 
+            units='ms', t_stop=self.asiga1.t_stop), 
+            SpikeTrain([30, 35, 40], units='ms', t_stop=self.asiga1.t_stop)]
+
+    #***********************************************************************
+    #************************ Test for typical values **********************
+
+    def test_spike_triggered_average_with_n_spikes_on_constant_function(self):
+        '''Signal should average to the input'''
+        const = 13.8
+        x = const * np.ones(201)
+        asiga = AnalogSignal(
+            np.array([x]).T, units='mV', sampling_rate=10 / ms)
+        st = SpikeTrain([3, 5.6, 7, 7.1, 16, 16.3], units='ms', t_stop=20)
+        window_starttime = -2 * ms
+        window_endtime = 2 * ms
+        STA = sta.spike_triggered_average(
+            asiga, st, (window_starttime, window_endtime))
+        a = int(((window_endtime - window_starttime) *
+                asiga.sampling_rate).simplified)
+        cutout = asiga[0: a]
+        cutout.t_start = window_starttime
+        assert_array_almost_equal(STA, cutout, 12)
+
+    def test_spike_triggered_average_with_shifted_sin_wave(self):
+        '''Signal should average to zero'''
+        STA = sta.spike_triggered_average(
+            self.asiga0, self.st0, (-4 * ms, 4 * ms))
+        target = 5e-2 * mV
+        self.assertEqual(np.abs(STA).max().dimensionality.simplified, 
+                         pq.Quantity(1, "V").dimensionality.simplified)
+        self.assertLess(np.abs(STA).max(), target)
+
+    def test_only_one_spike(self):
+        '''The output should be the same as the input'''
+        x = np.arange(0, 20, 0.1)
+        y = x**2
+        sr = 10 / ms
+        z = AnalogSignal(np.array([y]).T, units='mV', sampling_rate=sr)
+        spiketime = 8 * ms
+        spiketime_in_ms = int((spiketime / ms).simplified)
+        st = SpikeTrain([spiketime_in_ms], units='ms', t_stop=20)
+        window_starttime = -3 * ms
+        window_endtime = 5 * ms
+        STA = sta.spike_triggered_average(
+            z, st, (window_starttime, window_endtime))
+        cutout = z[int(((spiketime + window_starttime) * sr).simplified): 
+            int(((spiketime + window_endtime) * sr).simplified)]
+        cutout.t_start = window_starttime
+        assert_array_equal(STA, cutout)
+
+    def test_usage_of_spikes(self):
+        st = SpikeTrain([16.5 * math.pi, 17.5 * math.pi, 
+            18.5 * math.pi, 19.5 * math.pi], units='ms', t_stop=20 * math.pi)
+        STA = sta.spike_triggered_average(
+            self.asiga0, st, (-math.pi * ms, math.pi * ms))
+        self.assertEqual(STA.annotations['used_spikes'], 3)
+        self.assertEqual(STA.annotations['unused_spikes'], 1)
+
+
+    #***********************************************************************
+    #**** Test for an invalid value, to check that the function raises *****
+    #********* an exception or returns an error code ***********************
+
+    def test_analog_signal_of_wrong_type(self):
+        '''Analog signal given as list, but must be AnalogSignal'''
+        asiga = [0, 1, 2, 3, 4]
+        self.assertRaises(TypeError, sta.spike_triggered_average, 
+            asiga, self.st0, (-2 * ms, 2 * ms))
+
+    def test_spiketrain_of_list_type_in_wrong_sense(self):
+        st = [10, 11, 12]
+        self.assertRaises(TypeError, sta.spike_triggered_average, 
+            self.asiga0, st, (1 * ms, 2 * ms))
+
+    def test_spiketrain_of_nonlist_and_nonspiketrain_type(self):
+        st = (10, 11, 12)
+        self.assertRaises(TypeError, sta.spike_triggered_average, 
+            self.asiga0, st, (1 * ms, 2 * ms))
+
+    def test_forgotten_AnalogSignal_argument(self):
+        self.assertRaises(TypeError, sta.spike_triggered_average, 
+            self.st0, (-2 * ms, 2 * ms))
+
+    def test_one_smaller_nrspiketrains_smaller_nranalogsignals(self):
+        '''Number of spiketrains between 1 and number of analogsignals'''
+        self.assertRaises(ValueError, sta.spike_triggered_average, 
+            self.asiga2, self.lst, (-2 * ms, 2 * ms))
+
+    def test_more_spiketrains_than_analogsignals_forbidden(self):
+        self.assertRaises(ValueError, sta.spike_triggered_average, 
+            self.asiga0, self.lst, (-2 * ms, 2 * ms))
+
+    def test_spike_earlier_than_analogsignal(self):
+        st = SpikeTrain([-1 * math.pi, 2 * math.pi],
+            units='ms', t_start=-2 * math.pi, t_stop=20 * math.pi)
+        self.assertRaises(ValueError, sta.spike_triggered_average, 
+            self.asiga0, st, (-2 * ms, 2 * ms))
+
+    def test_spike_later_than_analogsignal(self):
+        st = SpikeTrain(
+            [math.pi, 21 * math.pi], units='ms', t_stop=25 * math.pi)
+        self.assertRaises(ValueError, sta.spike_triggered_average, 
+            self.asiga0, st, (-2 * ms, 2 * ms))
+
+    def test_impossible_window(self):
+        self.assertRaises(ValueError, sta.spike_triggered_average, 
+            self.asiga0, self.st0, (-2 * ms, -5 * ms))
+
+    def test_window_larger_than_signal(self):
+        self.assertRaises(ValueError, sta.spike_triggered_average,
+            self.asiga0, self.st0, (-15 * math.pi * ms, 15 * math.pi * ms))
+
+    def test_wrong_window_starttime_unit(self):
+        self.assertRaises(TypeError, sta.spike_triggered_average, 
+            self.asiga0, self.st0, (-2 * mV, 2 * ms))
+
+    def test_wrong_window_endtime_unit(self):
+        self.assertRaises(TypeError, sta.spike_triggered_average, 
+            self.asiga0, self.st0, (-2 * ms, 2 * Hz))
+
+    def test_window_borders_as_complex_numbers(self):
+        self.assertRaises(TypeError, sta.spike_triggered_average, self.asiga0,
+            self.st0, ((-2 * math.pi + 3j) * ms, (2 * math.pi + 3j) * ms))
+
+    #***********************************************************************
+    #**** Test for an empty value (where the argument is a list, array, ****
+    #********* vector or other container datatype). ************************
+
+    def test_empty_analogsignal(self):
+        asiga = AnalogSignal([], units='mV', sampling_rate=10 / ms)
+        st = SpikeTrain([5], units='ms', t_stop=10)
+        self.assertRaises(ValueError, sta.spike_triggered_average, 
+            asiga, st, (-1 * ms, 1 * ms))
+
+    def test_one_spiketrain_empty(self):
+        '''Test for one empty SpikeTrain, but existing spikes in other'''
+        st = [SpikeTrain(
+            [9 * math.pi, 10 * math.pi, 11 * math.pi, 12 * math.pi], 
+            units='ms', t_stop=self.asiga1.t_stop), 
+            SpikeTrain([], units='ms', t_stop=self.asiga1.t_stop)]
+        STA = sta.spike_triggered_average(self.asiga1, st, (-1 * ms, 1 * ms))
+        cmp_array = AnalogSignal(np.array([np.zeros(20, dtype=float)]).T,
+            units='mV', sampling_rate=10 / ms)
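+        # dividing the zero array by zero yields NaN in every entry,
+        # the expected STA for the spike train without spikes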
+        cmp_array = cmp_array / 0.
+        cmp_array.t_start = -1 * ms
+        assert_array_equal(STA[:, 1], cmp_array[:, 0])
+
+    def test_all_spiketrains_empty(self):
+        st = SpikeTrain([], units='ms', t_stop=self.asiga1.t_stop)
+        with warnings.catch_warnings(record=True) as w:
+            # Cause all warnings to always be triggered.
+            warnings.simplefilter("always")
+            # Trigger warnings.
+            STA = sta.spike_triggered_average(
+                self.asiga1, st, (-1 * ms, 1 * ms))
+            self.assertEqual("No spike at all was either found or used "
+                             "for averaging", str(w[-1].message))
+            nan_array = np.empty(20)
+            nan_array.fill(np.nan)
+            cmp_array = AnalogSignal(np.array([nan_array, nan_array]).T,
+                units='mV', sampling_rate=10 / ms)
+            assert_array_equal(STA, cmp_array)
+
+
+# =========================================================================
+# Tests for new scipy version (with scipy.signal.coherence)
+# =========================================================================
+
+@unittest.skipIf(not hasattr(scipy.signal, 'coherence'), "Please update scipy "
+                                                        "to a version >= 0.16")
+class sfc_TestCase_new_scipy(unittest.TestCase):
+
+    def setUp(self):
+        # standard testsignals
+        tlen0 = 100 * pq.s
+        f0 = 20. * pq.Hz
+        fs0 = 1 * pq.ms
+        t0 = np.arange(
+            0, tlen0.rescale(pq.s).magnitude,
+            fs0.rescale(pq.s).magnitude) * pq.s
+        self.anasig0 = AnalogSignal(
+            np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
+            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
+        self.st0 = SpikeTrain(
+            np.arange(0, tlen0.rescale(pq.ms).magnitude, 50) * pq.ms,
+            t_start=0 * pq.ms, t_stop=tlen0)
+        self.bst0 = BinnedSpikeTrain(self.st0, binsize=fs0)
+
+        # shortened analogsignals
+        self.anasig1 = self.anasig0.time_slice(1 * pq.s, None)
+        self.anasig2 = self.anasig0.time_slice(None, 99 * pq.s)
+
+        # increased sampling frequency
+        fs1 = 0.1 * pq.ms
+        self.anasig3 = AnalogSignal(
+            np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
+            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs1)
+        self.bst1 = BinnedSpikeTrain(
+            self.st0.time_slice(self.anasig3.t_start, self.anasig3.t_stop),
+            binsize=fs1)
+
+        # analogsignal containing multiple traces
+        self.anasig4 = AnalogSignal(
+            np.array([
+                np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
+                np.sin(4 * np.pi * (f0 * t0).simplified.magnitude)]).
+            transpose(),
+            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
+
+        # shortened spike train
+        self.st3 = SpikeTrain(
+            np.arange(
+                (tlen0.rescale(pq.ms).magnitude * .25),
+                (tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms,
+            t_start=0 * pq.ms, t_stop=tlen0)
+        self.bst3 = BinnedSpikeTrain(self.st3, binsize=fs0)
+
+        self.st4 = SpikeTrain(np.arange(
+            (tlen0.rescale(pq.ms).magnitude * .25),
+            (tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms,
+            t_start=5 * fs0, t_stop=tlen0 - 5 * fs0)
+        self.bst4 = BinnedSpikeTrain(self.st4, binsize=fs0)
+
+        # spike train with incompatible binsize
+        self.bst5 = BinnedSpikeTrain(self.st3, binsize=fs0 * 2.)
+
+        # spike train with same binsize as the analog signal, but with
+        # bin edges not aligned to the time axis of the analog signal
+        self.bst6 = BinnedSpikeTrain(
+            self.st3, binsize=fs0, t_start=4.5 * fs0, t_stop=tlen0 - 4.5 * fs0)
+
+    # =========================================================================
+    # Tests for correct input handling
+    # =========================================================================
+
+    def test_wrong_input_type(self):
+        self.assertRaises(TypeError,
+                          sta.spike_field_coherence,
+                          np.array([1, 2, 3]), self.bst0)
+        self.assertRaises(TypeError,
+                          sta.spike_field_coherence,
+                          self.anasig0, [1, 2, 3])
+        self.assertRaises(ValueError,
+                          sta.spike_field_coherence,
+                          self.anasig0.duplicate_with_new_array([]), self.bst0)
+
+    def test_start_stop_times_out_of_range(self):
+        self.assertRaises(ValueError,
+                          sta.spike_field_coherence,
+                          self.anasig1, self.bst0)
+
+        self.assertRaises(ValueError,
+                          sta.spike_field_coherence,
+                          self.anasig2, self.bst0)
+
+    def test_non_matching_input_binning(self):
+        self.assertRaises(ValueError,
+                          sta.spike_field_coherence,
+                          self.anasig0, self.bst1)
+
+    def test_incompatible_spiketrain_analogsignal(self):
+        # These spike trains have incompatible binning (binsize or alignment to
+        # time axis of analog signal)
+        self.assertRaises(ValueError,
+                          sta.spike_field_coherence,
+                          self.anasig0, self.bst5)
+        self.assertRaises(ValueError,
+                          sta.spike_field_coherence,
+                          self.anasig0, self.bst6)
+
+    def test_signal_dimensions(self):
+        # single analogsignal trace and single spike train
+        s_single, f_single = sta.spike_field_coherence(self.anasig0, self.bst0)
+
+        self.assertEqual(len(f_single.shape), 1)
+        self.assertEqual(len(s_single.shape), 2)
+
+        # multiple analogsignal traces and single spike train
+        s_multi, f_multi = sta.spike_field_coherence(self.anasig4, self.bst0)
+
+        self.assertEqual(len(f_multi.shape), 1)
+        self.assertEqual(len(s_multi.shape), 2)
+
+        # frequencies are identical since same sampling frequency was used
+        # in both cases and data length is the same
+        assert_array_equal(f_single, f_multi)
+        # coherences of s_single and first signal in s_multi are identical,
+        # since first analogsignal trace in anasig4 is same as in anasig0
+        assert_array_equal(s_single[:, 0], s_multi[:, 0])
+
+    def test_non_binned_spiketrain_input(self):
+        s, f = sta.spike_field_coherence(self.anasig0, self.st0)
+
+        f_ind = np.where(f >= 19.)[0][0]
+        max_ind = np.argmax(s[1:]) + 1
+
+        self.assertEqual(f_ind, max_ind)
+        self.assertAlmostEqual(s[f_ind], 1., delta=0.01)
+
+    # =========================================================================
+    # Tests for correct return values
+    # =========================================================================
+
+    def test_spike_field_coherence_perfect_coherence(self):
+        # check for detection of 20Hz peak in anasig0/bst0
+        s, f = sta.spike_field_coherence(
+            self.anasig0, self.bst0, window='boxcar')
+
+        f_ind = np.where(f >= 19.)[0][0]
+        max_ind = np.argmax(s[1:]) + 1
+
+        self.assertEqual(f_ind, max_ind)
+        self.assertAlmostEqual(s[f_ind], 1., delta=0.01)
+
+    def test_output_frequencies(self):
+        nfft = 256
+        _, f = sta.spike_field_coherence(self.anasig3, self.bst1, nfft=nfft)
+
+        # check number of frequency samples
+        self.assertEqual(len(f), nfft // 2 + 1)
+
+        # check values of frequency samples
+        assert_array_almost_equal(
+            f, np.linspace(
+                0, self.anasig3.sampling_rate.rescale('Hz').magnitude / 2,
+                nfft // 2 + 1) * pq.Hz)
+
+    def test_short_spiketrain(self):
+        # this spike train has the same length as anasig0
+        s1, f1 = sta.spike_field_coherence(
+            self.anasig0, self.bst3, window='boxcar')
+
+        # this spike train has the same spikes as above, but is shorter than
+        # anasig0
+        s2, f2 = sta.spike_field_coherence(
+            self.anasig0, self.bst4, window='boxcar')
+
+        # the results above should be the same, nevertheless
+        assert_array_equal(s1.magnitude, s2.magnitude)
+        assert_array_equal(f1.magnitude, f2.magnitude)
+
+
+# =========================================================================
+# Tests for old scipy version (without scipy.signal.coherence)
+# =========================================================================
+
+@unittest.skipIf(hasattr(scipy.signal, 'coherence'), 'Applies only for old '
+                                                     'scipy versions (<0.16)')
+class sfc_TestCase_old_scipy(unittest.TestCase):
+
+    def setUp(self):
+        # standard testsignals
+        tlen0 = 100 * pq.s
+        f0 = 20. * pq.Hz
+        fs0 = 1 * pq.ms
+        t0 = np.arange(
+            0, tlen0.rescale(pq.s).magnitude,
+            fs0.rescale(pq.s).magnitude) * pq.s
+        self.anasig0 = AnalogSignal(
+            np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
+            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
+        self.st0 = SpikeTrain(
+            np.arange(0, tlen0.rescale(pq.ms).magnitude, 50) * pq.ms,
+            t_start=0 * pq.ms, t_stop=tlen0)
+        self.bst0 = BinnedSpikeTrain(self.st0, binsize=fs0)
+
+    def test_old_scipy_version(self):
+        self.assertRaises(AttributeError, sta.spike_field_coherence,
+                          self.anasig0, self.bst0)
+
+if __name__ == '__main__':
+    unittest.main()

+ 554 - 0
code/elephant/elephant/test/test_statistics.py

@@ -0,0 +1,554 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the statistics module.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+
+import neo
+import numpy as np
+from numpy.testing import assert_array_almost_equal, assert_array_equal
+import quantities as pq
+import scipy.integrate as spint
+
+import elephant.statistics as es
+import elephant.kernels as kernels
+import warnings
+
+class isi_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
+                                       [0.02, 0.71, 1.82, 8.46],
+                                       [0.03, 0.14, 0.15, 0.92]])
+        self.targ_array_2d_0 = np.array([[-0.28,  0.15,  0.95,  7.23],
+                                         [0.01, -0.57, -1.67, -7.54]])
+        self.targ_array_2d_1 = np.array([[0.26, 0.31, 0.36],
+                                         [0.69, 1.11, 6.64],
+                                         [0.11, 0.01, 0.77]])
+        self.targ_array_2d_default = self.targ_array_2d_1
+
+        self.test_array_1d = self.test_array_2d[0, :]
+        self.targ_array_1d = self.targ_array_2d_1[0, :]
+
+    def test_isi_with_spiketrain(self):
+        st = neo.SpikeTrain(
+            self.test_array_1d, units='ms', t_stop=10.0, t_start=0.29)
+        target = pq.Quantity(self.targ_array_1d, 'ms')
+        res = es.isi(st)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_isi_with_quantities_1d(self):
+        st = pq.Quantity(self.test_array_1d, units='ms')
+        target = pq.Quantity(self.targ_array_1d, 'ms')
+        res = es.isi(st)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_isi_with_plain_array_1d(self):
+        st = self.test_array_1d
+        target = self.targ_array_1d
+        res = es.isi(st)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_isi_with_plain_array_2d_default(self):
+        st = self.test_array_2d
+        target = self.targ_array_2d_default
+        res = es.isi(st)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_isi_with_plain_array_2d_0(self):
+        st = self.test_array_2d
+        target = self.targ_array_2d_0
+        res = es.isi(st, axis=0)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_isi_with_plain_array_2d_1(self):
+        st = self.test_array_2d
+        target = self.targ_array_2d_1
+        res = es.isi(st, axis=1)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+
+class isi_cv_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_array_regular = np.arange(1, 6)
+
+    def test_cv_isi_regular_spiketrain_is_zero(self):
+        st = neo.SpikeTrain(self.test_array_regular,  units='ms', t_stop=10.0)
+        targ = 0.0
+        res = es.cv(es.isi(st))
+        self.assertEqual(res, targ)
+
+    def test_cv_isi_regular_array_is_zero(self):
+        st = self.test_array_regular
+        targ = 0.0
+        res = es.cv(es.isi(st))
+        self.assertEqual(res, targ)
+
+
+class mean_firing_rate_TestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_array_3d = np.ones([5, 7, 13])
+        self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
+                                       [0.02, 0.71, 1.82, 8.46],
+                                       [0.03, 0.14, 0.15, 0.92]])
+
+        self.targ_array_2d_0 = np.array([3, 3, 3, 3])
+        self.targ_array_2d_1 = np.array([4, 4, 4])
+        self.targ_array_2d_None = 12
+        self.targ_array_2d_default = self.targ_array_2d_None
+
+        self.max_array_2d_0 = np.array([0.3, 0.71, 1.82, 8.46])
+        self.max_array_2d_1 = np.array([1.23, 8.46, 0.92])
+        self.max_array_2d_None = 8.46
+        self.max_array_2d_default = self.max_array_2d_None
+
+        self.test_array_1d = self.test_array_2d[0, :]
+        self.targ_array_1d = self.targ_array_2d_1[0]
+        self.max_array_1d = self.max_array_2d_1[0]
+
+    def test_mean_firing_rate_with_spiketrain(self):
+        st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
+        target = pq.Quantity(self.targ_array_1d/10., '1/ms')
+        res = es.mean_firing_rate(st)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_spiketrain_set_ends(self):
+        st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
+        target = pq.Quantity(2/0.5, '1/ms')
+        res = es.mean_firing_rate(st, t_start=0.4, t_stop=0.9)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_quantities_1d(self):
+        st = pq.Quantity(self.test_array_1d, units='ms')
+        target = pq.Quantity(self.targ_array_1d/self.max_array_1d, '1/ms')
+        res = es.mean_firing_rate(st)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_quantities_1d_set_ends(self):
+        st = pq.Quantity(self.test_array_1d, units='ms')
+        target = pq.Quantity(2/0.6, '1/ms')
+        res = es.mean_firing_rate(st, t_start=400*pq.us, t_stop=1.)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_1d(self):
+        st = self.test_array_1d
+        target = self.targ_array_1d/self.max_array_1d
+        res = es.mean_firing_rate(st)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_1d_set_ends(self):
+        st = self.test_array_1d
+        target = self.targ_array_1d/(1.23-0.3)
+        res = es.mean_firing_rate(st, t_start=0.3, t_stop=1.23)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_2d_default(self):
+        st = self.test_array_2d
+        target = self.targ_array_2d_default/self.max_array_2d_default
+        res = es.mean_firing_rate(st)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_2d_0(self):
+        st = self.test_array_2d
+        target = self.targ_array_2d_0/self.max_array_2d_0
+        res = es.mean_firing_rate(st, axis=0)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_2d_1(self):
+        st = self.test_array_2d
+        target = self.targ_array_2d_1/self.max_array_2d_1
+        res = es.mean_firing_rate(st, axis=1)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_3d_None(self):
+        st = self.test_array_3d
+        target = np.sum(self.test_array_3d, None)/5.
+        res = es.mean_firing_rate(st, axis=None, t_stop=5.)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_3d_0(self):
+        st = self.test_array_3d
+        target = np.sum(self.test_array_3d, 0)/5.
+        res = es.mean_firing_rate(st, axis=0, t_stop=5.)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_3d_1(self):
+        st = self.test_array_3d
+        target = np.sum(self.test_array_3d, 1)/5.
+        res = es.mean_firing_rate(st, axis=1, t_stop=5.)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_3d_2(self):
+        st = self.test_array_3d
+        target = np.sum(self.test_array_3d, 2)/5.
+        res = es.mean_firing_rate(st, axis=2, t_stop=5.)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_2d_1_set_ends(self):
+        st = self.test_array_2d
+        target = np.array([4, 1, 3])/(1.23-0.14)
+        res = es.mean_firing_rate(st, axis=1, t_start=0.14, t_stop=1.23)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_2d_None(self):
+        st = self.test_array_2d
+        target = self.targ_array_2d_None/self.max_array_2d_None
+        res = es.mean_firing_rate(st, axis=None)
+        assert not isinstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_mean_firing_rate_with_plain_array_and_units_start_stop_typeerror(self):
+        st = self.test_array_2d
+        self.assertRaises(TypeError, es.mean_firing_rate, st,
+                          t_start=pq.Quantity(0, 'ms'))
+        self.assertRaises(TypeError, es.mean_firing_rate, st,
+                          t_stop=pq.Quantity(10, 'ms'))
+        self.assertRaises(TypeError, es.mean_firing_rate, st,
+                          t_start=pq.Quantity(0, 'ms'),
+                          t_stop=pq.Quantity(10, 'ms'))
+        self.assertRaises(TypeError, es.mean_firing_rate, st,
+                          t_start=pq.Quantity(0, 'ms'),
+                          t_stop=10.)
+        self.assertRaises(TypeError, es.mean_firing_rate, st,
+                          t_start=0.,
+                          t_stop=pq.Quantity(10, 'ms'))
+
+
+class FanoFactorTestCase(unittest.TestCase):
+    def setUp(self):
+        np.random.seed(100)
+        num_st = 300
+        self.test_spiketrains = []
+        self.test_array = []
+        self.test_quantity = []
+        self.test_list = []
+        self.sp_counts = np.zeros(num_st)
+        for i in range(num_st):
+            r = np.random.rand(np.random.randint(20) + 1)
+            st = neo.core.SpikeTrain(r * pq.ms,
+                                     t_start=0.0 * pq.ms,
+                                     t_stop=20.0 * pq.ms)
+            self.test_spiketrains.append(st)
+            self.test_array.append(r)
+            self.test_quantity.append(r * pq.ms)
+            self.test_list.append(list(r))
+            # for cross-validation
+            self.sp_counts[i] = len(st)
+
+    def test_fanofactor_spiketrains(self):
+        # Test with list of spiketrains
+        self.assertEqual(
+            np.var(self.sp_counts) / np.mean(self.sp_counts),
+            es.fanofactor(self.test_spiketrains))
+
+        # One spiketrain in list
+        st = self.test_spiketrains[0]
+        self.assertEqual(es.fanofactor([st]), 0.0)
+
+    def test_fanofactor_empty(self):
+        # Test with empty list
+        self.assertTrue(np.isnan(es.fanofactor([])))
+        self.assertTrue(np.isnan(es.fanofactor([[]])))
+
+        # Test with empty quantity
+        self.assertTrue(np.isnan(es.fanofactor([] * pq.ms)))
+
+        # Empty spiketrain
+        st = neo.core.SpikeTrain([] * pq.ms, t_start=0 * pq.ms,
+                                 t_stop=1.5 * pq.ms)
+        self.assertTrue(np.isnan(es.fanofactor(st)))
+
+    def test_fanofactor_spiketrains_same(self):
+        # Test with same spiketrains in list
+        sts = [self.test_spiketrains[0]] * 3
+        self.assertEqual(es.fanofactor(sts), 0.0)
+
+    def test_fanofactor_array(self):
+        self.assertEqual(es.fanofactor(self.test_array),
+                         np.var(self.sp_counts) / np.mean(self.sp_counts))
+
+    def test_fanofactor_array_same(self):
+        lst = [self.test_array[0]] * 3
+        self.assertEqual(es.fanofactor(lst), 0.0)
+
+    def test_fanofactor_quantity(self):
+        self.assertEqual(es.fanofactor(self.test_quantity),
+                         np.var(self.sp_counts) / np.mean(self.sp_counts))
+
+    def test_fanofactor_quantity_same(self):
+        lst = [self.test_quantity[0]] * 3
+        self.assertEqual(es.fanofactor(lst), 0.0)
+
+    def test_fanofactor_list(self):
+        self.assertEqual(es.fanofactor(self.test_list),
+                         np.var(self.sp_counts) / np.mean(self.sp_counts))
+
+    def test_fanofactor_list_same(self):
+        lst = [self.test_list[0]] * 3
+        self.assertEqual(es.fanofactor(lst), 0.0)
+
+
+class LVTestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_seq = [1, 28,  4, 47,  5, 16,  2,  5, 21, 12,
+                         4, 12, 59,  2,  4, 18, 33, 25,  2, 34,
+                         4,  1,  1, 14,  8,  1, 10,  1,  8, 20,
+                         5,  1,  6,  5, 12,  2,  8,  8,  2,  8,
+                         2, 10,  2,  1,  1,  2, 15,  3, 20,  6,
+                         11, 6, 18,  2,  5, 17,  4,  3, 13,  6,
+                         1, 18,  1, 16, 12,  2, 52,  2,  5,  7,
+                         6, 25,  6,  5,  3, 15,  4,  3, 16,  3,
+                         6,  5, 24, 21,  3,  3,  4,  8,  4, 11,
+                         5,  7,  5,  6,  8, 11, 33, 10,  7,  4]
+
+        self.target = 0.971826029994
+
+    def test_lv_with_quantities(self):
+        seq = pq.Quantity(self.test_seq, units='ms')
+        assert_array_almost_equal(es.lv(seq), self.target, decimal=9)
+
+    def test_lv_with_plain_array(self):
+        seq = np.array(self.test_seq)
+        assert_array_almost_equal(es.lv(seq), self.target, decimal=9)
+
+    def test_lv_with_list(self):
+        seq = self.test_seq
+        assert_array_almost_equal(es.lv(seq), self.target, decimal=9)
+
+    def test_lv_raise_error(self):
+        seq = self.test_seq
+        self.assertRaises(AttributeError, es.lv, [])
+        self.assertRaises(AttributeError, es.lv, 1)
+        self.assertRaises(ValueError, es.lv, np.array([seq, seq]))
+
+
+class RateEstimationTestCase(unittest.TestCase):
+
+    def setUp(self):
+        # create a poisson spike train:
+        self.st_tr = (0, 20.0)  # seconds
+        self.st_dur = self.st_tr[1] - self.st_tr[0]  # seconds
+        self.st_margin = 5.0  # seconds
+        self.st_rate = 10.0  # Hertz
+
+        st_num_spikes = np.random.poisson(self.st_rate*(self.st_dur-2*self.st_margin))
+        spike_train = np.random.rand(st_num_spikes) * (self.st_dur-2*self.st_margin) + self.st_margin
+        spike_train.sort()
+
+        # convert spike train into neo objects
+        self.spike_train = neo.SpikeTrain(spike_train*pq.s,
+                                          t_start=self.st_tr[0]*pq.s,
+                                          t_stop=self.st_tr[1]*pq.s)
+
+        # generation of a multiply used specific kernel
+        self.kernel = kernels.TriangularKernel(sigma = 0.03*pq.s)
+
+    def test_instantaneous_rate_and_warnings(self):
+        st = self.spike_train
+        sampling_period = 0.01*pq.s
+        with warnings.catch_warnings(record=True) as w:
+            inst_rate = es.instantaneous_rate(
+                st, sampling_period, self.kernel, cutoff=0)
+            self.assertEqual("The width of the kernel was adjusted to a minimally "
+                             "allowed width.", str(w[-2].message))
+            self.assertEqual("Instantaneous firing rate approximation contains "
+                             "negative values, possibly caused due to machine "
+                             "precision errors.", str(w[-1].message))
+        self.assertIsInstance(inst_rate, neo.core.AnalogSignal)
+        self.assertEqual(
+            inst_rate.sampling_period.simplified, sampling_period.simplified)
+        self.assertEqual(inst_rate.simplified.units, pq.Hz)
+        self.assertEqual(inst_rate.t_stop.simplified, st.t_stop.simplified)
+        self.assertEqual(inst_rate.t_start.simplified, st.t_start.simplified)
+
+    def test_error_instantaneous_rate(self):
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=[1,2,3]*pq.s,
+            sampling_period=0.01*pq.ms, kernel=self.kernel)
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=[1,2,3],
+            sampling_period=0.01*pq.ms, kernel=self.kernel)
+        st = self.spike_train
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=st,
+            sampling_period=0.01, kernel=self.kernel)
+        self.assertRaises(
+            ValueError, es.instantaneous_rate, spiketrain=st,
+            sampling_period=-0.01*pq.ms, kernel=self.kernel)
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=st,
+            sampling_period=0.01*pq.ms, kernel='NONE')
+        self.assertRaises(TypeError, es.instantaneous_rate, self.spike_train,
+            sampling_period=0.01*pq.s, kernel='wrong_string',
+            t_start=self.st_tr[0]*pq.s, t_stop=self.st_tr[1]*pq.s,
+            trim=False)
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=st,
+            sampling_period=0.01*pq.ms, kernel=self.kernel, cutoff=20*pq.ms)
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=st,
+            sampling_period=0.01*pq.ms, kernel=self.kernel, t_start=2)
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=st,
+            sampling_period=0.01*pq.ms, kernel=self.kernel, t_stop=20*pq.mV)
+        self.assertRaises(
+            TypeError, es.instantaneous_rate, spiketrain=st,
+            sampling_period=0.01*pq.ms, kernel=self.kernel, trim=1)
+
+    def test_rate_estimation_consistency(self):
+        """
+        Test, whether the integral of the rate estimation curve is (almost)
+        equal to the number of spikes of the spike train.
+        """
+        kernel_types = [obj for obj in kernels.__dict__.values()
+                        if isinstance(obj, type) and
+                        issubclass(obj, kernels.Kernel) and
+                        hasattr(obj, "_evaluate") and
+                        obj is not kernels.Kernel and
+                        obj is not kernels.SymmetricKernel]
+        kernel_list = [kernel_type(sigma=0.5*pq.s, invert=False)
+                       for kernel_type in kernel_types]
+        kernel_resolution = 0.01*pq.s
+        for kernel in kernel_list:
+            rate_estimate_a0 = es.instantaneous_rate(self.spike_train,
+                                            sampling_period=kernel_resolution,
+                                            kernel='auto',
+                                            t_start=self.st_tr[0]*pq.s,
+                                            t_stop=self.st_tr[1]*pq.s,
+                                            trim=False)
+
+            rate_estimate0 = es.instantaneous_rate(self.spike_train,
+                                            sampling_period=kernel_resolution,
+                                            kernel=kernel)
+
+            rate_estimate1 = es.instantaneous_rate(self.spike_train,
+                                            sampling_period=kernel_resolution,
+                                            kernel=kernel,
+                                            t_start=self.st_tr[0]*pq.s,
+                                            t_stop=self.st_tr[1]*pq.s,
+                                            trim=False)
+
+            rate_estimate2 = es.instantaneous_rate(self.spike_train,
+                                            sampling_period=kernel_resolution,
+                                            kernel=kernel,
+                                            t_start=self.st_tr[0]*pq.s,
+                                            t_stop=self.st_tr[1]*pq.s,
+                                            trim=True)
+            ### test consistency
+            rate_estimate_list = [rate_estimate0, rate_estimate1,
+                                  rate_estimate2, rate_estimate_a0]
+
+            for rate_estimate in rate_estimate_list:
+                num_spikes = len(self.spike_train)
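+                # integrate the rate curve over time; the resulting area
+                # should approximate the total number of spikes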
+                auc = spint.cumtrapz(y=rate_estimate.magnitude[:, 0],
+                                     x=rate_estimate.times.rescale('s').magnitude)[-1]
+                self.assertAlmostEqual(num_spikes, auc, delta=0.05*num_spikes)
+
+
+class TimeHistogramTestCase(unittest.TestCase):
+    def setUp(self):
+        self.spiketrain_a = neo.SpikeTrain(
+            [0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
+        self.spiketrain_b = neo.SpikeTrain(
+            [0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
+        self.spiketrains = [self.spiketrain_a, self.spiketrain_b]
+
+    def tearDown(self):
+        del self.spiketrain_a
+        self.spiketrain_a = None
+        del self.spiketrain_b
+        self.spiketrain_b = None
+
+    def test_time_histogram(self):
+        targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
+        histogram = es.time_histogram(self.spiketrains, binsize=pq.s)
+        assert_array_equal(targ, histogram.magnitude[:, 0])
+
+    def test_time_histogram_binary(self):
+        targ = np.array([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])
+        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
+                                      binary=True)
+        assert_array_equal(targ, histogram.magnitude[:, 0])
+
+    def test_time_histogram_tstart_tstop(self):
+        # Start, stop short range
+        targ = np.array([2, 1])
+        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
+                                      t_start=5 * pq.s, t_stop=7 * pq.s)
+        assert_array_equal(targ, histogram.magnitude[:, 0])
+
+        # Test without t_stop
+        targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
+        histogram = es.time_histogram(self.spiketrains, binsize=1 * pq.s,
+                                      t_start=0 * pq.s)
+        assert_array_equal(targ, histogram.magnitude[:, 0])
+
+        # Test without t_start
+        histogram = es.time_histogram(self.spiketrains, binsize=1 * pq.s,
+                                      t_stop=10 * pq.s)
+        assert_array_equal(targ, histogram.magnitude[:, 0])
+
+    def test_time_histogram_output(self):
+        # Normalization mean
+        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
+                                      output='mean')
+        targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float) / 2
+        assert_array_equal(targ.reshape(targ.size, 1), histogram.magnitude)
+
+        # Normalization rate
+        histogram = es.time_histogram(self.spiketrains, binsize=pq.s,
+                                      output='rate')
+        assert_array_equal(histogram.view(pq.Quantity),
+                           targ.reshape(targ.size, 1) * 1 / pq.s)
+
+        # Normalization unspecified, raises error
+        self.assertRaises(ValueError, es.time_histogram, self.spiketrains,
+                          binsize=pq.s, output=' ')
+
+
+class ComplexityPdfTestCase(unittest.TestCase):
+    def setUp(self):
+        self.spiketrain_a = neo.SpikeTrain(
+            [0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
+        self.spiketrain_b = neo.SpikeTrain(
+            [0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
+        self.spiketrain_c = neo.SpikeTrain(
+            [0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
+        self.spiketrains = [
+            self.spiketrain_a, self.spiketrain_b, self.spiketrain_c]
+
+    def tearDown(self):
+        del self.spiketrain_a
+        self.spiketrain_a = None
+        del self.spiketrain_b
+        self.spiketrain_b = None
+
+    def test_complexity_pdf(self):
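+        # expected probabilities of observing 0, 1, 2 or 3 simultaneously
+        # active spiketrains per 0.1 s bin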
+        targ = np.array([0.92, 0.01, 0.01, 0.06])
+        complexity = es.complexity_pdf(self.spiketrains, binsize=0.1*pq.s)
+        assert_array_equal(targ, complexity.magnitude[:, 0])
+        self.assertEqual(1, complexity.magnitude[:, 0].sum())
+        self.assertEqual(len(self.spiketrains)+1, len(complexity))
+        self.assertIsInstance(complexity, neo.AnalogSignal)
+        self.assertEqual(complexity.units, 1*pq.dimensionless)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 348 - 0
code/elephant/elephant/test/test_unitary_event_analysis.py

@@ -0,0 +1,348 @@
+"""
+Unit tests for the Unitary Events analysis
+
+:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import unittest
+import numpy as np
+import quantities as pq
+import types
+import elephant.unitary_event_analysis as ue
+import neo
+
+class UETestCase(unittest.TestCase):
+
+    def setUp(self):
+        sts1_with_trial = [[  26.,   48.,   78.,  144.,  178.],
+                           [   4.,   45.,   85.,  123.,  156.,  185.],
+                           [  22.,   53.,   73.,   88.,  120.,  147.,  167.,  193.],
+                           [  23.,   49.,   74.,  116.,  142.,  166.,  189.],
+                           [   5.,   34.,   54.,   80.,  108.,  128.,  150.,  181.],
+                           [  18.,   61.,  107.,  170.],
+                           [  62.,   98.,  131.,  161.],
+                           [  37.,   63.,   86.,  131.,  168.],
+                           [  39.,   76.,  100.,  127.,  153.,  198.],
+                           [   3.,   35.,   60.,   88.,  108.,  141.,  171.,  184.],
+                           [  39.,  170.],
+                           [  25.,   68.,  170.],
+                           [  19.,   57.,   84.,  116.,  157.,  192.],
+                           [  17.,   80.,  131.,  172.],
+                           [  33.,   65.,  124.,  162.,  192.],
+                           [  58.,   87.,  185.],
+                           [  19.,  101.,  174.],
+                           [  84.,  118.,  156.,  198.,  199.],
+                           [   5.,   55.,   67.,   96.,  114.,  148.,  172.,  199.],
+                           [  61.,  105.,  131.,  169.,  195.],
+                           [  26.,   96.,  129.,  157.],
+                           [  41.,   85.,  157.,  199.],
+                           [   6.,   30.,   53.,   76.,  109.,  142.,  167.,  194.],
+                           [ 159.],
+                           [   6.,   51.,   78.,  113.,  154.,  183.],
+                           [ 138.],
+                           [  23.,   59.,  154.,  185.],
+                           [  12.,   14.,   52.,   54.,  109.,  145.,  192.],
+                           [  29.,   61.,   84.,  122.,  145.,  168.],
+                           [ 26.,  99.],
+                           [   3.,   31.,   55.,   85.,  108.,  158.,  191.],
+                           [   5.,   37.,   70.,  119.,  170.],
+                           [  38.,   79.,  117.,  157.,  192.],
+                           [ 174.],
+                           [ 114.],
+                           []]
+        sts2_with_trial = [[   3.,  119.],
+                           [  54.,  155.,  183.],
+                           [  35.,  133.],
+                           [  25.,  100.,  176.],
+                           [  9.,  98.],
+                           [   6.,   97.,  198.],
+                           [   7.,   62.,  148.],
+                           [ 100.,  158.],
+                           [   7.,   62.,  122.,  179.,  191.],
+                           [ 125.,  182.],
+                           [  30.,   55.,  127.,  157.,  196.],
+                           [  27.,   70.,  173.],
+                           [  82.,   84.,  198.],
+                           [  11.,   29.,  137.],
+                           [   5.,   49.,   61.,  101.,  142.,  190.],
+                           [  78.,  162.,  178.],
+                           [  13.,   14.,  130.,  172.],
+                           [ 22.],
+                           [  16.,   55.,  109.,  113.,  175.],
+                           [  17.,   33.,   63.,  102.,  144.,  189.,  190.],
+                           [ 58.],
+                           [  27.,   30.,   99.,  145.,  176.],
+                           [  10.,   58.,  116.,  182.],
+                           [  14.,   68.,  104.,  126.,  162.,  194.],
+                           [  56.,  129.,  196.],
+                           [  50.,   78.,  105.,  152.,  190.,  197.],
+                           [  24.,   66.,  113.,  117.,  161.],
+                           [   9.,   31.,   81.,   95.,  136.,  154.],
+                           [  10.,  115.,  185.,  191.],
+                           [  71.,  140.,  157.],
+                           [  15.,   27.,   88.,  102.,  103.,  151.,  181.,  188.],
+                           [  51.,   75.,   95.,  134.,  195.],
+                           [  18.,   55.,   75.,  131.,  186.],
+                           [  10.,   16.,   41.,   42.,   75.,  127.],
+                           [  62.,   76.,  102.,  145.,  171.,  183.],
+                           [  66.,   71.,   85.,  140.,  154.]]
+        self.sts1_neo = [neo.SpikeTrain(
+            i*pq.ms,t_stop = 200*pq.ms) for i in sts1_with_trial]
+        self.sts2_neo = [neo.SpikeTrain(
+            i*pq.ms,t_stop = 200*pq.ms) for i in sts2_with_trial]
+        self.binary_sts = np.array([[[1, 1, 1, 1, 0],
+                                     [0, 1, 1, 1, 0],
+                                     [0, 1, 1, 0, 1]],
+                                    [[1, 1, 1, 1, 1],
+                                     [0, 1, 1, 1, 1],
+                                     [1, 1, 0, 1, 0]]])
+
+    def test_hash_default(self):
+        m = np.array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], [1,1,0],
+                      [1,0,1],[0,1,1],[1,1,1]])
+        expected = np.array([77,43,23])
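+        # each column of m is a binary firing pattern across N=8 neurons;
+        # its hash is the pattern read as a binary number, e.g. 01001101 -> 77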
+        h = ue.hash_from_pattern(m, N=8)
+        self.assertTrue(np.all(expected == h))
+
+    def test_hash_default_longpattern(self):
+        m = np.zeros((100,2))
+        m[0,0] = 1
+        expected = np.array([2**99,0])
+        h = ue.hash_from_pattern(m, N=100)
+        self.assertTrue(np.all(expected == h))
+
+    def test_hash_ValueError_wrong_orientation(self):
+        m = np.array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], [1,1,0],
+                      [1,0,1],[0,1,1],[1,1,1]])
+        self.assertRaises(ValueError, ue.hash_from_pattern, m, N=3)
+
+    def test_hash_ValueError_wrong_entries(self):
+        m = np.array([[0,0,0], [1,0,0], [0,2,0], [0,0,1], [1,1,0],
+                      [1,0,1],[0,1,1],[1,1,1]])
+        self.assertRaises(ValueError, ue.hash_from_pattern, m, N=3)
+
+    def test_hash_base_not_two(self):
+        m = np.array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], [1,1,0],
+                      [1,0,1],[0,1,1],[1,1,1]])
+        m = m.T
+        base = 3
+        expected = np.array([0,9,3,1,12,10,4,13])
+        h = ue.hash_from_pattern(m, N=3, base=base)
+        self.assertTrue(np.all(expected == h))
+
+    ## TODO: write a test for ValueError in inverse_hash_from_pattern
+    def test_invhash_ValueError(self):
+        self.assertRaises(ValueError, ue.inverse_hash_from_pattern, [128, 8], 4)
+
+    def test_invhash_default_base(self):
+        N = 3
+        h = np.array([0, 4, 2, 1, 6, 5, 3, 7])
+        expected = np.array([[0, 1, 0, 0, 1, 1, 0, 1],[0, 0, 1, 0, 1, 0, 1, 1],[0, 0, 0, 1, 0, 1, 1, 1]])
+        m = ue.inverse_hash_from_pattern(h, N)
+        self.assertTrue(np.all(expected == m))
+
+    def test_invhash_base_not_two(self):
+        N = 3
+        h = np.array([1,4,13])
+        base = 3
+        expected = np.array([[0,0,1],[0,1,1],[1,1,1]])
+        m = ue.inverse_hash_from_pattern(h, N, base)
+        self.assertTrue(np.all(expected == m))
+
+    def test_invhash_shape_mat(self):
+        N = 8
+        h = np.array([178, 212, 232])
+        expected = np.array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], [1,1,0],[1,0,1],[0,1,1],[1,1,1]])
+        m = ue.inverse_hash_from_pattern(h, N)
+        self.assertTrue(np.shape(m)[0] == N)
+
+    def test_hash_invhash_consistency(self):
+        m = np.array([[0, 0, 0],[1, 0, 0],[0, 1, 0],[0, 0, 1],[1, 1, 0],[1, 0, 1],[0, 1, 1],[1, 1, 1]])
+        inv_h = ue.hash_from_pattern(m, N=8)
+        m1 = ue.inverse_hash_from_pattern(inv_h, N = 8)
+        self.assertTrue(np.all(m == m1))
+
+    def test_n_emp_mat_default(self):
+        mat = np.array([[0, 0, 0, 1, 1],[0, 0, 0, 0, 1],[1, 0, 1, 1, 1],[1, 0, 1, 1, 1]])
+        N = 4
+        pattern_hash = [3, 15]
+        expected1 = np.array([ 2.,  1.])
+        expected2 = [[0, 2], [4]]
+        nemp,nemp_indices = ue.n_emp_mat(mat,N,pattern_hash)
+        self.assertTrue(np.all(nemp == expected1))
+        for item_cnt,item in enumerate(nemp_indices):
+            self.assertTrue(np.allclose(expected2[item_cnt],item))
+
+    def test_n_emp_mat_sum_trial_default(self):
+        mat = self.binary_sts
+        pattern_hash = np.array([4,6])
+        N = 3
+        expected1 = np.array([ 1.,  3.])
+        expected2 = [[[0], [3]],[[],[2,4]]]
+        n_emp, n_emp_idx = ue.n_emp_mat_sum_trial(mat, N,pattern_hash)
+        self.assertTrue(np.all(n_emp == expected1))
+        for item0_cnt,item0 in enumerate(n_emp_idx):
+            for item1_cnt,item1 in enumerate(item0):
+                self.assertTrue(np.allclose(expected2[item0_cnt][item1_cnt],item1))
+
+    def test_n_emp_mat_sum_trial_ValueError(self):
+        mat = np.array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], [1,1,0],
+                      [1,0,1],[0,1,1],[1,1,1]])
+        self.assertRaises(ValueError,ue.n_emp_mat_sum_trial,mat,N=2,pattern_hash = [3,6])
+
+    def test_n_exp_mat_default(self):
+        mat = np.array([[0, 0, 0, 1, 1],[0, 0, 0, 0, 1],[1, 0, 1, 1, 1],[1, 0, 1, 1, 1]])
+        N = 4
+        pattern_hash = [3, 11]
+        expected = np.array([ 1.536,  1.024])
+        nexp = ue.n_exp_mat(mat,N,pattern_hash)
+        self.assertTrue(np.allclose(expected,nexp))
+
+    def test_n_exp_mat_sum_trial_default(self):
+        mat = self.binary_sts
+        pattern_hash = np.array([5,6])
+        N = 3
+        expected = np.array([ 1.56,  2.56])
+        n_exp = ue.n_exp_mat_sum_trial(mat, N,pattern_hash)
+        self.assertTrue(np.allclose(n_exp,expected))
+
+    def test_n_exp_mat_sum_trial_TrialAverage(self):
+        mat = self.binary_sts
+        pattern_hash = np.array([5,6])
+        N = 3
+        expected = np.array([ 1.62,  2.52])
+        n_exp = ue.n_exp_mat_sum_trial(mat, N, pattern_hash, method='analytic_TrialAverage')
+        self.assertTrue(np.allclose(n_exp,expected))
+
+    def test_n_exp_mat_sum_trial_surrogate(self):
+        mat = self.binary_sts
+        pattern_hash = np.array([5])
+        N = 3
+        n_exp_anal = ue.n_exp_mat_sum_trial(mat, N, pattern_hash, method='analytic_TrialAverage')
+        n_exp_surr = ue.n_exp_mat_sum_trial(mat, N, pattern_hash, method='surrogate_TrialByTrial',n_surr = 1000)
+        self.assertLess((np.abs(n_exp_anal[0]-np.mean(n_exp_surr))/n_exp_anal[0]),0.1)
+
+    def test_n_exp_mat_sum_trial_ValueError(self):
+        mat = np.array([[0,0,0], [1,0,0], [0,1,0], [0,0,1], [1,1,0],
+                      [1,0,1],[0,1,1],[1,1,1]])
+        self.assertRaises(ValueError,ue.n_exp_mat_sum_trial,mat,N=2,pattern_hash = [3,6])
+
+    def test_gen_pval_anal_default(self):
+        mat = np.array([[[1, 1, 1, 1, 0],
+                         [0, 1, 1, 1, 0],
+                         [0, 1, 1, 0, 1]],
+
+                        [[1, 1, 1, 1, 1],
+                         [0, 1, 1, 1, 1],
+                         [1, 1, 0, 1, 0]]])
+        pattern_hash = np.array([5,6])
+        N = 3
+        expected = np.array([ 1.56,  2.56])
+        pval_func,n_exp = ue.gen_pval_anal(mat, N,pattern_hash)
+        self.assertTrue(np.allclose(n_exp,expected))
+        self.assertTrue(isinstance(pval_func, types.FunctionType))
+
+    def test_jointJ_default(self):
+        p_val = np.array([0.31271072,  0.01175031])
+        expected = np.array([0.3419968 ,  1.92481736])
+        self.assertTrue(np.allclose(ue.jointJ(p_val),expected))
+
+    def test__rate_mat_avg_trial_default(self):
+        mat = self.binary_sts
+        expected = [0.9, 0.7,0.6]
+        self.assertTrue(np.allclose(expected,ue._rate_mat_avg_trial(mat)))
+
+    def test__bintime(self):
+        t = 13*pq.ms
+        binsize = 3*pq.ms
+        expected = 4
+        self.assertTrue(np.allclose(expected,ue._bintime(t,binsize)))
+    def test__winpos(self):
+        t_start = 10*pq.ms
+        t_stop = 46*pq.ms
+        winsize = 15*pq.ms
+        winstep = 3*pq.ms
+        expected = [ 10., 13., 16., 19., 22., 25., 28., 31.]*pq.ms
+        self.assertTrue(
+            np.allclose(
+                ue._winpos(
+                    t_start, t_stop, winsize,
+                    winstep).rescale('ms').magnitude,
+                expected.rescale('ms').magnitude))
+
+    def test__UE_default(self):
+        mat = self.binary_sts
+        pattern_hash = np.array([4,6])
+        N = 3
+        expected_S = np.array([-0.26226523,  0.04959301])
+        expected_idx = [[[0], [3]], [[], [2, 4]]]
+        expected_nemp = np.array([ 1.,  3.])
+        expected_nexp = np.array([ 1.04,  2.56])
+        expected_rate = np.array([ 0.9,  0.7,  0.6])
+        S, rate_avg, n_exp, n_emp,indices = ue._UE(mat,N,pattern_hash)
+        self.assertTrue(np.allclose(S ,expected_S))
+        self.assertTrue(np.allclose(n_exp ,expected_nexp))
+        self.assertTrue(np.allclose(n_emp ,expected_nemp))
+        self.assertTrue(np.allclose(expected_rate ,rate_avg))
+        for item0_cnt,item0 in enumerate(indices):
+            for item1_cnt,item1 in enumerate(item0):
+                self.assertTrue(np.allclose(expected_idx[item0_cnt][item1_cnt],item1))
+
+    def test__UE_surrogate(self):
+        mat = self.binary_sts
+        pattern_hash = np.array([4])
+        N = 3
+        _, rate_avg_surr, _, n_emp_surr,indices_surr =\
+        ue._UE(mat, N, pattern_hash, method='surrogate_TrialByTrial', n_surr=100)
+        _, rate_avg, _, n_emp,indices =\
+        ue._UE(mat, N, pattern_hash, method='analytic_TrialByTrial')
+        self.assertTrue(np.allclose(n_emp ,n_emp_surr))
+        self.assertTrue(np.allclose(rate_avg ,rate_avg_surr))
+        for item0_cnt,item0 in enumerate(indices):
+            for item1_cnt,item1 in enumerate(item0):
+                self.assertTrue(np.allclose(indices_surr[item0_cnt][item1_cnt],item1))
+
+    def test_jointJ_window_analysis(self):
+        sts1 = self.sts1_neo
+        sts2 = self.sts2_neo
+        data = np.vstack((sts1,sts2)).T
+        winsize = 100*pq.ms
+        binsize = 5*pq.ms
+        winstep = 20*pq.ms
+        pattern_hash = [3]
+        UE_dic = ue.jointJ_window_analysis(data, binsize, winsize, winstep, pattern_hash)
+        expected_Js = np.array(
+            [ 0.57953708,  0.47348757,  0.1729669 ,  
+              0.01883295, -0.21934742,-0.80608759])
+        expected_n_emp = np.array(
+            [ 9.,  9.,  7.,  7.,  6.,  6.])
+        expected_n_exp = np.array(
+            [ 6.5 ,  6.85,  6.05,  6.6 ,  6.45,  8.7 ])
+        expected_rate = np.array(
+            [[ 0.02166667,  0.01861111],
+             [ 0.02277778,  0.01777778],
+             [ 0.02111111,  0.01777778],
+             [ 0.02277778,  0.01888889],
+             [ 0.02305556,  0.01722222],
+             [ 0.02388889,  0.02055556]])*pq.kHz
+        expected_indecis_tril26 = [ 4.,    4.]
+        expected_indecis_tril4 = [ 1.]
+        self.assertTrue(np.allclose(UE_dic['Js'] ,expected_Js))
+        self.assertTrue(np.allclose(UE_dic['n_emp'] ,expected_n_emp))
+        self.assertTrue(np.allclose(UE_dic['n_exp'] ,expected_n_exp))
+        self.assertTrue(np.allclose(
+            UE_dic['rate_avg'].rescale('Hz').magnitude ,
+            expected_rate.rescale('Hz').magnitude))
+        self.assertTrue(np.allclose(
+            UE_dic['indices']['trial26'],expected_indecis_tril26))
+        self.assertTrue(np.allclose(
+            UE_dic['indices']['trial4'],expected_indecis_tril4))
+
+
+def suite():
+    suite = unittest.makeSuite(UETestCase, 'test')
+    return suite
+
+if __name__ == "__main__":
+    runner = unittest.TextTestRunner(verbosity=2)
+    runner.run(suite())
+

+ 805 - 0
code/elephant/elephant/unitary_event_analysis.py

@@ -0,0 +1,805 @@
+# -*- coding: utf-8 -*-
+"""
+Unitary Event (UE) analysis is a statistical method that enables the
+time-resolved analysis of excess spike correlation between simultaneously
+recorded neurons, by comparing the empirical number of spike coincidences
+(precision of a few ms) to the number expected from the firing rates of
+the neurons.
+
+References:
+  - Gruen, Diesmann, Grammont, Riehle, Aertsen (1999) J Neurosci Methods,
+    94(1): 67-79.
+  - Gruen, Diesmann, Aertsen (2002a,b) Neural Comput, 14(1): 43-80; 81-119.
+  - Gruen S, Riehle A, and Diesmann M (2003) Effect of cross-trial
+    nonstationarity on joint-spike events. Biological Cybernetics 88(5):335-351.
+  - Gruen S (2009) Data-driven significance estimation of precise spike
+    correlation. J Neurophysiology 101:1126-1140 (invited review)
+
+:copyright: Copyright 2015-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+
+import numpy as np
+import quantities as pq
+import neo
+import warnings
+import elephant.conversion as conv
+import scipy.special
+
+
+def hash_from_pattern(m, N, base=2):
+    """
+    Calculate a unique hash number for a spike pattern, or for a matrix of
+    spike patterns (one pattern per column), composed of N neurons.
+
+
+    Parameters:
+    -----------
+    m: 2-dim ndarray
+           spike patterns represented as a binary matrix (i.e., matrix of 0's and 1's).
+           Rows and columns correspond to neurons and patterns, respectively.
+    N: integer
+           number of neurons is required to be equal to the number
+           of rows
+    base: integer
+           base for calculation of hash values from binary
+           sequences (= pattern).
+           Default is 2
+
+    Returns:
+    --------
+    list of integers:
+           An array containing the hash values of each pattern,
+           shape: (number of patterns)
+
+    Raises:
+    -------
+       ValueError: if matrix m has wrong orientation
+
+    Examples:
+    ---------
+    descriptive example:
+    m = [0
+         1
+         1]
+    N = 3
+    base = 2
+    hash = 0*2^2 + 1*2^1 + 1*2^0 = 3
+
+    second example:
+    >>> import numpy as np
+    >>> m = np.array([[0, 1, 0, 0, 1, 1, 0, 1],
+                         [0, 0, 1, 0, 1, 0, 1, 1],
+                         [0, 0, 0, 1, 0, 1, 1, 1]])
+
+    >>> hash_from_pattern(m,N=3)
+        array([0, 4, 2, 1, 6, 5, 3, 7])
+    """
+    # check the consistency between shape of m and number neurons N
+    if N != np.shape(m)[0]:
+        raise ValueError('patterns in the matrix should be column entries')
+
+    # check the entries of the matrix
+    if not np.all((np.array(m) == 0) + (np.array(m) == 1)):
+        raise ValueError('patterns should be zero or one')
+
+    # generate the representation
+    v = np.array([base**x for x in range(N)])
+    # reverse the order
+    v = v[np.argsort(-v)]
+    # calculate the binary number by use of scalar product
+    return np.dot(v, m)
+
+
+def inverse_hash_from_pattern(h, N, base=2):
+    """
+    Calculate the 0-1 spike patterns (matrix) from hash values
+
+    Parameters:
+    -----------
+    h: list of integers
+           list or array of hash values, length: number of patterns
+    N: integer
+           number of neurons
+    base: integer
+           base for calculation of the number from binary
+           sequences (= pattern).
+           Default is 2
+
+    Raises:
+    -------
+       ValueError: if a hash value is not compatible with the number of
+       neurons; a hash value must not be larger than the largest possible
+       hash number for the given number of neurons
+       (e.g. for N = 2, max(hash) = 2^1 + 2^0 = 3,
+       or for N = 4, max(hash) = 2^3 + 2^2 + 2^1 + 2^0 = 15)
+
+    Returns:
+    --------
+       numpy.array:
+           A matrix of shape: (N, number of patterns)
+
+    Examples
+    ---------
+    >>> import numpy as np
+    >>> h = np.array([3,7])
+    >>> N = 4
+    >>> inverse_hash_from_pattern(h,N)
+        array([[0, 0],
+               [0, 1],
+               [1, 1],
+               [1, 1]])
+    """
+
+    # check if the hash values are not greater than the greatest possible
+    # value for N neurons with the given base
+    if np.any(h > np.sum([base**x for x in range(N)])):
+        raise ValueError(
+            "hash value is not compatible with the number of neurons N")
+    # check if the hash values are integer
+    if not np.all(np.int64(h) == h):
+        raise ValueError("hash values are not integers")
+
+    m = np.zeros((N, len(h)), dtype=int)
+    for j, hh in enumerate(h):
+        i = N - 1
+        while i >= 0 and hh != 0:
+            m[i, j] = hh % base
+            hh //= base  # integer division, so hh stays an integer
+            i -= 1
+    return m
+
+
+def n_emp_mat(mat, N, pattern_hash, base=2):
+    """
+    Count the occurrences of spike coincidence patterns 
+    in the given spike trains.
+
+    Parameters:
+    -----------
+    mat: 2-dim ndarray
+           binned spike trains of N neurons. Rows and columns correspond 
+           to neurons and temporal bins, respectively.
+    N: integer
+           number of neurons
+    pattern_hash: list of integers
+           hash values representing the spike coincidence patterns 
+           of which occurrences are counted.
+    base: integer
+           Base which was used to generate the hash values.
+           Default is 2
+
+    Returns:
+    --------
+    N_emp: list of integers
+           number of occurrences of the given patterns in the given spike trains
+    indices: list of lists of integers
+           indices of the bins in `mat` at which the given spike patterns are
+           found. Same length as `pattern_hash`; `indices[i]` contains the
+           `N_emp[i]` bin indices where pattern `pattern_hash[i]` occurs.
+
+    Raises:
+    -------
+       ValueError: if mat is not zero-one matrix
+
+    Examples:
+    ---------
+    >>> mat = np.array([[1, 0, 0, 1, 1],
+                        [1, 0, 0, 1, 0]])
+    >>> pattern_hash = np.array([1,3])
+    >>> n_emp, n_emp_indices = n_emp_mat(mat, N=2, pattern_hash=pattern_hash)
+    >>> print(n_emp)
+    [ 0.  2.]
+    >>> print(n_emp_indices)
+    [array([]), array([0, 3])]
+    """
+    # check if the mat is zero-one matrix
+    if not np.all((np.array(mat) == 0) + (np.array(mat) == 1)):
+        raise ValueError("entries of mat should be either one or zero")
+    h = hash_from_pattern(mat, N, base=base)
+    N_emp = np.zeros(len(pattern_hash))
+    indices = []
+    for idx_ph, ph in enumerate(pattern_hash):
+        indices_tmp = np.where(h == ph)[0]
+        indices.append(indices_tmp)
+        N_emp[idx_ph] = len(indices_tmp)
+    return N_emp, indices
+
+
+def n_emp_mat_sum_trial(mat, N, pattern_hash):
+    """
+    Calculates empirical number of observed patterns summed across trials
+
+    Parameters:
+    -----------
+    mat: 3d numpy array or elephant BinnedSpikeTrain object
+           Binned spike trains represented as a binary matrix (i.e., matrix of 0's and 1's), 
+           segmented into trials. Trials should contain an identical number of neurons and 
+           an identical number of time bins.
+            the entries are zero or one
+            0-axis --> trials
+            1-axis --> neurons
+            2-axis --> time bins
+    N: integer
+            number of neurons
+    pattern_hash: list of integers
+            array of hash values, length: number of patterns
+
+
+    Returns:
+    --------
+    N_emp: list of integers
+           numbers of occurrences of the given spike patterns in the given spike trains,
+           summed across trials. Same length as `pattern_hash`.
+    idx_trials: list of lists of integers
+           list of indices of mat for each trial in which
+           the specific pattern has been observed.
+           0-axis --> trial
+           1-axis --> list of indices for the chosen trial per
+           entry of `pattern_hash`
+
+    Raises:
+    -------
+       ValueError: if matrix mat has wrong orientation
+       ValueError: if mat is not zero-one matrix
+
+    Examples:
+    ---------
+    >>> mat = np.array([[[1, 1, 1, 1, 0],
+                       [0, 1, 1, 1, 0],
+                       [0, 1, 1, 0, 1]],
+
+                       [[1, 1, 1, 1, 1],
+                        [0, 1, 1, 1, 1],
+                        [1, 1, 0, 1, 0]]])
+
+    >>> pattern_hash = np.array([4,6])
+    >>> N = 3
+    >>> n_emp_sum_trial, n_emp_sum_trial_idx =
+                             n_emp_mat_sum_trial(mat, N,pattern_hash)
+    >>> n_emp_sum_trial
+        array([ 1.,  3.])
+    >>> n_emp_sum_trial_idx
+        [[array([0]), array([3])], [array([], dtype=int64), array([2, 4])]]
+    """
+    # check the consistency between shape of m and number neurons N
+    if N != np.shape(mat)[1]:
+        raise ValueError('the entries of mat should be a list of a '
+                         'list where 0-axis is trials and 1-axis is neurons')
+
+    num_patt = len(pattern_hash)
+    N_emp = np.zeros(num_patt)
+
+    idx_trials = []
+    # check if the mat is zero-one matrix
+    if not np.all((np.array(mat) == 0) + (np.array(mat) == 1)):
+        raise ValueError("entries of mat should be either one or zero")
+
+    for mat_tr in mat:
+        N_emp_tmp, indices_tmp = n_emp_mat(mat_tr, N, pattern_hash, base=2)
+        idx_trials.append(indices_tmp)
+        N_emp += N_emp_tmp
+
+    return N_emp, idx_trials
+
+
+def _n_exp_mat_analytic(mat, N, pattern_hash):
+    """
+    Calculates the expected joint probability for each spike pattern analyticaly
+    """
+    marg_prob = np.mean(mat, 1, dtype=float)
+    # marg_prob needs to be a column vector, so we
+    # build a two dimensional array with 1 column
+    # and len(marg_prob) rows
+    marg_prob = np.reshape(marg_prob, (len(marg_prob), 1))
+    m = inverse_hash_from_pattern(pattern_hash, N)
+    nrep = np.shape(m)[1]
+    # multiplying the marginal probability of each neuron according to the
+    # pattern (p if the pattern requires a spike, 1-p otherwise)
+    pmat = np.multiply(m, np.tile(marg_prob, (1, nrep))) +\
+        np.multiply(1 - m, np.tile(1 - marg_prob, (1, nrep)))
+    return np.prod(pmat, axis=0) * float(np.shape(mat)[1])
+
+
+def _n_exp_mat_surrogate(mat, N, pattern_hash, n_surr=1):
+    """
+    Calculates the expected joint probability for each spike pattern with spike
+    time randomization surrogate
+    """
+    if len(pattern_hash) > 1:
+        raise ValueError('surrogate method works only for one pattern!')
+    N_exp_array = np.zeros(n_surr)
+    for rz_idx, rz in enumerate(np.arange(n_surr)):
+        # shuffling all elements of zero-one matrix
+        mat_surr = np.array(mat)
+        [np.random.shuffle(i) for i in mat_surr]
+        N_exp_array[rz_idx] = n_emp_mat(mat_surr, N, pattern_hash)[0][0]
+    return N_exp_array
+
+
+def n_exp_mat(mat, N, pattern_hash, method='analytic', n_surr=1):
+    """
+    Calculates the expected joint probability for each spike pattern
+
+    Parameters:
+    -----------
+    mat: 2d numpy array
+            the entries are zero or one
+            0-axis --> neurons
+            1-axis --> time bins
+    N: integer
+            number of neurons
+    pattern_hash: list of integers
+            array of hash values, length: number of patterns
+    method: string
+            method with which the expectancy should be calculated
+            'analytic' -- > analytically
+            'surr' -- > with surrogates (spike time randomization)
+            Default is 'analytic'
+    n_surr: integer
+            number of surrogates for constructing the distribution of expected joint probability.
+            Default is 1; this number is used only when method = 'surr'
+
+    kwargs:
+    -------
+
+    Raises:
+    -------
+       ValueError: if matrix m has wrong orientation
+
+    Returns:
+    --------
+    if method is analytic:
+        numpy.array:
+           An array containing the expected joint probability of each pattern,
+           shape: (number of patterns,)
+    if method is surr:
+        numpy.ndarray, 0-axis --> different realizations,
+                       length = number of surrogates
+                       1-axis --> patterns
+
+    Examples:
+    ---------
+    >>> mat = np.array([[1, 1, 1, 1],
+                       [0, 1, 0, 1],
+                       [0, 0, 1, 0]])
+    >>> pattern_hash = np.array([5,6])
+    >>> N = 3
+    >>> n_exp_anal = n_exp_mat(mat,N, pattern_hash, method = 'analytic')
+    >>> n_exp_anal
+        [ 0.5 1.5 ]
+    >>>
+    >>>
+    >>> n_exp_surr = n_exp_mat(
+                  mat, N,pattern_hash, method = 'surr', n_surr = 5000)
+    >>> print(n_exp_surr)
+    [[ 1.  1.]
+     [ 2.  0.]
+     [ 2.  0.]
+     ...,
+     [ 2.  0.]
+     [ 2.  0.]
+     [ 1.  1.]]
+
+    """
+    # check if the mat is zero-one matrix
+    if np.any(mat > 1) or np.any(mat < 0):
+        raise ValueError("entries of mat should be either one or zero")
+
+    if method == 'analytic':
+        return _n_exp_mat_analytic(mat, N, pattern_hash)
+    if method == 'surr':
+        return _n_exp_mat_surrogate(mat, N, pattern_hash, n_surr)
+
+
+def n_exp_mat_sum_trial(
+        mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
+    """
+    Calculates the expected joint probability for each spike pattern,
+    summed over trials
+
+    Parameters:
+    -----------
+    mat: 3d numpy array or elephant BinnedSpikeTrain object
+           Binned spike trains represented as a binary matrix (i.e., matrix of 0's and 1's), 
+           segmented into trials. Trials should contain an identical number of neurons and 
+           an identical number of time bins.
+            the entries are zero or one
+            0-axis --> trials
+            1-axis --> neurons
+            2-axis --> time bins
+    N: integer
+           number of neurons
+    pattern_hash: list of integers
+            array of hash values, length: number of patterns
+    method: string
+            method with which the unitary events should be computed
+            'analytic_TrialByTrial' -- > calculate the expectancy
+            (analytically) on each trial, then sum over all trials.
+            'analytic_TrialAverage' -- > calculate the expectancy
+            by averaging over trials.
+            (cf. Gruen et al. 2003)
+            'surrogate_TrialByTrial' -- > calculate the distribution
+            of expected coincidences by spike time randomization in
+            each trial and sum over trials.
+            Default is 'analytic_TrialByTrial'
+
+    kwargs:
+    -------
+    n_surr: integer
+            number of surrogates to be used
+            Default is 1
+
+    Returns:
+    --------
+    numpy.array:
+       An array containing the expected joint probability of
+       each pattern summed over trials,shape: (number of patterns,)
+
+    Examples:
+    --------
+    >>> mat = np.array([[[1, 1, 1, 1, 0],
+                       [0, 1, 1, 1, 0],
+                       [0, 1, 1, 0, 1]],
+
+                       [[1, 1, 1, 1, 1],
+                        [0, 1, 1, 1, 1],
+                        [1, 1, 0, 1, 0]]])
+
+    >>> pattern_hash = np.array([5,6])
+    >>> N = 3
+    >>> n_exp_anal = n_exp_mat_sum_trial(mat, N, pattern_hash)
+    >>> print(n_exp_anal)
+        array([ 1.56,  2.56])
+    """
+    # check the consistency between shape of m and number neurons N
+    if N != np.shape(mat)[1]:
+        raise ValueError('the entries of mat should be a list of a '
+                         'list where 0-axis is trials and 1-axis is neurons')
+
+    if method == 'analytic_TrialByTrial':
+        n_exp = np.zeros(len(pattern_hash))
+        for mat_tr in mat:
+            n_exp += n_exp_mat(mat_tr, N, pattern_hash, method='analytic')
+    elif method == 'analytic_TrialAverage':
+        n_exp = n_exp_mat(
+            np.mean(mat, 0), N, pattern_hash, method='analytic') * np.shape(mat)[0]
+    elif method == 'surrogate_TrialByTrial':
+        if 'n_surr' in kwargs:
+            n_surr = kwargs['n_surr']
+        else:
+            n_surr = 1
+        n_exp = np.zeros(n_surr)
+        for mat_tr in mat:
+            n_exp += n_exp_mat(mat_tr, N, pattern_hash,
+                               method='surr', n_surr=n_surr)
+    else:
+        raise ValueError(
+            "unknown method '%s' for calculating the expected coincidences"
+            % method)
+    return n_exp
+
+
+def gen_pval_anal(
+        mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
+    """
+    Computes the expected coincidences and returns a function to calculate
+    the p-value for given empirical coincidences.
+
+    This function generates a Poisson distribution with the expected
+    value calculated from `mat`. It returns a function which takes
+    the empirical coincidences, `n_emp`, and calculates a p-value
+    as the area under the Poisson distribution from `n_emp` to infinity.
+
+    Parameters:
+    -----------
+    mat: 3d numpy array or elephant BinnedSpikeTrain object
+           Binned spike trains represented as a binary matrix (i.e., matrix of 0's and 1's), 
+           segmented into trials. Trials should contain an identical number of neurons and 
+           an identical number of time bins.
+            the entries are zero or one
+            0-axis --> trials
+            1-axis --> neurons
+            2-axis --> time bins
+    N: integer
+           number of neurons
+    pattern_hash: list of integers
+            array of hash values, length: number of patterns
+    method: string
+            method with which the unitary events should be computed
+            'analytic_TrialByTrial' -- > calculate the expectancy
+            (analytically) on each trial, then sum over all trials.
+            'analytic_TrialAverage' -- > calculate the expectancy
+            by averaging over trials.
+            Default is 'analytic_TrialByTrial'
+            (cf. Gruen et al. 2003)
+
+    kwargs:
+    -------
+    n_surr: integer
+            number of surrogates to be used
+            Default is 1
+
+
+    Returns:
+    --------
+    pval_anal:
+            a function which calculates the p-value for
+            the given empirical coincidences
+    n_exp: list of floats
+            expected coincidences
+
+    Examples:
+    --------
+    >>> mat = np.array([[[1, 1, 1, 1, 0],
+                       [0, 1, 1, 1, 0],
+                       [0, 1, 1, 0, 1]],
+
+                       [[1, 1, 1, 1, 1],
+                        [0, 1, 1, 1, 1],
+                        [1, 1, 0, 1, 0]]])
+
+    >>> pattern_hash = np.array([5,6])
+    >>> N = 3
+    >>> pval_anal,n_exp = gen_pval_anal(mat, N,pattern_hash)
+    >>> n_exp
+        array([ 1.56,  2.56])
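+    >>> # The returned function maps empirical coincidence counts to p-values,
+    >>> # e.g. pval_anal(np.array([3., 4.]))  (illustrative call; output omitted)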
+    """
+    if method == 'analytic_TrialByTrial' or method == 'analytic_TrialAverage':
+        n_exp = n_exp_mat_sum_trial(mat, N, pattern_hash, method=method)
+
+        def pval(n_emp):
+            p = 1. - scipy.special.gammaincc(n_emp, n_exp)
+            return p
+    elif method == 'surrogate_TrialByTrial':
+        if 'n_surr' in kwargs:
+            n_surr = kwargs['n_surr']
+        else:
+            n_surr = 1
+        n_exp = n_exp_mat_sum_trial(
+            mat, N, pattern_hash, method=method, n_surr=n_surr)
+
+        def pval(n_emp):
+            hist = np.bincount(np.int64(n_exp))
+            exp_dist = hist / float(np.sum(hist))
+            if len(n_emp) > 1:
+                raise ValueError(
+                    'in surrogate method the p_value can be calculated only for one pattern!')
+            return np.sum(exp_dist[int(n_emp[0]):])
+
+    return pval, n_exp
+
+
+def jointJ(p_val):
+    """Surprise measurement
+
+    logarithmic transformation of joint-p-value into surprise measure
+    for better visualization as the highly significant events are
+    indicated by very low joint-p-values
+
+    Parameters:
+    -----------
+    p_val: list of floats
+        p-values of statistical tests for different patterns.
+
+    Returns:
+    --------
+    J: list of floats
+        list of surprise measure
+
+    Examples:
+    ---------
+    >>> p_val = np.array([0.31271072,  0.01175031])
+    >>> jointJ(p_val)
+        array([0.3419968 ,  1.92481736])
+    """
+    p_arr = np.array(p_val)
+
+    try:
+        Js = np.log10(1 - p_arr) - np.log10(p_arr)
+    except RuntimeWarning:
+        pass
+    return Js
+
+
+def _rate_mat_avg_trial(mat):
+    """
+    calculates the average firing rate of each neuron across trials
+    """
+    num_tr, N, nbins = np.shape(mat)
+    psth = np.zeros(N)
+    for tr, mat_tr in enumerate(mat):
+        psth += np.sum(mat_tr, axis=1)
+    return psth / float(nbins) / float(num_tr)
+
+
+def _bintime(t, binsize):
+    """
+    Converts a time quantity to the corresponding bin index, given the binsize
+    """
+    t_dl = t.rescale('ms').magnitude
+    binsize_dl = binsize.rescale('ms').magnitude
+    return np.floor(np.array(t_dl) / binsize_dl).astype(int)
+
+
+def _winpos(t_start, t_stop, winsize, winstep, position='left-edge'):
+    """
+    Calculates the left-edge positions of the analysis windows
+    """
+    t_start_dl = t_start.rescale('ms').magnitude
+    t_stop_dl = t_stop.rescale('ms').magnitude
+    winsize_dl = winsize.rescale('ms').magnitude
+    winstep_dl = winstep.rescale('ms').magnitude
+
+    # left side of the window time
+    if position == 'left-edge':
+        ts_winpos = np.arange(
+            t_start_dl, t_stop_dl - winsize_dl + winstep_dl, winstep_dl) * pq.ms
+    else:
+        raise ValueError(
+            'the current version only returns left-edge of the window')
+    return ts_winpos
+
+
+def _UE(mat, N, pattern_hash, method='analytic_TrialByTrial', **kwargs):
+    """
+    Returns the results of unitary event analysis for the given data
+    (joint surprise, average firing rate of the neurons, expected and
+    empirical coincidence counts, and the indices in `mat` where the
+    patterns occur)
+    """
+    rate_avg = _rate_mat_avg_trial(mat)
+    n_emp, indices = n_emp_mat_sum_trial(mat, N, pattern_hash)
+    if method == 'surrogate_TrialByTrial':
+        if 'n_surr' in kwargs:
+            n_surr = kwargs['n_surr']
+        else:
+            n_surr = 1
+        dist_exp, n_exp = gen_pval_anal(
+            mat, N, pattern_hash, method, n_surr=n_surr)
+        n_exp = np.mean(n_exp)
+    elif method == 'analytic_TrialByTrial' or method == 'analytic_TrialAverage':
+        dist_exp, n_exp = gen_pval_anal(mat, N, pattern_hash, method)
+    pval = dist_exp(n_emp)
+    Js = jointJ(pval)
+    return Js, rate_avg, n_exp, n_emp, indices
+
+
+def jointJ_window_analysis(
+        data, binsize, winsize, winstep, pattern_hash,
+        method='analytic_TrialByTrial', t_start=None,
+        t_stop=None, binary=True, **kwargs):
+    """
+    Calculates the joint surprise in a sliding window fashion
+
+    Parameters:
+    ----------
+    data: list of neo.SpikeTrain objects
+          list of spike trains in different trials
+                                        0-axis --> Trials
+                                        1-axis --> Neurons
+                                        2-axis --> Spike times
+    binsize: Quantity scalar with dimension time
+           size of bins for discretizing spike trains
+    winsize: Quantity scalar with dimension time
+           size of the window of analysis
+    winstep: Quantity scalar with dimension time
+           size of the window step
+    pattern_hash: list of integers
+           list of interested patterns in hash values
+           (see hash_from_pattern and inverse_hash_from_pattern functions)
+    method: string
+            method with which the unitary events should be computed
+            'analytic_TrialByTrial' -- > calculate the expectancy
+            (analytically) on each trial, then sum over all trials.
+            'analytic_TrialAverage' -- > calculate the expectancy
+            by averaging over trials.
+            (cf. Gruen et al. 2003)
+            'surrogate_TrialByTrial' -- > calculate the distribution
+            of expected coincidences by spike time randomization in
+            each trial and sum over trials.
+            Default is 'analytic_TrialByTrial'
+    t_start: float or Quantity scalar, optional
+             The start time to use for the time points.
+             If not specified, retrieved from the `t_start`
+             attribute of the spike trains in `data`.
+    t_stop: float or Quantity scalar, optional
+            The stop time to use for the time points.
+            If not specified, retrieved from the `t_stop`
+            attribute of the spike trains in `data`.
+
+    kwargs:
+    -------
+    n_surr: integer
+            number of surrogates to be used
+            Default is 100
+
+
+    Returns:
+    -------
+    result: dictionary
+          Js: list of float
+                 JointSurprise of different given patterns within each window
+                 shape: different pattern hash --> 0-axis
+                        different window --> 1-axis
+          indices: list of list of integers
+                 list of indices of pattern within each window
+                 shape: different pattern hash --> 0-axis
+                        different window --> 1-axis
+          n_emp: list of integers
+                 empirical number of each observed pattern.
+                 shape: different pattern hash --> 0-axis
+                        different window --> 1-axis
+          n_exp: list of floats
+                 expected number of each pattern.
+                 shape: different pattern hash --> 0-axis
+                        different window --> 1-axis
+          rate_avg: list of floats
+                 average firing rate of each neuron
+                 shape: different window --> 0-axis
+                        different neuron --> 1-axis
+
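+    Examples:
+    ---------
+    A minimal usage sketch (illustrative only, not from the original
+    documentation); `spiketrains` is assumed to be a list of trials, each
+    being a list of simultaneously recorded neo.SpikeTrain objects of equal
+    duration:
+
+    >>> import quantities as pq
+    >>> # hash of the coincidence pattern [1, 1], i.e. both of 2 neurons fire
+    >>> pattern_hash = hash_from_pattern([[1], [1]], N=2)
+    >>> UE_dic = jointJ_window_analysis(
+    ...     spiketrains, binsize=5 * pq.ms, winsize=100 * pq.ms,
+    ...     winstep=20 * pq.ms, pattern_hash=pattern_hash)
+    >>> UE_dic['Js']      # joint surprise in each analysis window
+    >>> UE_dic['n_emp']   # empirical coincidence counts per window
+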
+    """
+    if not isinstance(data[0][0], neo.SpikeTrain):
+        raise ValueError(
+            "structure of the data is not correct: 0-axis should be trials, 1-axis units and 2-axis neo spike trains")
+
+    if t_start is None:
+        t_start = data[0][0].t_start.rescale('ms')
+    if t_stop is None:
+        t_stop = data[0][0].t_stop.rescale('ms')
+
+    # position of all windows (left edges)
+    t_winpos = _winpos(t_start, t_stop, winsize, winstep, position='left-edge')
+    t_winpos_bintime = _bintime(t_winpos, binsize)
+
+    winsize_bintime = _bintime(winsize, binsize)
+    winstep_bintime = _bintime(winstep, binsize)
+
+    if winsize_bintime * binsize != winsize:
+        warnings.warn(
+            "ratio between winsize and binsize is not integer -- "
+            "the actual window size is " + str(winsize_bintime * binsize))
+
+    if winstep_bintime * binsize != winstep:
+        warnings.warn(
+            "ratio between winstep and binsize is not integer -- "
+            "the actual window step is " + str(winstep_bintime * binsize))
+
+    num_tr, N = np.shape(data)[:2]
+
+    n_bins = int((t_stop - t_start) / binsize)
+
+    mat_tr_unit_spt = np.zeros((len(data), N, n_bins))
+    for tr, sts in enumerate(data):
+        bs = conv.BinnedSpikeTrain(
+            sts, t_start=t_start, t_stop=t_stop, binsize=binsize)
+        if binary is True:
+            mat = bs.to_bool_array()
+        else:
+            raise ValueError(
+                "The method only works on the zero_one matrix at the moment")
+        mat_tr_unit_spt[tr] = mat
+
+    num_win = len(t_winpos)
+    Js_win, n_exp_win, n_emp_win = (np.zeros(num_win) for _ in range(3))
+    rate_avg = np.zeros((num_win, N))
+    indices_win = {}
+    for i in range(num_tr):
+        indices_win['trial' + str(i)] = []
+
+    for i, win_pos in enumerate(t_winpos_bintime):
+        mat_win = mat_tr_unit_spt[:, :, win_pos:win_pos + winsize_bintime]
+        if method == 'surrogate_TrialByTrial':
+            if 'n_surr' in kwargs:
+                n_surr = kwargs['n_surr']
+            else:
+                n_surr = 100
+            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[i], indices_lst = _UE(
+                mat_win, N, pattern_hash, method, n_surr=n_surr)
+        else:
+            Js_win[i], rate_avg[i], n_exp_win[i], n_emp_win[
+                i], indices_lst = _UE(mat_win, N, pattern_hash, method)
+        for j in range(num_tr):
+            if len(indices_lst[j][0]) > 0:
+                indices_win[
+                    'trial' + str(j)] = np.append(indices_win['trial' + str(j)], indices_lst[j][0] + win_pos)
+    return {'Js': Js_win, 'indices': indices_win, 'n_emp': n_emp_win, 'n_exp': n_exp_win, 'rate_avg': rate_avg / binsize}

+ 2 - 0
code/elephant/readthedocs.yml

@@ -0,0 +1,2 @@
+conda:
+  file: doc/environment.yml

+ 14 - 0
code/elephant/requirements.txt

@@ -0,0 +1,14 @@
+# essential
+-e git+https://github.com/NeuralEnsemble/python-neo.git@snapshot-20150821#egg=neo-snapshot-20150821
+numpy>=1.8.2
+quantities>=0.10.1
+scipy>=0.14.0
+six
+# optional
+#pandas>=0.14.1
+# for building documentation
+#numpydoc>=0.5
+#sklearn>=0.15.1
+#sphinx>=1.2.2
+# for running tests
+nose>=1.3.3

+ 44 - 0
code/elephant/setup.py

@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+from setuptools import setup
+import os
+
+long_description = open("README.rst").read()
+install_requires = ['neo>0.3.3',
+                    'numpy>=1.8.2',
+                    'quantities>=0.10.1',
+                    'scipy>=0.14.0',
+                    'six>=1.10.0']
+extras_require = {'pandas': ['pandas>=0.14.1'],
+                  'docs': ['numpydoc>=0.5',
+                           'sphinx>=1.2.2'],
+                  'tests': ['nose>=1.3.3']}
+
+setup(
+    name="elephant",
+    version='0.4.1',
+    packages=['elephant', 'elephant.test'],
+    package_data={
+        'elephant': [
+            os.path.join('current_source_density_src', 'test_data.mat'),
+            os.path.join('current_source_density_src', 'LICENSE'),
+            os.path.join('current_source_density_src', 'README.md'),
+            os.path.join('current_source_density_src', '*.py')]},
+
+    install_requires=install_requires,
+    extras_require=extras_require,
+
+    author="Elephant authors and contributors",
+    author_email="andrew.davison@unic.cnrs-gif.fr",
+    description="Elephant is a package for analysis of electrophysiology data in Python",
+    long_description=long_description,
+    license="BSD",
+    url='http://neuralensemble.org/elephant',
+    classifiers=[
+        'Development Status :: 4 - Beta',
+        'Intended Audience :: Science/Research',
+        'License :: OSI Approved :: BSD License',
+        'Natural Language :: English',
+        'Operating System :: OS Independent',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 3',
+        'Topic :: Scientific/Engineering']
+)

+ 38 - 0
code/example.m

@@ -0,0 +1,38 @@
+% Minimal example for MATLAB users
+
+clear all
+
+% Load data from the specially prepared matlab files
+% PLEASE NOTE: These are not the original data files, and are provided for
+% convenience only. These files contain the data structures returned by the
+% Python loading routines, converted to a matlab format. Thus, these files
+% contain a condensed, interpreted subset of the original data.
+% Note that due to file size restrictions in the MATLAB file format, raw 
+% signals are saved in separate files, channel by channel.
+data = load("../datasets_matlab/i140703-001_lfp-spikes.mat");
+
+% Plot the LFP and spikes of electrode 62 from 3s to 5s
+% Note: All annotations of the original Python data objects are saved as
+% "an_XXX" in the matlab structs
+for i=1:length(data.block.segments{1}.analogsignals)
+    if data.block.segments{1}.analogsignals{i}.an_channel_id==62
+        % LFP
+        % Sampled at 1KHz
+        time_axis = [0:1/data.block.segments{1}.analogsignals{i}.sampling_rate:(length(data.block.segments{1}.analogsignals{i}.signal)-1)/data.block.segments{1}.analogsignals{i}.sampling_rate];
+        plot(time_axis(3000:5000), data.block.segments{1}.analogsignals{i}.signal(3000:5000), 'k-');
+        hold on
+        
+        % Spikes
+        for t=1:length(data.block.segments{1}.spiketrains{i}.times)
+            % Spikes are saved in the native resolution of 30kHz
+            st = data.block.segments{1}.spiketrains{i}.times(t) / 30000;
+            if st>=3 && st<=5
+                plot(st,0,'rx');
+            end
+        end
+        xlabel("time [1/" + data.block.segments{1}.analogsignals{i}.sampling_rate_units + "]");
+        ylabel("LFP [" + data.block.segments{1}.analogsignals{i}.signal_units + "]");
+        title(data.block.segments{1}.analogsignals{i}.description);
+    end
+end
+

+ 253 - 0
code/example.py

@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+"""
+Example code for loading and processing of a recording of the reach-
+to-grasp experiments conducted at the Institute de Neurosciences de la Timone
+by Thomas Brochier and Alexa Riehle.
+
+Authors: Julia Sprenger, Lyuba Zehl, Michael Denker
+
+
+Copyright (c) 2017, Institute of Neuroscience and Medicine (INM-6),
+Forschungszentrum Juelich, Germany
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+* Neither the names of the copyright holders nor the names of the contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+# This loads the Neo and odML libraries shipped with this code. For production
+# use, please use the newest releases of odML and Neo.
+import load_local_neo_odml_elephant
+
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+import quantities as pq
+
+from neo import Block, Segment
+from elephant.signal_processing import butter
+
+from reachgraspio import reachgraspio
+from neo_utils import add_epoch, cut_segment_by_epoch, get_events
+
+
+# =============================================================================
+# Load data
+#
+# As a first step, we partially load the data file into memory as a Neo object.
+# =============================================================================
+
+# Specify the path to the recording session to load, e.g.,
+# '/home/user/l101210-001'
+session_name = os.path.join('..', 'datasets', 'i140703-001')
+# session_name = os.path.join('..', 'datasets', 'l101210-001')
+odml_dir = os.path.join('..', 'datasets')
+
+# Open the session for reading
+session = reachgraspio.ReachGraspIO(session_name, odml_directory=odml_dir)
+
+# Read the first 300s of data (time series at 1000Hz (ns2) and 30kHz (ns6)
+# scaled to units of voltage, sorted spike trains, spike waveforms and events)
+# from electrode 62 of the recording session and return it as a Neo Block. The
+# time shift of the ns2 signal (LFP) induced by the online filter is
+# automatically corrected for by a heuristic factor stored in the metadata
+# (correct_filter_shifts=True).
+data_block = session.read_block(
+    nsx_to_load='all',
+    n_starts=None, n_stops=300 * pq.s,
+    channels=[62], units='all',
+    load_events=True, load_waveforms=True, scaling='voltage',
+    correct_filter_shifts=True)
+
+# Access the single Segment of the data block, reaching up to 300s.
+assert len(data_block.segments) == 1
+data_segment = data_block.segments[0]
+
+
+# =============================================================================
+# Create offline filtered LFP
+#
+# Here, we construct one offline filtered LFP from each ns5 (monkey L) or ns6
+# (monkey N) raw recording trace. For monkey N, this filtered LFP can be
+# compared to the LFPs in the ns2 file (note that monkey L contains only
+# behavioral signals in the ns2 file). Also, we assign telling names to each
+# Neo AnalogSignal, which is used for plotting later on in this script.
+# =============================================================================
+
+filtered_anasig = []
+# Loop through all AnalogSignal objects in the loaded data
+for anasig in data_block.segments[0].analogsignals:
+    if anasig.annotations['nsx'] == 2:
+        # AnalogSignal is LFP from ns2
+        anasig.name = 'LFP (online filter, ns%i)' % anasig.annotations['nsx']
+    elif anasig.annotations['nsx'] in [5, 6]:
+        # AnalogSignal is raw signal from ns5 or ns6
+        anasig.name = 'raw (ns%i)' % anasig.annotations['nsx']
+
+        # Use the Elephant library to filter the analog signal
+        f_anasig = butter(
+                anasig,
+                highpass_freq=None,
+                lowpass_freq=250 * pq.Hz,
+                order=4)
+        f_anasig.name = 'LFP (offline filtered ns%i)' % \
+            anasig.annotations['nsx']
+        filtered_anasig.append(f_anasig)
+# Attach all offline filtered LFPs to the segment of data
+data_block.segments[0].analogsignals.extend(filtered_anasig)
+
+
+# =============================================================================
+# Construct analysis epochs
+#
+# In this step we extract and cut the data into time segments (termed analysis
+# epochs) that we wish to analyze. We contrast these analysis epochs to the
+# behavioral trials that are defined by the experiment as occurrence of a Trial
+# Start (TS-ON) event in the experiment. Concretely, here our analysis epochs
+# are constructed as a cutout of 25ms of data around the TS-ON event of all
+# successful behavioral trials.
+# =============================================================================
+
+# Get Trial Start (TS-ON) events of all successful behavioral trials
+# (corresponds to performance code 255, which is accessed for convenience and
+# better legibility in the dictionary attribute performance_codes of the
+# ReachGraspIO class).
+#
+# To this end, we filter all event objects of the loaded data to match the name
+# "TrialEvents", which is the Event object containing all Events available (see
+# documentation of ReachGraspIO). From this Event object we extract only events
+# matching "TS-ON" and the desired trial performance code (which are
+# annotations of the Event object).
+start_events = get_events(
+    data_segment,
+    properties={
+        'name': 'TrialEvents',
+        'trial_event_labels': 'TS-ON',
+        'performance_in_trial': session.performance_codes['correct_trial']})
+
+# Extract single Neo Event object containing all TS-ON triggers
+assert len(start_events) == 1
+start_event = start_events[0]
+
+# Construct analysis epochs from 10ms before the TS-ON of a successful
+# behavioral trial to 15ms after TS-ON. The name "analysis_epochs" is given to
+# the resulting Neo Epoch object. The object is not attached to the Neo
+# Segment. The parameter event2 of add_epoch() is left empty, since we are
+# cutting around a single event, as opposed to cutting between two events.
+pre = -10 * pq.ms
+post = 15 * pq.ms
+epoch = add_epoch(
+    data_segment,
+    event1=start_event, event2=None,
+    pre=pre, post=post,
+    attach_result=False,
+    name='analysis_epochs')
+
+# Create new segments of data cut according to the analysis epochs of the
+# 'analysis_epochs' Neo Epoch object. The time axes of all segments are aligned
+# such that each segment starts at time 0 (parameter reset_times); annotations
+# describing the analysis epoch are carried over to the segments. A new Neo
+# Block named "data_cut_to_analysis_epochs" is created to capture all cut
+# analysis epochs.
+cut_trial_block = Block(name="data_cut_to_analysis_epochs")
+cut_trial_block.segments = cut_segment_by_epoch(
+    data_segment, epoch, reset_time=True)
+
+# =============================================================================
+# Plot data
+# =============================================================================
+
+# Determine the first existing trial ID i from the Event object containing all
+# start events. Then, by calling the filter() function of the Neo Block
+# "data_cut_to_analysis_epochs" containing the data cut into the analysis
+# epochs, we ask to return all Segments annotated by the behavioral trial ID i.
+# In this case this call should return one matching analysis epoch around TS-ON
+# belonging to behavioral trial ID i. For monkey N, this is trial ID 1, for
+# monkey L this is trial ID 2 since trial ID 1 is not a correct trial.
+trial_id = int(np.min(start_event.annotations['trial_id']))
+trial_segments = cut_trial_block.filter(
+    targdict={"trial_id": trial_id}, objects=Segment)
+assert len(trial_segments) == 1
+trial_segment = trial_segments[0]
+
+# Create figure
+fig = plt.figure(facecolor='w')
+time_unit = pq.CompoundUnit('1./30000*s')
+amplitude_unit = pq.microvolt
+nsx_colors = ['b', 'k', 'r']
+
+# Loop through all analog signals and plot the signal in a color corresponding
+# to its sampling frequency (i.e., originating from the ns2/ns5 or ns2/ns6).
+for i, anasig in enumerate(trial_segment.analogsignals):
+        plt.plot(
+            anasig.times.rescale(time_unit),
+            anasig.rescale(amplitude_unit),
+            label=anasig.name,
+            color=nsx_colors[i])
+
+# Loop through all spike trains and plot the spike times, overlaid with the
+# spike waveforms that were used for spike sorting and are stored separately
+# in the nev file.
+for st in trial_segment.spiketrains:
+    color = np.random.rand(3,)
+    for spike_id, spike in enumerate(st):
+        # Plot spike times
+        plt.axvline(
+            spike.rescale(time_unit).magnitude,
+            color=color,
+            label='Unit ID %i' % st.annotations['unit_id'])
+        # Plot waveforms
+        waveform = st.waveforms[spike_id, 0, :]
+        waveform_times = np.arange(len(waveform))*time_unit + spike
+        plt.plot(
+            waveform_times.rescale(time_unit).magnitude,
+            waveform.rescale(amplitude_unit),
+            '--',
+            linewidth=2,
+            color=color,
+            zorder=0)
+
+# Loop through all events
+for event in trial_segment.events:
+    if event.name == 'TrialEvents':
+        for ev_id, ev in enumerate(event):
+                plt.axvline(
+                    ev,
+                    alpha=0.2,
+                    linewidth=3,
+                    linestyle='dashed',
+                    label='event ' + event.annotations[
+                        'trial_event_labels'][ev_id])
+
+# Finishing touches on the plot
+plt.autoscale(enable=True, axis='x', tight=True)
+plt.xlabel(time_unit.name)
+plt.ylabel(amplitude_unit.name)
+plt.legend(loc=4, fontsize=10)
+
+# Save plot
+fname = 'example_plot'
+for file_format in ['eps', 'png', 'pdf']:
+    fig.savefig(fname + '.%s' % file_format, dpi=400, format=file_format)

+ 10 - 0
code/load_local_neo_odml_elephant.py

@@ -0,0 +1,10 @@
+import sys
+
+# This loads the Neo and odML libraries shipped with this code. For production
+# use, please use the newest releases of odML and Neo.
+sys.path.insert(0, 'python-neo')
+sys.path.insert(0, 'python-odml')
+
+# This loads the Elephant analysis libraries shipped with this code. It
+# is used to generate the offline filtered LFP in example.py.
+sys.path.insert(0, 'elephant')

+ 877 - 0
code/neo_utils.py

@@ -0,0 +1,877 @@
+'''
+Convenience functions to extend the functionality of the Neo framework
+version 0.5.
+
+Authors: Julia Sprenger, Lyuba Zehl, Michael Denker
+
+
+Copyright (c) 2017, Institute of Neuroscience and Medicine (INM-6),
+Forschungszentrum Juelich, Germany
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+* Neither the names of the copyright holders nor the names of the contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+'''
+
+import copy
+import warnings
+import inspect
+
+import numpy as np
+import quantities as pq
+
+import neo
+
+
+def get_events(container, properties=None):
+    """
+    This function returns a list of Neo Event objects, corresponding to given
+    key-value pairs in the attributes or annotations of the Event.
+
+    Parameters:
+    -----------
+    container: neo.Block or neo.Segment
+        The Neo Block or Segment object to extract data from.
+    properties: dictionary
+        A dictionary that contains the Event keys and values to filter for.
+        Each key of the dictionary is matched to an attribute or an
+        annotation of the Event. The value of each dictionary entry corresponds to
+        a valid entry or a list of valid entries of the attribute or
+        annotation.
+
+        If the value belonging to the key is a list of entries of the same
+        length as the number of events in the Event object, the list entries
+        are matched to the events in the Event object. The resulting Event
+        object contains only those events where the values match up.
+
+        Otherwise, the value is compared to the attributes or annotation of the
+        Event object as such, and depending on the comparison, either the
+        complete Event object is returned or not.
+
+        If None or an empty dictionary is passed, all Event Objects will be
+        returned in a list.
+
+    Returns:
+    --------
+    events: list
+        A list of Event objects matching the given criteria.
+
+    Example:
+    --------
+        >>> event = neo.Event(
+                times=[0.5, 10.0, 25.2] * pq.s)
+        >>> event.annotate(
+                event_type='trial start',
+                trial_id=[1, 2, 3])
+        >>> seg = neo.Segment()
+        >>> seg.events = [event]
+
+        # Will return a list with the complete event object
+        >>> get_events(seg, properties={'event_type': 'trial start'})
+
+        # Will return an empty list
+        >>> get_events(seg, properties={'event_type': 'trial stop'})
+
+        # Will return a list with an Event object, but only with trial 2
+        >>> get_events(seg, properties={'trial_id': 2})
+
+        # Will return a list with an Event object, but only with trials 1 and 2
+        >>> get_events(seg, properties={'trial_id': [1, 2]})
+    """
+    if isinstance(container, neo.Segment):
+        return _get_from_list(container.events, prop=properties)
+
+    elif isinstance(container, neo.Block):
+        event_lst = []
+        for seg in container.segments:
+            event_lst += _get_from_list(seg.events, prop=properties)
+        return event_lst
+    else:
+        raise TypeError(
+            'Container needs to be of type neo.Block or neo.Segment, not %s '
+            'in order to extract Events.' % (type(container)))
+
+
+def get_epochs(container, properties=None):
+    """
+    This function returns a list of Neo Epoch objects, corresponding to given
+    key-value pairs in the attributes or annotations of the Epoch.
+
+    Parameters:
+    -----------
+    container: neo.Block or neo.Segment
+        The Neo Block or Segment object to extract data from.
+    properties: dictionary
+        A dictionary that contains the Epoch keys and values to filter for.
+        Each key of the dictionary is matched to an attribute or an
+        annotation of the Epoch. The value of each dictionary entry corresponds
+        to a valid entry or a list of valid entries of the attribute or
+        annotation.
+
+        If the value belonging to the key is a list of entries of the same
+        length as the number of epochs in the Epoch object, the list entries
+        are matched to the epochs in the Epoch object. The resulting Epoch
+        object contains only those epochs where the values match up.
+
+        Otherwise, the value is compared to the attribute or annotation of the
+        Epoch object as such, and depending on the comparison, either the
+        complete Epoch object is returned or not.
+
+        If None or an empty dictionary is passed, all Epoch Objects will
+        be returned in a list.
+
+    Returns:
+    --------
+    epochs: list
+        A list of Epoch objects matching the given criteria.
+
+    Example:
+    --------
+        >>> epoch = neo.Epoch(
+                times=[0.5, 10.0, 25.2] * pq.s,
+                durations=[100, 100, 100] * pq.ms)
+        >>> epoch.annotate(
+                epoch_type='complete trial',
+                trial_id=[1, 2, 3])
+        >>> seg = neo.Segment()
+        >>> seg.epochs = [epoch]
+
+        # Will return a list with the complete epoch object
+        >>> get_epochs(seg, properties={'epoch_type': 'complete trial'})
+
+        # Will return an empty list
+        >>> get_epochs(seg, properties={'epoch_type': 'error trial'})
+
+        # Will return a list with an Epoch object, but only with trial 2
+        >>> get_epochs(seg, properties={'trial_id': 2})
+
+        # Will return a list with an Epoch object, but only with trials 1 and 2
+        >>> get_epochs(seg, properties={'trial_id': [1, 2]})
+    """
+    if isinstance(container, neo.Segment):
+        return _get_from_list(container.epochs, prop=properties)
+
+    elif isinstance(container, neo.Block):
+        epoch_list = []
+        for seg in container.segments:
+            epoch_list += _get_from_list(seg.epochs, prop=properties)
+        return epoch_list
+    else:
+        raise TypeError(
+            'Container needs to be of type neo.Block or neo.Segment, not %s '
+            'in order to extract Epochs.' % (type(container)))
+
+
+def add_epoch(
+        segment, event1, event2=None, pre=0 * pq.s, post=0 * pq.s,
+        attach_result=True, **kwargs):
+    """
+    Create epochs around a single event, or between pairs of events. Start
+    and end time of each epoch can be modified using pre and post as offsets
+    before and after the event(s). Additional keywords will be directly
+    forwarded to the epoch initialization.
+
+    Parameters:
+    -----------
+    segment : neo.Segment
+        The segment in which the final Epoch object is added.
+    event1 : neo.Event
+        The Neo Event object containing the start events of the epochs. If no
+        event2 is specified, event1 also specifies the stop events, i.e.,
+        the epoch is cut around event1 times.
+    event2: neo.Event
+        The Neo Event object containing the stop events of the epochs. If no
+        event2 is specified, event1 specifies the stop events, i.e., the epoch
+        is cut around event1 times. The number of events in event2 must match
+        that of event1.
+    pre, post: Quantity (time)
+        Time offsets to modify the start (pre) and end (post) of the resulting
+        epoch. Example: pre=-10*ms and post=+25*ms will cut from 10 ms before
+        event1 times to 25 ms after event2 times
+    attach_result: bool
+        If True, the resulting Neo Epoch object is added to segment.
+
+    Keyword Arguments:
+    ------------------
+    Passed to the Neo Epoch object.
+
+    Returns:
+    --------
+    epoch: neo.Epoch
+        An Epoch object with the calculated epochs (one per entry in event1).
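+
+    Example:
+    --------
+    An illustrative sketch (the names `seg`, `ev_start` and `ev_stop` are
+    assumed to be a neo.Segment and two matched neo.Event objects of equal
+    length, e.g. as returned by match_events):
+
+        >>> ep = add_epoch(seg, event1=ev_start, event2=ev_stop,
+        ...                pre=-10 * pq.ms, post=25 * pq.ms, name='trials')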
+    """
+    if event2 is None:
+        event2 = event1
+
+    if not isinstance(segment, neo.Segment):
+        raise TypeError(
+            'Segment has to be of type neo.Segment, not %s' % type(segment))
+
+    for event in [event1, event2]:
+        if not isinstance(event, neo.Event):
+            raise TypeError(
+                'Events have to be of type neo.Event, not %s' % type(event))
+
+    if len(event1) != len(event2):
+        raise ValueError(
+            'event1 and event2 have to have the same number of entries in '
+            'order to create epochs between pairs of entries. Match your '
+            'events before generating epochs. Current event lengths '
+            'are %i and %i' % (len(event1), len(event2)))
+
+    times = event1.times + pre
+    durations = event2.times + post - times
+
+    if any(durations < 0):
+        raise ValueError(
+            'Can not create epoch with negative duration. '
+            'Requested durations %s.' % durations)
+    elif any(durations == 0):
+        raise ValueError('Can not create epoch with zero duration.')
+
+    if 'name' not in kwargs:
+        kwargs['name'] = 'epoch'
+    if 'labels' not in kwargs:
+        kwargs['labels'] = [
+            '%s_%i' % (kwargs['name'], i) for i in range(len(times))]
+
+    ep = neo.Epoch(times=times, durations=durations, **kwargs)
+
+    ep.annotate(**event1.annotations)
+
+    if attach_result:
+        segment.epochs.append(ep)
+        segment.create_relationship()
+
+    return ep
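+
+
+# Hedged usage sketch (added for illustration; not part of the original
+# module). Assuming `seg` is a neo.Segment carrying two aligned neo.Event
+# objects, e.g. cue onsets in `ev_start` and movement onsets in `ev_stop`,
+# an epoch from 10 ms before each cue to 25 ms after the corresponding
+# movement onset could be created and attached as:
+#
+#     ep = add_epoch(
+#         seg, event1=ev_start, event2=ev_stop,
+#         pre=-10 * pq.ms, post=25 * pq.ms,
+#         attach_result=True, name='trial_epoch')
+#
+# `ev_start`, `ev_stop` and the epoch name are illustrative placeholders.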
+
+
+def match_events(event1, event2):
+    """
+    Finds pairs of Event entries in event1 and event2 with the minimum delay,
+    such that the entry of event1 directly precedes the entry of event2.
+    Returns two filtered Event objects of identical length, containing only
+    the matched entries.
+
+    Parameters:
+    -----------
+    event1, event2: neo.Event
+        The two Event objects to match up.
+
+    Returns:
+    --------
+    event1, event2: neo.Event
+        Event objects with identical number of events, containing only those
+        events that could be matched against each other. A warning is issued if
+        not all events in event1 or event2 could be matched.
+    """
+
+    id1, id2 = 0, 0
+    match_ev1, match_ev2 = [], []
+    while id1 < len(event1) and id2 < len(event2):
+        time1 = event1.times[id1]
+        time2 = event2.times[id2]
+
+        # wrong order of events
+        if time1 > time2:
+            id2 += 1
+
+        # a later event1 entry would yield a shorter epoch
+        elif id1 + 1 < len(event1) and event1.times[id1 + 1] < time2:
+            # there is no entry in event2 until the next entry in event1
+            id1 += 1
+
+        # found a match
+        else:
+            match_ev1.append(id1)
+            match_ev2.append(id2)
+            id1 += 1
+            id2 += 1
+
+    if id1 < len(event1):
+        warnings.warn(
+            'Could not match all events to generate epochs. Missed '
+            '%s event entries in event1 list' % (len(event1) - id1))
+    if id2 < len(event2):
+        warnings.warn(
+            'Could not match all events to generate epochs. Missed '
+            '%s event entries in event2 list' % (len(event2) - id2))
+
+    event1_matched = _event_epoch_slice_by_valid_ids(
+        obj=event1, valid_ids=match_ev1)
+    event2_matched = _event_epoch_slice_by_valid_ids(
+        obj=event2, valid_ids=match_ev2)
+
+    return event1_matched, event2_matched
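+
+
+# Hedged usage sketch (added for illustration; not part of the original
+# module). If start and stop events were recorded independently and may
+# differ in count, they can be matched before building epochs:
+#
+#     ev_start_m, ev_stop_m = match_events(ev_start, ev_stop)
+#     ep = add_epoch(seg, ev_start_m, ev_stop_m,
+#                    pre=-10 * pq.ms, post=25 * pq.ms)
+#
+# `seg`, `ev_start` and `ev_stop` are assumed to be existing neo.Segment and
+# neo.Event objects; the offsets are arbitrary example values.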
+
+
+def cut_block_by_epochs(block, properties=None, reset_time=False):
+    """
+    This function cuts Neo Segments in a Neo Block according to multiple Neo
+    Epoch objects.
+
+    The function alters the Neo Block by adding one Neo Segment per Epoch entry
+    fulfilling a set of conditions on the Epoch attributes and annotations. The
+    original segments are removed from the block.
+
+    A dictionary of filter criteria restricts which epochs are considered for
+    the cutting procedure. To this end, it is possible to
+    specify accepted (valid) values of specific annotations on the source
+    epochs.
+
+    The resulting cut segments may either retain their original time stamps, or
+    be shifted to a common starting time.
+
+    Parameters
+    ----------
+    block: Neo Block
+        Contains the Segments to cut according to the Epoch criteria provided
+    properties: dictionary
+        A dictionary that contains the Epoch keys and values to filter for.
+        Each key of the dictionary is matched to an attribute or an
+        annotation of the Epoch. The value of each dictionary entry corresponds
+        to a valid entry or a list of valid entries of the attribute or
+        annotation.
+
+        If the value belonging to the key is a list of entries of the same
+        length as the number of epochs in the Epoch object, the list entries
+        are matched to the epochs in the Epoch object. The resulting Epoch
+        object contains only those epochs where the values match up.
+
+        Otherwise, the value is compared to the attributes or annotation of the
+        Epoch object as such, and depending on the comparison, either the
+        complete Epoch object is returned or not.
+
+        If None or an empty dictionary is passed, all Epoch objects will
+        be considered.
+
+    reset_time: bool
+        If True, the time stamps of all sliced objects are shifted to fall
+        in the range from 0 to the duration of the epoch.
+        If False, the original time stamps are retained.
+        Default is False.
+
+    Returns:
+    --------
+    None
+    """
+    if not isinstance(block, neo.Block):
+        raise TypeError(
+            'block needs to be a neo Block, not %s' % type(block))
+
+    old_segments = copy.copy(block.segments)
+    for seg in old_segments:
+        epochs = _get_from_list(seg.epochs, prop=properties)
+        if len(epochs) > 1:
+            warnings.warn(
+                'Segment %s contains multiple epochs with '
+                'requested properties (%s). Subsegments can '
+                'have overlapping times' % (seg.name, properties))
+
+        elif len(epochs) == 0:
+            warnings.warn(
+                'No epoch is matching the requested epoch properties %s. '
+                'No cutting of segment performed.' % (properties))
+
+        for epoch in epochs:
+            new_segments = cut_segment_by_epoch(
+                seg, epoch=epoch, reset_time=reset_time)
+            block.segments += new_segments
+
+        block.segments.remove(seg)
+    block.create_relationship()
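+
+
+# Hedged usage sketch (added for illustration; not part of the original
+# module). A typical trial-segmentation workflow first attaches one epoch
+# per trial to every segment and then cuts the block along exactly those
+# epochs, selected here by their name. It is assumed that the first two
+# Event objects of each segment hold the start and stop events:
+#
+#     for seg in block.segments:
+#         ev_start, ev_stop = seg.events[0], seg.events[1]
+#         add_epoch(seg, ev_start, ev_stop, name='trial_epoch')
+#     cut_block_by_epochs(block, properties={'name': 'trial_epoch'},
+#                         reset_time=True)
+#
+# `block` is assumed to exist; the epoch name is an arbitrary value that is
+# matched via the `properties` filter.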
+
+
+def cut_segment_by_epoch(seg, epoch, reset_time=False):
+    """
+    Cuts a Neo Segment according to a neo Epoch object
+
+    The function returns a list of neo Segments, where each segment corresponds
+    to an epoch in the neo Epoch object and contains the data of the original
+    Segment cut to that particular Epoch.
+
+    The resulting segments may either retain their original time stamps,
+    or can be shifted to a common time axis.
+
+    Parameters
+    ----------
+    seg: Neo Segment
+        The Segment containing the original uncut data.
+    epoch: Neo Epoch
+        For each epoch in this input, one segment is generated according to
+        the epoch time and duration.
+    reset_time: bool
+        If True, the time stamps of all sliced objects are shifted to fall
+        in the range from 0 to the duration of the epoch.
+        If False, the original time stamps are retained.
+        Default is False.
+
+    Returns:
+    --------
+    segments: list of Neo Segments
+        Per epoch in the input, a neo.Segment with AnalogSignal and/or
+        SpikeTrain Objects will be generated and returned. Each Segment will
+        receive the annotations of the corresponding epoch in the input.
+    """
+    if not isinstance(seg, neo.Segment):
+        raise TypeError(
+            'Seg needs to be of type neo.Segment, not %s' % type(seg))
+
+    if type(seg.parents[0]) != neo.Block:
+        raise ValueError(
+            'Segment has no block as parent. Can not cut segment.')
+
+    if not isinstance(epoch, neo.Epoch):
+        raise TypeError(
+            'Epoch needs to be of type neo.Epoch, not %s' % type(epoch))
+
+    segments = []
+    for ep_id in range(len(epoch)):
+        subseg = seg_time_slice(seg,
+                                epoch.times[ep_id],
+                                epoch.times[ep_id] + epoch.durations[ep_id],
+                                reset_time=reset_time)
+        # Add annotations of Epoch
+        for a in epoch.annotations:
+            if type(epoch.annotations[a]) is list \
+                    and len(epoch.annotations[a]) == len(epoch):
+                subseg.annotations[a] = copy.copy(epoch.annotations[a][ep_id])
+            else:
+                subseg.annotations[a] = copy.copy(epoch.annotations[a])
+        segments.append(subseg)
+
+    return segments
+
+
+def seg_time_slice(seg, t_start=None, t_stop=None, reset_time=False, **kwargs):
+    """
+    Creates a time slice of a neo Segment containing slices of all child
+    objects.
+
+    Parameters:
+    -----------
+    seg: neo Segment
+        The neo Segment object to slice.
+    t_start: Quantity
+        Starting time of the sliced time window.
+    t_stop: Quantity
+        Stop time of the sliced time window.
+    reset_time: bool
+        If True, the time stamps of all sliced objects are shifted to fall
+        in the range from 0 to the duration of the epoch.
+        If False, the original time stamps are retained.
+        Default is False.
+
+    Keyword Arguments:
+    ------------------
+        Additional keyword arguments used for initialization of the sliced
+        Neo Segment object.
+
+    Returns:
+    --------
+    seg: Neo Segment
+        Temporal slice of the original Neo Segment from t_start to t_stop.
+    """
+    subseg = neo.Segment(**kwargs)
+
+    for attr in [
+            'file_datetime', 'rec_datetime', 'index',
+            'name', 'description', 'file_origin']:
+        setattr(subseg, attr, getattr(seg, attr))
+
+    subseg.annotations = copy.deepcopy(seg.annotations)
+    # This would be the better definition of t_shift after incorporating
+    # PR#215 at NeuralEnsemble/python-neo
+    t_shift = seg.t_start - t_start
+
+    # t_min_id = np.argmin(np.array([a.t_start for a in seg.analogsignals]))
+    # t_shift = seg.analogsignals[t_min_id] - t_start
+
+    # cut analogsignals
+    for ana_id in range(len(seg.analogsignals)):
+        ana_time_slice = seg.analogsignals[ana_id].time_slice(t_start, t_stop)
+        # explicitly copying parents as this is not yet fixed in neo
+        # (NeuralEnsemble/python-neo issue #220)
+        ana_time_slice.segment = subseg
+        ana_time_slice.channel_index = seg.analogsignals[ana_id].channel_index
+        if reset_time:
+            ana_time_slice.t_start = ana_time_slice.t_start + t_shift
+        subseg.analogsignals.append(ana_time_slice)
+
+    # cut spiketrains
+    for st_id in range(len(seg.spiketrains)):
+        st_time_slice = seg.spiketrains[st_id].time_slice(t_start, t_stop)
+        if reset_time:
+            st_time_slice = shift_spiketrain(st_time_slice, t_shift)
+        subseg.spiketrains.append(st_time_slice)
+
+    # cut events
+    for ev_id in range(len(seg.events)):
+        ev_time_slice = event_time_slice(seg.events[ev_id], t_start, t_stop)
+        if reset_time:
+            ev_time_slice = shift_event(ev_time_slice, t_shift)
+        # appending only non-empty events
+        if len(ev_time_slice):
+            subseg.events.append(ev_time_slice)
+
+    # cut epochs
+    for ep_id in range(len(seg.epochs)):
+        ep_time_slice = epoch_time_slice(seg.epochs[ep_id], t_start, t_stop)
+        if reset_time:
+            ep_time_slice = shift_epoch(ep_time_slice, t_shift)
+        # appending only non-empty epochs
+        if len(ep_time_slice):
+            subseg.epochs.append(ep_time_slice)
+
+    # TODO: Improve
+    # seg.create_relationship(force=True)
+    return subseg
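+
+
+# Hedged usage sketch (added for illustration; not part of the original
+# module). Extract a two-second window from a segment, keeping the original
+# time stamps:
+#
+#     sub = seg_time_slice(seg, t_start=0 * pq.s, t_stop=2 * pq.s)
+#
+# `seg` is assumed to be an existing neo.Segment; the window limits are
+# arbitrary example values.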
+
+
+def shift_spiketrain(spiketrain, t_shift):
+    """
+    Shifts a spike train to start at a new time.
+
+    Parameters:
+    -----------
+    spiketrain: Neo SpikeTrain
+        SpikeTrain of which a copy will be generated with shifted spike times
+        and shifted start and stop times.
+    t_shift: Quantity (time)
+        Amount of time by which to shift the SpikeTrain.
+
+    Returns:
+    --------
+    spiketrain: Neo SpikeTrain
+        New instance of a SpikeTrain object with all times shifted by t_shift
+        (the original SpikeTrain is not modified).
+    """
+    new_st = spiketrain.duplicate_with_new_data(
+        signal=spiketrain.times.view(pq.Quantity) + t_shift,
+        t_start=spiketrain.t_start + t_shift,
+        t_stop=spiketrain.t_stop + t_shift)
+    return new_st
+
+
+def shift_event(ev, t_shift):
+    """
+    Shifts an event by an amount of time.
+
+    Parameters:
+    -----------
+    event: Neo Event
+        Event of which a copy will be generated with shifted times
+    t_shift: Quantity (time)
+        Amount of time by which to shift the Event.
+
+    Returns:
+    --------
+    event: Neo Event
+        New instance of an Event object with all times shifted by t_shift
+        relative to the original Event (the original Event is not modified).
+    """
+    return _shift_time_signal(ev, t_shift)
+
+
+def shift_epoch(epoch, t_shift):
+    """
+    Shifts an epoch by an amount of time.
+
+    Parameters:
+    -----------
+    epoch: Neo Epoch
+        Epoch of which a copy will be generated with shifted times
+    t_shift: Quantity (time)
+        Amount of time by which to shift the Epoch.
+
+    Returns:
+    --------
+    epoch: Neo Epoch
+        New instance of an Epoch object with all times shifted by t_shift
+        relative to the original Epoch (the original Epoch is not modified).
+    """
+    return _shift_time_signal(epoch, t_shift)
+
+
+def event_time_slice(event, t_start=None, t_stop=None):
+    """
+    Slices an Event object to retain only those events that fall in a certain
+    time window.
+
+    Parameters:
+    -----------
+    event: Neo Event
+        The Event to slice.
+    t_start, t_stop: Quantity (time)
+        Time window in which to retain events. An event at time t is retained
+        if t_start <= t < t_stop.
+
+    Returns:
+    --------
+    event: Neo Event
+        New instance of an Event object containing only the events in the time
+        range.
+    """
+    if t_start is None:
+        t_start = -np.inf
+    if t_stop is None:
+        t_stop = np.inf
+
+    valid_ids = np.where(np.logical_and(
+        event.times >= t_start, event.times < t_stop))[0]
+
+    new_event = _event_epoch_slice_by_valid_ids(event, valid_ids=valid_ids)
+
+    return new_event
+
+
+def epoch_time_slice(epoch, t_start=None, t_stop=None):
+    """
+    Slices an Epoch object to retain only those epochs that fall in a certain
+    time window.
+
+    Parameters:
+    -----------
+    epoch: Neo Epoch
+        The Epoch to slice.
+    t_start, t_stop: Quantity (time)
+        Time window in which to retain epochs. An epoch at time t and
+        duration d is retained if t_start <= t < t_stop - d.
+
+    Returns:
+    --------
+    epoch: Neo Epoch
+        New instance of an Epoch object containing only the epochs in the time
+        range.
+    """
+    if t_start is None:
+        t_start = -np.inf
+    if t_stop is None:
+        t_stop = np.inf
+
+    valid_ids = np.where(np.logical_and(
+        epoch.times >= t_start, epoch.times + epoch.durations < t_stop))[0]
+
+    new_epoch = _event_epoch_slice_by_valid_ids(epoch, valid_ids=valid_ids)
+
+    return new_epoch
+
+
+def _get_from_list(input_list, prop=None):
+    """
+    Internal function
+    """
+    output_list = []
+    # empty or no dictionary
+    if not prop or bool([b for b in prop.values() if b == []]):
+        output_list += [e for e in input_list]
+    # dictionary is given
+    else:
+        for ep in input_list:
+            sparse_ep = ep.copy()
+            for k in prop.keys():
+                sparse_ep = _filter_event_epoch(sparse_ep, k, prop[k])
+                # if there is nothing left, it cannot be filtered further
+                if sparse_ep is None:
+                    break
+            if sparse_ep is not None:
+                output_list.append(sparse_ep)
+    return output_list
+
+
+def _filter_event_epoch(obj, annotation_key, annotation_value):
+    """
+    Internal function.
+
+    This function returns a copy of a neo Event or Epoch object, which
+    contains only those entries whose attributes or annotations correspond to
+    the requested key-value pairs.
+
+    Parameters:
+    -----------
+    obj : neo.Event or neo.Epoch
+        The neo Event or Epoch object to filter.
+    annotation_key : string, int or float
+        The name of the annotation used to filter.
+    annotation_value : string, int, float, list or np.ndarray
+        The accepted value or list of accepted values of the attributes or
+        annotations specified by annotation_key. For each entry in obj the
+        respective annotation defined by annotation_key is compared to the
+        annotation value. The entry of obj is kept if the attribute or
+        annotation is equal or contained in annotation_value.
+
+    Returns:
+    --------
+    obj : neo.Event or neo.Epoch
+        The Event or Epoch object with every event or epoch removed that does
+        not match the filter criteria (i.e., where none of the entries in
+        annotation_value match the attribute or annotation annotation_key).
+    """
+    valid_ids = _get_valid_ids(obj, annotation_key, annotation_value)
+
+    if len(valid_ids) == 0:
+        return None
+
+    return _event_epoch_slice_by_valid_ids(obj, valid_ids)
+
+
+def _event_epoch_slice_by_valid_ids(obj, valid_ids):
+    """
+    Internal function
+    """
+    # modify annotations
+    sparse_annotations = _get_valid_annotations(obj, valid_ids)
+
+    # modify labels
+    sparse_labels = _get_valid_labels(obj, valid_ids)
+
+    if type(obj) is neo.Event:
+        sparse_obj = neo.Event(
+            times=copy.deepcopy(obj.times[valid_ids]),
+            labels=sparse_labels,
+            units=copy.deepcopy(obj.units),
+            name=copy.deepcopy(obj.name),
+            description=copy.deepcopy(obj.description),
+            file_origin=copy.deepcopy(obj.file_origin),
+            **sparse_annotations)
+    elif type(obj) is neo.Epoch:
+        sparse_obj = neo.Epoch(
+            times=copy.deepcopy(obj.times[valid_ids]),
+            durations=copy.deepcopy(obj.durations[valid_ids]),
+            labels=sparse_labels,
+            units=copy.deepcopy(obj.units),
+            name=copy.deepcopy(obj.name),
+            description=copy.deepcopy(obj.description),
+            file_origin=copy.deepcopy(obj.file_origin),
+            **sparse_annotations)
+    else:
+        raise TypeError('Can only slice Event and Epoch objects by valid IDs.')
+
+    return sparse_obj
+
+
+def _get_valid_ids(obj, annotation_key, annotation_value):
+    """
+    Internal function
+    """
+    # wrap annotation value to be list
+    if not type(annotation_value) in [list, np.ndarray]:
+        annotation_value = [annotation_value]
+
+    # get all real attributes of object
+    attributes = inspect.getmembers(obj)
+    attributes_names = [t[0] for t in attributes if not(
+        t[0].startswith('__') and t[0].endswith('__'))]
+    attributes_ids = [i for i, t in enumerate(attributes) if not(
+        t[0].startswith('__') and t[0].endswith('__'))]
+
+    # check if annotation is present
+    value_avail = False
+    if annotation_key in obj.annotations:
+        check_value = obj.annotations[annotation_key]
+        value_avail = True
+    elif annotation_key in attributes_names:
+        check_value = attributes[attributes_ids[
+            attributes_names.index(annotation_key)]][1]
+        value_avail = True
+
+    if value_avail:
+        # check if annotation is list and fits to length of object list
+        if not _is_annotation_list(check_value, len(obj)):
+            # check if annotation is single value and fits to requested value
+            if (check_value in annotation_value):
+                valid_mask = np.ones(obj.shape)
+            else:
+                valid_mask = np.zeros(obj.shape)
+                # warn about a length mismatch only if the annotation value
+                # actually has a length (e.g. a list of the wrong size)
+                if hasattr(check_value, '__len__') and \
+                        not isinstance(check_value, str):
+                    warnings.warn(
+                        'Length of annotation "%s" (%s) does not fit '
+                        'to length of object list (%s)' % (
+                            annotation_key, len(check_value), len(obj)))
+
+        # extract object entries, which match requested annotation
+        else:
+            valid_mask = np.zeros(obj.shape)
+            for obj_id in range(len(obj)):
+                if check_value[obj_id] in annotation_value:
+                    valid_mask[obj_id] = True
+    else:
+        valid_mask = np.zeros(obj.shape)
+
+    valid_ids = np.where(valid_mask)[0]
+
+    return valid_ids
+
+
+def _get_valid_annotations(obj, valid_ids):
+    """
+    Internal function
+    """
+    sparse_annotations = copy.deepcopy(obj.annotations)
+    for key in sparse_annotations:
+        if _is_annotation_list(sparse_annotations[key], len(obj)):
+            sparse_annotations[key] = list(np.array(sparse_annotations[key])[
+                valid_ids])
+    return sparse_annotations
+
+
+def _get_valid_labels(obj, valid_ids):
+    """
+    Internal function
+    """
+    labels = obj.labels
+    selected_labels = []
+    if len(labels) > 0:
+        if _is_annotation_list(labels, len(obj)):
+            for vid in valid_ids:
+                selected_labels.append(labels[vid])
+            # sparse_labels = sparse_labels[valid_ids]
+        else:
+            warnings.warn('Can not filter object labels. Shape (%s) does not '
+                          'fit object shape (%s)'
+                          '' % (labels.shape, obj.shape))
+    return np.array(selected_labels)
+
+
+def _is_annotation_list(value, exp_length):
+    """
+    Internal function
+    """
+    return (
+        (isinstance(value, list) or (
+            isinstance(value, np.ndarray) and value.ndim > 0)) and
+        (len(value) == exp_length))
+
+
+def _shift_time_signal(sig, t_shift):
+    """
+    Internal function.
+    """
+    if not hasattr(sig, 'times'):
+        raise AttributeError(
+            'Can only shift signals that have an attribute'
+            ' "times", not %s' % type(sig))
+    new_sig = sig.duplicate_with_new_data(signal=sig.times + t_shift)
+    return new_sig

+ 241 - 0
code/odml_utils.py

@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+'''
+Convenience functions to work with the odML metadata collection of the reach-
+to-grasp experiment.
+
+Authors: Julia Sprenger, Lyuba Zehl, Michael Denker
+
+
+Copyright (c) 2017, Institute of Neuroscience and Medicine (INM-6),
+Forschungszentrum Juelich, Germany
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+* Neither the names of the copyright holders nor the names of the contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+'''
+
+import itertools
+import numpy as np
+
+
+def get_TrialCount(doc, trialtype=None, performance_code=None):
+    """
+    Returns the number of trials, optionally restricted to a given trial type
+    or performance code.
+
+    Args:
+        doc (odml.doc.BaseDocument):
+            odML Document of reach-to-grasp project
+        trialtype (str or int):
+            If stated, returns only count of trials with given trial type
+        performance_code (int):
+            If stated, returns only count of trials with given performance code
+
+    Returns:
+        (int):
+            Number of specified trials
+    """
+    sec = doc['Recording']['TaskSettings']
+
+    if performance_code == 255:
+        output = sec.properties['CorrectTrialCount'].value.data
+
+    elif performance_code == 191:
+        output = sec.properties['GripErrorTrialCount'].value.data
+
+    elif performance_code in [0, 159, 161, 163, 167, 175]:
+        # not implemented yet (see TODO below); avoid a NameError at return
+        raise NotImplementedError(
+            'performance code %i is not yet supported' % performance_code)
+
+    else:
+        output = sec.properties['TotalTrialCount'].value.data
+
+    # TODO: extend to trial types and other performance codes
+
+    return output
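+
+
+# Hedged usage sketch (added for illustration; not part of the original
+# module). With `doc` being an already loaded odML document of a recording
+# session, the number of correctly performed trials (performance code 255,
+# read from the 'CorrectTrialCount' property above) could be queried as:
+#
+#     n_correct = get_TrialCount(doc, performance_code=255)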
+
+
+def get_TrialIDs(doc, idtype='TrialID'):
+    """
+    Returns a list of trial IDs
+
+    Args:
+        doc (odml.doc.BaseDocument):
+            odML Document of reach-to-grasp project
+        idtype (str):
+            Name of the trial ID property to read (default: 'TrialID')
+
+    Returns:
+        (list of int):
+            Trial id list
+    """
+    output = []
+
+    sec = doc['Recording']['TaskSettings']
+
+    def ff(x): return x.name.startswith('Trial_')
+    for trsec in sec.itersections(filter_func=ff):
+        def FF(x): return x.name == idtype
+        output.append(
+            [p for p in trsec.iterproperties(filter_func=FF)][0].value.data)
+
+    return sorted(output)
+
+
+def get_TrialType(doc, trialid, code=True):
+    """
+    Returns trial type (code or abbreviation) for wanted trial
+
+    Args:
+        doc (odml.doc.BaseDocument):
+            odML Document of reach-to-grasp project
+        trialid (int):
+            ID of wanted trial
+        code (boolean):
+            If True (default), integer code of trial type is returned
+            If False, string abbreviation of trial type is returned
+
+    Returns:
+        (int or str):
+            trial type for wanted trial
+    """
+    def ff(x): return x.name == 'Trial_%03i' % trialid
+    sec = [s for s in doc.itersections(filter_func=ff)][0]
+
+    output = sec.properties['TrialType'].value.data
+
+    return output
+
+
+def get_PerformanceCode(doc, trialid, code=True):
+    """
+    Returns the performance of the monkey in the given trial either as code or
+    abbreviation.
+
+    Args:
+        doc (odml.doc.BaseDocument):
+            odML Document of reach-to-grasp project
+        trialid (int):
+            ID of wanted trial
+        code (boolean):
+            If True (default), integer code of trial performance is returned
+            If False, abbreviation of trial performance is returned
+
+    Returns:
+        (int or string):
+            performance code or abbreviation for wanted trial
+    """
+    def ff1(x): return x.name == 'Trial_%03i' % trialid
+    sec = [s for s in doc.itersections(filter_func=ff1)][0]
+
+    def ff2(x): return x.name == 'PerformanceCode'
+    output = [p for p in sec.iterproperties(filter_func=ff2)][0].value.data
+
+    if code:
+        return output
+
+    else:
+        def ff3(x): return x.name == 'PerformanceCodes'
+        sec = [s for s in doc.itersections(filter_func=ff3)][0]
+
+        def ff4(x): return x.name == 'pc_%i' % output
+        output = [p for p in sec.iterproperties(filter_func=ff4)][0].value.data
+
+        return output
+
+
+def get_OccurringTrialTypes(doc, code=True):
+    """
+    Returns all occurring trial types (code or abbreviations)
+
+    Args:
+        doc (odml.doc.BaseDocument):
+            odML Document of reach-to-grasp project
+        code (boolean):
+            If True, integer code of trial type is returned
+            If False, string abbreviation of trial type is returned
+
+    Returns:
+        (list of int or str):
+            list of occurring trial types
+    """
+    trial_id_list = get_TrialIDs(doc)
+
+    output = np.unique([get_TrialType(doc, trid, code=code) for trid in
+                        trial_id_list]).tolist()
+
+    return output
+
+
+def get_trialids_pc(doc, performance_code):
+    """
+    Returns a list of trial IDs which have the given performance code
+
+    Args:
+        doc (odml.doc.BaseDocument):
+            odML Document of reach-to-grasp project
+        performance_code (int or str):
+            performance code of wanted trials
+
+    Returns:
+        (list of int):
+            Trial ID list with the given performance code
+    """
+    trialids = get_TrialIDs(doc)
+
+    if isinstance(performance_code, int):
+        code = True
+    else:
+        code = False
+
+    output = []
+    for trid in trialids:
+        if get_PerformanceCode(doc, trid, code) == performance_code:
+            output.append(trid)
+
+    return output
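+
+
+# Hedged usage sketch (added for illustration; not part of the original
+# module). Select the IDs of all correctly performed trials via the integer
+# performance code; passing a string abbreviation instead would compare
+# against the abbreviated codes (cf. get_PerformanceCode with code=False):
+#
+#     correct_ids = get_trialids_pc(doc, performance_code=255)
+#
+# `doc` is assumed to be an already loaded odML document.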
+
+
+def get_trialids_trty(doc, trialtype):
+    """
+    Returns a list of trial IDs which have the given trial type
+
+    Args:
+        doc (odml.doc.BaseDocument):
+            odML Document of reach-to-grasp project
+        trialtype (int or str):
+            trial type of wanted trials
+
+    Returns:
+        (list of int):
+            Trial id list with the given trial type
+    """
+    trialids = get_TrialIDs(doc)
+
+    if isinstance(trialtype, int):
+        code = True
+    else:
+        code = False
+
+    output = []
+    for trid in trialids:
+        if get_TrialType(doc, trid, code) == trialtype:
+            output.append(trid)
+
+    return output

+ 1 - 0
code/python-neo/AUTHORS

@@ -0,0 +1 @@
+See doc/source/authors.rst

+ 0 - 0
code/python-neo/CITATION.txt


Some files were not shown because too many files changed in this diff