Re-adds the changes of 4541dadca64aff36f26146e5d0843460829989c1

This reverts commit 3d981a6e16efae203515e80b738b608ab0c72008.
Julia Sprenger 5 years ago
parent
commit
ff48cadf9f
100 changed files with 7158 additions and 13354 deletions
  1. code/data_overview_1.py (+39 -39)
  2. code/data_overview_2.py (+1 -1)
  3. code/elephant/LICENSE.txt (+1 -1)
  4. code/elephant/README.rst (+1 -1)
  5. code/elephant/continuous_integration/install.sh (+40 -12)
  6. code/elephant/continuous_integration/test_script.sh (+5 -1)
  7. code/elephant/doc/authors.rst (+4 -0)
  8. code/elephant/doc/conf.py (+38 -25)
  9. code/elephant/doc/developers_guide.rst (+6 -3)
  10. code/elephant/doc/environment.yml (+5 -5)
  11. code/elephant/doc/install.rst (+2 -2)
  12. code/elephant/doc/modules.rst (+3 -2)
  13. code/elephant/doc/reference/spike_train_correlation.rst (+1 -0)
  14. code/elephant/doc/reference/spike_train_dissimilarity.rst (+1 -1)
  15. code/elephant/doc/release_notes.rst (+87 -11)
  16. code/elephant/elephant/__init__.py (+7 -3)
  17. code/elephant/elephant/asset.py (+26 -25)
  18. code/elephant/elephant/conversion.py (+3 -3)
  19. code/elephant/elephant/current_source_density.py (+8 -12)
  20. code/elephant/elephant/signal_processing.py (+135 -0)
  21. code/elephant/elephant/spike_train_correlation.py (+225 -128)
  22. code/elephant/elephant/spike_train_generation.py (+93 -0)
  23. code/elephant/elephant/sta.py (+1 -1)
  24. code/elephant/elephant/statistics.py (+117 -13)
  25. code/elephant/elephant/test/make_spike_extraction_test_data.py (+1 -1)
  26. code/elephant/elephant/test/spike_extraction_test_data.npz (BIN)
  27. code/elephant/elephant/test/test_conversion.py (+24 -1)
  28. code/elephant/elephant/test/test_csd.py (+6 -6)
  29. code/elephant/elephant/test/test_kcsd.py (+3 -3)
  30. code/elephant/elephant/test/test_pandas_bridge.py (+134 -45)
  31. code/elephant/elephant/test/test_signal_processing.py (+179 -16)
  32. code/elephant/elephant/test/test_spike_train_correlation.py (+101 -56)
  33. code/elephant/elephant/test/test_spike_train_generation.py (+77 -15)
  34. code/elephant/elephant/test/test_sta.py (+2 -2)
  35. code/elephant/elephant/test/test_statistics.py (+106 -35)
  36. code/elephant/elephant/test/test_unitary_event_analysis.py (+153 -0)
  37. code/elephant/elephant/unitary_event_analysis.py (+1 -1)
  38. code/elephant/requirements.txt (+2 -11)
  39. code/elephant/setup.py (+44 -17)
  40. code/odml_utils.py (+7 -7)
  41. code/python-neo/.gitignore (+2 -0)
  42. code/python-neo/.travis.yml (+2 -4)
  43. code/python-neo/CITATION.txt (+17 -17)
  44. code/python-neo/LICENSE.txt (+1 -1)
  45. code/python-neo/MANIFEST.in (+2 -1)
  46. code/python-neo/README.rst (+14 -5)
  47. code/python-neo/doc/source/authors.rst (+13 -2)
  48. code/python-neo/doc/source/conf.py (+53 -39)
  49. code/python-neo/doc/source/core.rst (+31 -13)
  50. code/python-neo/doc/source/developers_guide.rst (+20 -21)
  51. code/python-neo/doc/source/images/generate_diagram.py (+40 -41)
  52. code/python-neo/doc/source/index.rst (+17 -5)
  53. code/python-neo/doc/source/install.rst (+12 -7)
  54. code/python-neo/doc/source/io.rst (+15 -50)
  55. code/python-neo/doc/source/io_developers_guide.rst (+39 -53)
  56. code/python-neo/doc/source/whatisnew.rst (+7 -3)
  57. code/python-neo/examples/generated_data.py (+30 -36)
  58. code/python-neo/examples/read_files.py (+0 -47)
  59. code/python-neo/examples/simple_plot_with_matplotlib.py (+5 -7)
  60. code/python-neo/neo/__init__.py (+2 -0)
  61. code/python-neo/neo/core/analogsignal.py (+135 -222)
  62. code/python-neo/neo/core/baseneo.py (+2 -2)
  63. code/python-neo/neo/core/basesignal.py (+214 -24)
  64. code/python-neo/neo/core/container.py (+27 -19)
  65. code/python-neo/neo/core/epoch.py (+133 -57)
  66. code/python-neo/neo/core/event.py (+147 -45)
  67. code/python-neo/neo/core/irregularlysampledsignal.py (+150 -173)
  68. code/python-neo/neo/core/segment.py (+7 -5)
  69. code/python-neo/neo/core/spiketrain.py (+209 -118)
  70. code/python-neo/neo/io/__init__.py (+72 -56)
  71. code/python-neo/neo/io/alphaomegaio.py (+367 -399)
  72. code/python-neo/neo/io/asciisignalio.py (+85 -94)
  73. code/python-neo/neo/io/asciispiketrainio.py (+48 -57)
  74. code/python-neo/neo/io/axonio.py (+41 -850)
  75. code/python-neo/neo/io/baseio.py (+31 -32)
  76. code/python-neo/neo/io/blackrockio.py (+57 -2549)
  77. code/python-neo/neo/io/blackrockio_deprecated.py (+0 -483)
  78. code/python-neo/neo/io/brainvisionio.py (+8 -154)
  79. code/python-neo/neo/io/brainwaredamio.py (+15 -26)
  80. code/python-neo/neo/io/brainwaref32io.py (+10 -23)
  81. code/python-neo/neo/io/brainwaresrcio.py (+35 -47)
  82. code/python-neo/neo/io/elanio.py (+12 -371)
  83. code/python-neo/neo/io/elphyio.py (+1364 -1328)
  84. code/python-neo/neo/io/exampleio.py (+18 -314)
  85. code/python-neo/neo/io/hdf5io.py (+51 -65)
  86. code/python-neo/neo/io/igorproio.py (+28 -27)
  87. code/python-neo/neo/io/klustakwikio.py (+52 -66)
  88. code/python-neo/neo/io/kwikio.py (+50 -54)
  89. code/python-neo/neo/io/micromedio.py (+8 -216)
  90. code/python-neo/neo/io/neomatlabio.py (+52 -60)
  91. code/python-neo/neo/io/nestio.py (+86 -86)
  92. code/python-neo/neo/io/neuralynxio.py (+14 -2394)
  93. code/python-neo/neo/io/neuroexplorerio.py (+8 -327)
  94. code/python-neo/neo/io/neuroscopeio.py (+9 -120)
  95. code/python-neo/neo/io/neuroshareapiio.py (+316 -355)
  96. code/python-neo/neo/io/neurosharectypesio.py (+239 -245)
  97. code/python-neo/neo/io/nixio.py (+997 -981)
  98. code/python-neo/neo/io/nsdfio.py (+45 -50)
  99. code/python-neo/neo/io/pickleio.py (+5 -3)
  100. code/python-neo/neo/io/plexonio.py (+0 -0)

+ 39 - 39
code/data_overview_1.py

@@ -175,49 +175,49 @@ seg_raw = bl_raw.segments[0]
 seg_lfp = bl_lfp.segments[0]
 
 # Displaying loaded data structure as string output
-print "\nBlock"
-print 'Attributes ', bl_raw.__dict__.keys()
-print 'Annotations', bl_raw.annotations
-print "\nSegment"
-print 'Attributes ', seg_raw.__dict__.keys()
-print 'Annotations', seg_raw.annotations
-print "\nEvents"
+print("\nBlock")
+print('Attributes ', bl_raw.__dict__.keys())
+print('Annotations', bl_raw.annotations)
+print("\nSegment")
+print('Attributes ', seg_raw.__dict__.keys())
+print('Annotations', seg_raw.annotations)
+print("\nEvents")
 for x in seg_raw.events:
-    print '\tEvent with name', x.name
-    print '\t\tAttributes ', x.__dict__.keys()
-    print '\t\tAnnotation keys', x.annotations.keys()
-    print '\t\ttimes', x.times[:20]
+    print('\tEvent with name', x.name)
+    print('\t\tAttributes ', x.__dict__.keys())
+    print('\t\tAnnotation keys', x.annotations.keys())
+    print('\t\ttimes', x.times[:20])
     for anno_key in ['trial_id', 'trial_timestamp_id', 'trial_event_labels',
                      'trial_reject_IFC']:
-        print '\t\t'+anno_key, x.annotations[anno_key][:20]
+        print('\t\t'+anno_key, x.annotations[anno_key][:20])
 
-print "\nChannels"
+print("\nChannels")
 for x in bl_raw.channel_indexes:
-    print '\tChannel with name', x.name
-    print '\t\tAttributes ', x.__dict__.keys()
-    print '\t\tchannel_ids', x.channel_ids
-    print '\t\tchannel_names', x.channel_names
-    print '\t\tAnnotations', x.annotations
-print "\nUnits"
+    print('\tChannel with name', x.name)
+    print('\t\tAttributes ', x.__dict__.keys())
+    print('\t\tchannel_ids', x.channel_ids)
+    print('\t\tchannel_names', x.channel_names)
+    print('\t\tAnnotations', x.annotations)
+print("\nUnits")
 for x in bl_raw.list_units:
-    print '\tUnit with name', x.name
-    print '\t\tAttributes ', x.__dict__.keys()
-    print '\t\tAnnotations', x.annotations
-    print '\t\tchannel_id', x.annotations['channel_id']
+    print('\tUnit with name', x.name)
+    print('\t\tAttributes ', x.__dict__.keys())
+    print('\t\tAnnotations', x.annotations)
+    print('\t\tchannel_id', x.annotations['channel_id'])
     assert(x.annotations['channel_id'] == x.channel_index.channel_ids[0])
-print "\nSpikeTrains"
+print("\nSpikeTrains")
 for x in seg_raw.spiketrains:
-    print '\tSpiketrain with name', x.name
-    print '\t\tAttributes ', x.__dict__.keys()
-    print '\t\tAnnotations', x.annotations
-    print '\t\tchannel_id', x.annotations['channel_id']
-    print '\t\tspike times', x.times[0:20]
-print "\nAnalogSignals"
+    print('\tSpiketrain with name', x.name)
+    print('\t\tAttributes ', x.__dict__.keys())
+    print('\t\tAnnotations', x.annotations)
+    print('\t\tchannel_id', x.annotations['channel_id'])
+    print('\t\tspike times', x.times[0:20])
+print("\nAnalogSignals")
 for x in seg_raw.analogsignals:
-    print '\tAnalogSignal with name', x.name
-    print '\t\tAttributes ', x.__dict__.keys()
-    print '\t\tAnnotations', x.annotations
-    print '\t\tchannel_id', x.annotations['channel_id']
+    print('\tAnalogSignal with name', x.name)
+    print('\t\tAttributes ', x.__dict__.keys())
+    print('\t\tAnnotations', x.annotations)
+    print('\t\tchannel_id', x.annotations['channel_id'])
 
 # get start and stop events of trials
 start_events = neo_utils.get_events(
@@ -374,7 +374,7 @@ for tt in octrty:
         color = trialtype_colors[tt]
 
     B = ax1.bar(
-        left=left, height=height, width=width, color=color, linewidth=0.001)
+        x=left, height=height, width=width, color=color, linewidth=0.001, align='edge')
 
     # Mark trials of current trial type (left) if a grip error occurred
     x = [i for i in list(set(left) & set(trids_pc191))]
@@ -485,7 +485,7 @@ for spiketrain in trial_seg_raw.spiketrains:
             times.rescale(wf_time_unit)[0], times.rescale(wf_time_unit)[-1])
 
 # adding xlabels and titles
-for unit_id, ax in unit_ax_translator.iteritems():
+for unit_id, ax in unit_ax_translator.items():
     ax.set_title('unit %i (%s)' % (unit_id, unit_type[unit_id]),
                  fontdict_titles)
     ax.tick_params(direction='in', length=3, labelsize='xx-small',
@@ -605,13 +605,13 @@ trialx_sec = odmldoc['Recording']['TaskSettings']['Trial_%03i' % trialx_trid]
 
 # get correct channel id
 trialx_chids = [143]
-FSRi = trialx_sec['AnalogEvents'].properties['UsedForceSensor'].value.data
+FSRi = trialx_sec['AnalogEvents'].properties['UsedForceSensor'].values[0]
 FSRinfosec = odmldoc['Setup']['Apparatus']['TargetObject']['FSRSensor']
 if 'SG' in trialx_trty:
-    sgchids = [d.data for d in FSRinfosec.properties['SGChannelIDs'].values]
+    sgchids = FSRinfosec.properties['SGChannelIDs'].values
     trialx_chids.append(min(sgchids) if FSRi == 1 else max(sgchids))
 else:
-    pgchids = [d.data for d in FSRinfosec.properties['PGChannelIDs'].values]
+    pgchids = FSRinfosec.properties['PGChannelIDs'].values
     trialx_chids.append(min(pgchids) if FSRi == 1 else max(pgchids))
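
The odML lines above follow the odML 1.4 API, in which `Property.values` returns a plain list of values and the old `.value.data` indirection is gone. A minimal sketch of reading such a property (the file name is hypothetical; the section layout is the one used above):

    import odml

    doc = odml.load('metadata.odml')  # hypothetical file name
    fsr_sec = doc['Setup']['Apparatus']['TargetObject']['FSRSensor']
    # odML >= 1.4: Property.values is a plain list of values
    sg_channel_ids = fsr_sec.properties['SGChannelIDs'].values
    print(min(sg_channel_ids), max(sg_channel_ids))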
 
 

+ 1 - 1
code/data_overview_2.py

@@ -200,7 +200,7 @@ event_colors = {
 
 electrode_cmap = plt.get_cmap('bone')
 electrode_colors = [electrode_cmap(x) for x in
-                    np.tile(np.array([0.3, 0.7]), len(chosen_els[monkey]) / 2)]
+                    np.tile(np.array([0.3, 0.7]), int(len(chosen_els[monkey]) / 2))]
 
 time_unit = 'ms'
 lfp_unit = 'uV'
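
The added `int()` cast is needed because Python 3's `/` always returns a float, and `np.tile` rejects a float repetition count. A minimal sketch:

    import numpy as np

    n_electrodes = 6
    # Python 3: 6 / 2 == 3.0 (float); np.tile(..., 3.0) fails, so cast (or use //)
    reps = int(n_electrodes / 2)
    colors = np.tile(np.array([0.3, 0.7]), reps)  # array([0.3, 0.7, 0.3, 0.7, 0.3, 0.7])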

+ 1 - 1
code/elephant/LICENSE.txt

@@ -1,4 +1,4 @@
-Copyright (c) 2014, Elephant authors and contributors
+Copyright (c) 2014-2018, Elephant authors and contributors
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

+ 1 - 1
code/elephant/README.rst

@@ -19,5 +19,5 @@ Code status
    :target: https://readthedocs.org/projects/elephant/?badge=latest
    :alt: Documentation Status
 
-:copyright: Copyright 2014-2015 by the Elephant team, see AUTHORS.txt.
+:copyright: Copyright 2014-2018 by the Elephant team, see AUTHORS.txt.
 :license: Modified BSD License, see LICENSE.txt for details.

+ 40 - 12
code/elephant/continuous_integration/install.sh

@@ -76,17 +76,44 @@ elif [[ "$DISTRIB" == "conda" ]]; then
 
     python -c "import pandas; import os; assert os.getenv('PANDAS_VERSION') == pandas.__version__"
 
-elif [[ "$DISTRIB" == "ubuntu" ]]; then
+elif [[ "$DISTRIB" == "mpi" ]]; then
+    # Deactivate the travis-provided virtual environment and setup a
+    # conda-based environment instead
     deactivate
+
+    # Use the miniconda installer for faster download / install of conda
+    # itself
+    wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh \
+        -O miniconda.sh
+    chmod +x miniconda.sh && ./miniconda.sh -b -p $HOME/miniconda
+    export PATH=/home/travis/miniconda/bin:$PATH
+    conda config --set always_yes yes
+    conda update --yes conda
+
+    # Configure the conda environment and put it in the path using the
+    # provided versions
+    conda create -n testenv --yes python=$PYTHON_VERSION pip nose coverage six=$SIX_VERSION \
+        numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION scikit-learn mpi4py=$MPI_VERSION
+    source activate testenv
+
+    if [[ "$INSTALL_MKL" == "true" ]]; then
+        # Make sure that MKL is used
+        conda install --yes --no-update-dependencies mkl
+    else
+        # Make sure that MKL is not used
+        conda remove --yes --features mkl || echo "MKL not installed"
+    fi
+
+    if [[ "$COVERAGE" == "true" ]]; then
+        pip install coveralls
+    fi
+
+elif [[ "$DISTRIB" == "ubuntu" ]]; then
+    # deactivate
     # Create a new virtualenv using system site packages for numpy and scipy
-    virtualenv --system-site-packages testenv
-    source testenv/bin/activate
-    pip install nose
-    pip install coverage
-    pip install numpy==$NUMPY_VERSION
-    pip install scipy==$SCIPY_VERSION
-    pip install six==$SIX_VERSION
-    pip install quantities
+    # virtualenv --system-site-packages testenv
+    # source testenv/bin/activate
+    pip install -r requirements.txt    
 fi
 
 if [[ "$COVERAGE" == "true" ]]; then
@@ -102,6 +129,7 @@ popd
 
 pip install .
 
-
-python -c "import numpy; import os; assert os.getenv('NUMPY_VERSION') == numpy.__version__"
-python -c "import scipy; import os; assert os.getenv('SCIPY_VERSION') == scipy.__version__"
+if ! [[ "$DISTRIB" == "ubuntu" ]]; then
+    python -c "import numpy; import os; assert os.getenv('NUMPY_VERSION') == numpy.__version__, 'Numpy versions do not match: {0} - {1}'.format(os.getenv('NUMPY_VERSION'), numpy.__version__)"
+    python -c "import scipy; import os; assert os.getenv('SCIPY_VERSION') == scipy.__version__, 'Scipy versions do not match: {0} - {1}'.format(os.getenv('SCIPY_VERSION'), scipy.__version__)"
+fi

+ 5 - 1
code/elephant/continuous_integration/test_script.sh

@@ -13,7 +13,11 @@ python -c "import numpy; print('numpy %s' % numpy.__version__)"
 python -c "import scipy; print('scipy %s' % scipy.__version__)"
 
 if [[ "$COVERAGE" == "true" ]]; then
-    nosetests --with-coverage --cover-package=elephant
+    if [[ "$MPI" == "true" ]]; then
+	mpiexec -n 1 nosetests --with-coverage --cover-package=elephant
+    else
+	nosetests --with-coverage --cover-package=elephant
+    fi
 else
     nosetests
 fi

+ 4 - 0
code/elephant/doc/authors.rst

@@ -29,6 +29,9 @@ contribution, and may not be the current affiliation of a contributor.
 * Bartosz Telenczuk [2]
 * Chaitanya Chintaluri [9]
 * Michał Czerwiński [9]
+* Michael von Papen [1]
+* Robin Gutzen [1]
+* Felipe Méndez [10]
 
 1. Institute of Neuroscience and Medicine (INM-6), Computational and Systems Neuroscience & Institute for Advanced Simulation (IAS-6), Theoretical Neuroscience, Jülich Research Centre and JARA, Jülich, Germany
 2. Unité de Neurosciences, Information et Complexité, CNRS UPR 3293, Gif-sur-Yvette, France
@@ -39,5 +42,6 @@ contribution, and may not be the current affiliation of a contributor.
 7. Arizona State University School of Life Sciences, USA
 8. Computational Neuroscience Research Group (CNRG), Waterloo Centre for Theoretical Neuroscience, Waterloo, Canada
 9. Nencki Institute of Experimental Biology, Warsaw, Poland
+10. Instituto de Neurobiología, Universidad Nacional Autónoma de México, Mexico City, Mexico
 
 If we've somehow missed you off the list we're very sorry - please let us know.

+ 38 - 25
code/elephant/doc/conf.py

@@ -11,23 +11,29 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys, os
+import sys
+import os
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 sys.path.insert(0, '..')
 
-# -- General configuration -----------------------------------------------------
+# -- General configuration -----------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
 #needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
-              'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax',
-              'sphinx.ext.viewcode', 'numpydoc']
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.doctest',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.todo',
+    'sphinx.ext.imgmath',
+    'sphinx.ext.viewcode',
+    'numpydoc']
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -44,16 +50,16 @@ master_doc = 'index'
 # General information about the project.
 project = u'Elephant'
 authors = u'Elephant authors and contributors'
-copyright = u'2014-2017, ' + authors
+copyright = u'2014-2018, ' + authors
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '0.4'
+version = '0.6'
 # The full version, including alpha/beta/rc tags.
-release = '0.4.1'
+release = '0.6.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -90,7 +96,7 @@ pygments_style = 'sphinx'
 #modindex_common_prefix = []
 
 
-# -- Options for HTML output ---------------------------------------------------
+# -- Options for HTML output ---------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
@@ -170,24 +176,24 @@ html_use_index = True
 htmlhelp_basename = 'elephantdoc'
 
 
-# -- Options for LaTeX output --------------------------------------------------
+# -- Options for LaTeX output --------------------------------------------
 
 latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
+    # The paper size ('letterpaper' or 'a4paper').
+    #'papersize': 'letterpaper',
 
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
+    # The font size ('10pt', '11pt' or '12pt').
+    #'pointsize': '10pt',
 
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
+    # Additional stuff for the LaTeX preamble.
+    #'preamble': '',
 }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'elephant.tex', u'Elephant Documentation',
-   authors, 'manual'),
+    ('index', 'elephant.tex', u'Elephant Documentation',
+     authors, 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
@@ -211,7 +217,7 @@ latex_documents = [
 #latex_domain_indices = True
 
 
-# -- Options for manual page output --------------------------------------------
+# -- Options for manual page output --------------------------------------
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
@@ -224,14 +230,18 @@ man_pages = [
 #man_show_urls = False
 
 
-# -- Options for Texinfo output ------------------------------------------------
+# -- Options for Texinfo output ------------------------------------------
 
 # Grouping the document tree into Texinfo files. List of tuples
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    ('index', 'Elephant', u'Elephant Documentation',
-     authors, 'Elephant', 'Elephant is a package for the analysis of neurophysiology data.',
+    ('index',
+     'Elephant',
+     u'Elephant Documentation',
+     authors,
+     'Elephant',
+     'Elephant is a package for the analysis of neurophysiology data.',
      'Miscellaneous'),
 ]
 
@@ -245,7 +255,7 @@ texinfo_documents = [
 #texinfo_show_urls = 'footnote'
 
 
-# -- Options for Epub output ---------------------------------------------------
+# -- Options for Epub output ---------------------------------------------
 
 # Bibliographic Dublin Core info.
 epub_title = project
@@ -295,9 +305,11 @@ intersphinx_mapping = {'http://docs.python.org/': None}
 mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
 
 # Remove the copyright notice from docstrings:
+
+
 def process_docstring_remove_copyright(app, what, name, obj, options, lines):
     copyright_line = None
-    for i,line in enumerate(lines):
+    for i, line in enumerate(lines):
         if line.startswith(':copyright:'):
             copyright_line = i
             break
@@ -307,4 +319,5 @@ def process_docstring_remove_copyright(app, what, name, obj, options, lines):
 
 
 def setup(app):
-    app.connect('autodoc-process-docstring', process_docstring_remove_copyright)
+    app.connect('autodoc-process-docstring',
+                process_docstring_remove_copyright)

+ 6 - 3
code/elephant/doc/developers_guide.rst

@@ -181,8 +181,10 @@ Making a release
 
 .. Add a section in /doc/releases/<version>.rst for the release.
 
-First check that the version string (in :file:`elephant/__init__.py`, :file:`setup.py`,
-:file:`doc/conf.py` and :file:`doc/install.rst`) is correct.
+First, check that the version string (in :file:`elephant/__init__.py`, :file:`setup.py`,
+:file:`doc/conf.py`, and :file:`doc/install.rst`) is correct.
+
+Second, check that the copyright statement (in :file:`LICENSE.txt`, :file:`README.rst`, and :file:`doc/conf.py`) is correct.
 
 To build a source package::
 
@@ -198,7 +200,8 @@ Finally, tag the release in the Git repository and push it::
 
     $ git tag <version>
     $ git push --tags upstream
-    
+
+Here, `<version>` should be of the form `vX.Y.Z`.
 
 .. make a release branch
 

+ 5 - 5
code/elephant/doc/environment.yml

@@ -3,12 +3,12 @@ dependencies:
 - libgfortran=1.0=0
 - alabaster=0.7.7=py35_0
 - babel=2.2.0=py35_0
-- docutils=0.12=py35_0
+- docutils
 - jinja2=2.8=py35_0
 - markupsafe=0.23=py35_0
 - mkl=11.3.1=0
-- numpy=1.10.4=py35_0
-- numpydoc=0.5=py35_1
+- numpy
+- numpydoc
 - openssl=1.0.2g=0
 - pip=8.1.1=py35_0
 - pygments=2.1.1=py35_0
@@ -28,7 +28,7 @@ dependencies:
 - xz=5.0.5=1
 - zlib=1.2.8=0
 - pip:
-  - https://github.com/NeuralEnsemble/python-neo/archive/snapshot-20150821.zip
+  - neo
   - quantities
-  - sphinx-rtd-theme==0.1.9
+  - sphinx-rtd-theme
  

+ 2 - 2
code/elephant/doc/install.rst

@@ -73,8 +73,8 @@ To download and install manually, download the latest package from http://pypi.p
 
 Then::
 
-    $ tar xzf elephant-0.4.1.tar.gz
-    $ cd elephant-0.4.1
+    $ tar xzf elephant-0.6.0.tar.gz
+    $ cd elephant-0.6.0
     $ python setup.py install
     
 or::

+ 3 - 2
code/elephant/doc/modules.rst

@@ -3,11 +3,12 @@ Function Reference by Module
 ****************************
 
 .. toctree::
-   :maxdepth: 2
+   :maxdepth: 1
 
    reference/statistics
    reference/signal_processing
    reference/spectral
+   reference/current_source_density
    reference/kernels
    reference/spike_train_dissimilarity
    reference/sta
@@ -15,10 +16,10 @@ Function Reference by Module
    reference/unitary_event_analysis
    reference/cubic
    reference/asset
+   reference/cell_assembly_detection
    reference/spike_train_generation
    reference/spike_train_surrogates
    reference/conversion
-   reference/csd
    reference/neo_tools
    reference/pandas_bridge
 

+ 1 - 0
code/elephant/doc/reference/spike_train_correlation.rst

@@ -10,3 +10,4 @@ Spike train correlation
 
 .. automodule:: elephant.spike_train_correlation
    :members:
+   :exclude-members: cch, sttc

+ 1 - 1
code/elephant/doc/reference/spike_train_dissimilarity.rst

@@ -1,5 +1,5 @@
 =================================================
-Spike Train Dissimilarity / Spike Train Synchrony
+Spike train dissimilarity / spike train synchrony
 =================================================
 
 

+ 87 - 11
code/elephant/doc/release_notes.rst

@@ -2,20 +2,95 @@
 Release Notes
 *************
 
+Elephant 0.6.0 release notes
+============================
+October 12th 2018
+
+New functions
+-------------
+* `cell_assembly_detection` module
+    * New function to detect higher-order correlation structures, such as patterns in parallel spike trains, based on Russo et al., 2017.
+* **wavelet_transform()** function in the `signal_processing.py` module
+    * Function for computing the wavelet transform of a given time series, based on Le van Quyen et al. (2001)
+
+Other changes
+-------------
+* Switched to multiple `requirements.txt` files, which are read directly by `setup.py`
+* `instantaneous_rate()` now accepts a list of spike trains
+* Minor bug fixes
+
+
+Elephant 0.5.0 release notes
+============================
+April 4th 2018
+
+New functions
+-------------
+* `change_point_detection` module:
+    * New function to detect changes in the firing rate
+* `spike_train_correlation` module:
+    * New function to calculate the spike time tiling coefficient
+* `phase_analysis` module:
+    * New function to extract spike-triggered phases of an AnalogSignal
+* `unitary_event_analysis` module:
+    * Added new unit test to the UE function to verify the method based on data of a recent [Re]Science publication
+  
+Other changes
+-------------
+* Minor bug fixes
+  
+  
+Elephant 0.4.3 release notes
+============================
+March 2nd 2018
+
+Other changes
+-------------
+* Bug fixes in `spade` module:
+    * Fixed an incompatibility with the latest version of an external library
+
+  
+Elephant 0.4.2 release notes
+============================
+March 1st 2018
+
+New functions
+-------------
+* `spike_train_generation` module:
+    * **inhomogeneous_poisson()** function
+* Modules for Spatio Temporal Pattern Detection (SPADE) `spade_src`:
+    * Module SPADE: `spade.py`
+* Module `statistics.py`:
+    * Added CV2 (coefficient of variation for non-stationary time series)
+* Module `spike_train_correlation.py`:
+    * Added normalization in **cross_correlation_histogram()** (CCH)
+
+Other changes
+-------------
+* Adapted the `setup.py` to automatically install the spade modules including the compiled `C` files `fim.so`
+* Included testing environment for MPI in `travis.yml`
+* Changed function arguments in `current_source_density.py` to accept a `neo.AnalogSignal` instead of a list of `neo.AnalogSignal` objects
+* Fixes to travis and setup configuration files
+* Fixed bug in ISI function `isi()`, `statistics.py` module
+* Fixed bug in `dither_spikes()`, `spike_train_surrogates.py`
+* Minor bug fixes
+ 
+ 
 Elephant 0.4.1 release notes
 ============================
 March 23rd 2017
 
 Other changes
-=============
+-------------
 * Fix in `setup.py` to correctly import the current source density module
 
+
 Elephant 0.4.0 release notes
 ============================
 March 22nd 2017
 
 New functions
-=============
+-------------
 * `spike_train_generation` module:
     * peak detection: **peak_detection()**
 * Modules for Current Source Density: `current_source_density_src`
@@ -23,14 +98,14 @@ New functions
     * Module for Inverse Current Source Density: `icsd.py`
 
 API changes
-===========
+-----------
 * Interoperability between Neo 0.5.0 and Elephant
     * Elephant has adapted its functions to the changes in Neo 0.5.0,
       most of the functionality behaves as before
     * See Neo documentation for recent changes: http://neo.readthedocs.io/en/latest/whatisnew.html
 
 Other changes
-=============
+-------------
 * Fixes to travis and setup configuration files.
 * Minor bug fixes.
 * Added module `six` for Python 2.7 backwards compatibility
@@ -41,7 +116,7 @@ Elephant 0.3.0 release notes
 April 12th 2016
 
 New functions
-=============
+-------------
 * `spike_train_correlation` module:
     * cross correlation histogram: **cross_correlation_histogram()**
 * `spike_train_generation` module:
@@ -57,11 +132,11 @@ New functions
 * Analysis of Sequences of Synchronous EvenTs (ASSET): `asset` module
 
 API changes
-===========
+-----------
 * Function **instantaneous_rate()** now uses kernels as objects defined in the `kernels` module. The previous implementation of the function using the `make_kernel()` function is deprecated, but still temporarily available as `oldfct_instantaneous_rate()`.
 
 Other changes
-=============
+-------------
 * Fixes to travis and readthedocs configuration files.
 
 
@@ -69,6 +144,8 @@ Elephant 0.2.1 release notes
 ============================
 February 18th 2016
 
+Other changes
+-------------
 Minor bug fixes.
 
 
@@ -77,8 +154,7 @@ Elephant 0.2.0 release notes
 September 22nd 2015
 
 New functions
-=============
-
+-------------
 * Added covariance function **covariance()** in the `spike_train_correlation` module
 * Added complexity pdf **complexity_pdf()** in the `statistics` module
 * Added spike train extraction from analog signals via threshold detection the in `spike_train_generation` module
@@ -86,11 +162,11 @@ New functions
 * Added **Cumulant Based Inference for higher-order of Correlation (CuBIC)** in the `cubic` module for correlation analysis of parallel recorded spike trains
 
 API changes
-===========
+-----------
 * **Optimized kernel bandwidth** in `rate_estimation` function: Calculates the optimized kernel width when the parameter kernel width is specified as `auto`
 
 Other changes
-=============
+-------------
 * **Optimized creation of sparse matrices**: The creation speed of the sparse matrix inside the `BinnedSpikeTrain` class is optimized
 * Added **Izhikevich neuron simulator** in the `make_spike_extraction_test_data` module
 * Minor improvements to the test and continuous integration infrastructure

+ 7 - 3
code/elephant/elephant/__init__.py

@@ -2,7 +2,7 @@
 """
 Elephant is a package for the analysis of neurophysiology data, based on Neo.
 
-:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:copyright: Copyright 2014-2018 by the Elephant team, see AUTHORS.txt.
 :license: Modified BSD, see LICENSE.txt for details.
 """
 
@@ -17,9 +17,13 @@ from . import (statistics,
                spike_train_surrogates,
                signal_processing,
                current_source_density,
+               change_point_detection,
+               phase_analysis,
                sta,
                conversion,
-               neo_tools)
+               neo_tools,
+               spade,
+               cell_assembly_detection)
 
 try:
     from . import pandas_bridge
@@ -27,4 +31,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "0.4.1"
+__version__ = "0.6.0"

+ 26 - 25
code/elephant/elephant/asset.py

@@ -41,7 +41,6 @@ References:
 [1] Torre, Canova, Denker, Gerstein, Helias, Gruen (submitted)
 """
 
-
 import numpy as np
 import scipy.spatial
 import scipy.stats
@@ -52,6 +51,7 @@ import elephant.conversion as conv
 import elephant.spike_train_surrogates as spike_train_surrogates
 from sklearn.cluster import dbscan as dbscan
 
+
 # =============================================================================
 # Some Utility Functions to be dealt with in some way or another
 # =============================================================================
@@ -78,7 +78,7 @@ def _signals_same_tstart(signals):
     signals : list
         a list of signals (e.g. AnalogSignals or SpikeTrains) having
         attribute `t_start`
-
+        
     Returns
     -------
     t_start : Quantity
@@ -248,7 +248,7 @@ def _transactions(spiketrains, binsize, t_start=None, t_stop=None, ids=None):
 
     # Compute and return the transaction list
     return [[train_id for train_id, b in zip(ids, filled_bins)
-            if bin_id in b] for bin_id in _xrange(Nbins)]
+             if bin_id in b] for bin_id in _xrange(Nbins)]
 
 
 def _analog_signal_step_interp(signal, times):
@@ -379,6 +379,7 @@ def _time_slice(signal, t_start, t_stop):
 
     return sliced_signal
 
+
 # =============================================================================
 # HERE ASSET STARTS
 # =============================================================================
@@ -420,12 +421,12 @@ def intersection_matrix(
         type of normalization to be applied to each entry [i,j] of the
         intersection matrix. Given the sets s_i, s_j of neuron ids in the
         bins i, j respectively, the normalisation coefficient can be:
-        
+
             * norm = 0 or None: no normalisation (row counts)
             * norm = 1: len(intersection(s_i, s_j))
             * norm = 2: sqrt(len(s_1) * len(s_2))
             * norm = 3: len(union(s_i, s_j))
-            
+
         Default: None
 
     Returns
@@ -455,7 +456,7 @@ def intersection_matrix(
         if not (st.t_stop > t_stop_max or
                 _quantities_almost_equal(st.t_stop, t_stop_max)):
             msg = 'SpikeTrain %d is shorter than the required time ' % i + \
-                'span: t_stop (%s) < %s' % (st.t_stop, t_stop_max)
+                  'span: t_stop (%s) < %s' % (st.t_stop, t_stop_max)
             raise ValueError(msg)
 
     # For both x and y axis, cut all SpikeTrains between t_start and t_stop
@@ -628,7 +629,7 @@ def mask_matrices(matrices, thresholds):
 
     # Replace nans, coming from False * np.inf, with 0s
     # (trick to find nans in masked: a number is nan if it's not >= - np.inf)
-    mask[True - (mask >= -np.inf)] = False
+    mask[np.logical_xor(True, (mask >= -np.inf))] = False
 
     return np.array(mask, dtype=bool)
 
@@ -712,7 +713,7 @@ def cluster_matrix_entries(mat, eps=10, min=2, stretch=5):
     a neighbourhood if at least one of them has a distance not larger than
     eps from the others, and if they are at least min. Overlapping
     neighborhoods form a cluster.
-    
+
         * Clusters are assigned integers from 1 to the total number k of
           clusters
         * Unclustered ("isolated") positive elements of mat are
@@ -873,10 +874,10 @@ def probability_matrix_montecarlo(
     pmat = np.array(np.zeros(imat.shape), dtype=int)
     if verbose:
         print('pmat_bootstrap(): begin of bootstrap...')
-    for i in _xrange(n_surr):                      # For each surrogate id i
+    for i in _xrange(n_surr):  # For each surrogate id i
         if verbose:
             print('    surr %d' % i)
-        surrs_i = [st[i] for st in surrs]         # Take each i-th surrogate
+        surrs_i = [st[i] for st in surrs]  # Take each i-th surrogate
         imat_surr, xx, yy = intersection_matrix(  # compute the related imat
             surrs_i, binsize=binsize, dt=dt,
             t_start_x=t_start_x, t_start_y=t_start_y)
@@ -897,7 +898,7 @@ def probability_matrix_analytical(
 
     The approximation is analytical and works under the assumptions that the
     input spike trains are independent and Poisson. It works as follows:
-    
+
         * Bin each spike train at the specified binsize: this yields a binary
           array of 1s (spike in bin) and 0s (no spike in bin) (clipping used)
         * If required, estimate the rate profile of each spike train by 
@@ -1006,7 +1007,7 @@ def probability_matrix_analytical(
         # Reshape all rates to one-dimensional array object (e.g. AnalogSignal)
         for i, rate in enumerate(fir_rates):
             if len(rate.shape) == 2:
-                fir_rates[i] = rate.reshape((-1, ))
+                fir_rates[i] = rate.reshape((-1,))
             elif len(rate.shape) > 2:
                 raise ValueError(
                     'elements in fir_rates have too many dimensions')
@@ -1079,17 +1080,17 @@ def _jsf_uniform_orderstat_3d(u, alpha, n):
     '''
     Considered n independent random variables X1, X2, ..., Xn all having
     uniform distribution in the interval (alpha, 1):
-    
+
     .. centered::  Xi ~ Uniform(alpha, 1),
-    
+
     with alpha \in [0, 1), and given a 3D matrix U = (u_ijk) where each U_ij
     is an array of length d: U_ij = [u0, u1, ..., u_{d-1}] of
     quantiles, with u1 <= u2 <= ... <= un, computes the joint survival function
     (jsf) of the d highest order statistics (U_{n-d+1}, U_{n-d+2}, ..., U_n),
     where U_i := "i-th highest X's" at each u_ij, i.e.:
-    
+
     .. centered::  jsf(u_ij) = Prob(U_{n-k} >= u_ijk, k=0,1,..., d-1).
-    
+
 
     Arguments
     ---------
@@ -1415,7 +1416,7 @@ def sse_intersection(sse1, sse2, intersection='linkwise'):
     consisting of a pool of positions (iK, jK) of matrix entries and
     associated synchronous events SK, finds the intersection among them.
     The intersection can be performed 'pixelwise' or 'linkwise'.
-        
+
         * if 'pixelwise', it yields a new SSE which retains only events in sse1
           whose pixel position matches a pixel position in sse2. This operation
           is not symmetric: intersection(sse1, sse2) != intersection(sse2, sse1).
@@ -1425,9 +1426,9 @@ def sse_intersection(sse1, sse2, intersection='linkwise'):
           intersection(sse1, sse2) = intersection(sse2, sse1).
 
     Both sse1 and sse2 must be provided as dictionaries of the type
-    
+
     .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
-    
+
     where each i, j is an integer and each S is a set of neuron ids.
     (See also: extract_sse() that extracts SSEs from given spiketrains).
 
@@ -1535,9 +1536,9 @@ def _remove_empty_events(sse):
     copy of sse where all empty events have been removed.
 
     sse must be provided as a dictionary of type
-    
+
     .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
-    
+
     where each i, j is an integer and each S is a set of neuron ids.
     (See also: extract_sse() that extracts SSEs from given spiketrains).
 
@@ -1571,9 +1572,9 @@ def sse_isequal(sse1, sse2):
     do not belong to sse1 (i.e. sse1 and sse2 are not identical)
 
     Both sse1 and sse2 must be provided as dictionaries of the type
-    
+
     .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
-    
+
     where each i, j is an integer and each S is a set of neuron ids.
     (See also: extract_sse() that extracts SSEs from given spiketrains).
 
@@ -1606,9 +1607,9 @@ def sse_isdisjoint(sse1, sse2):
     associated to common pixels are disjoint.
 
     Both sse1 and sse2 must be provided as dictionaries of the type
-    
+
     .. centered:: {(i1, j1): S1, (i2, j2): S2, ..., (iK, jK): SK},
-    
+
     where each i, j is an integer and each S is a set of neuron ids.
     (See also: extract_sse() that extracts SSEs from given spiketrains).
 

+ 3 - 3
code/elephant/elephant/conversion.py

@@ -581,10 +581,10 @@ class BinnedSpikeTrain(object):
             are returned as a quantity array.
 
         """
-        return pq.Quantity(np.linspace(self.t_start.magnitude,
-                                       self.t_stop.magnitude,
+        return pq.Quantity(np.linspace(self.t_start.rescale('s').magnitude,
+                                       self.t_stop.rescale('s').magnitude,
                                        self.num_bins + 1, endpoint=True),
-                           units=self.binsize.units)
+                           units='s').rescale(self.binsize.units)
 
     @property
     def bin_centers(self):
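
The `bin_edges` fix rescales `t_start` and `t_stop` to a common unit before building the edge array, so that a `BinnedSpikeTrain` whose `t_start`/`t_stop` carry different units than `binsize` still yields correct edges. A standalone sketch of the corrected computation (pure `quantities`/NumPy, names as above):

    import numpy as np
    import quantities as pq

    t_start, t_stop = 0 * pq.ms, 2000 * pq.ms
    binsize, num_bins = 1 * pq.s, 2
    edges = pq.Quantity(
        np.linspace(t_start.rescale('s').magnitude,
                    t_stop.rescale('s').magnitude,
                    num_bins + 1, endpoint=True),
        units='s').rescale(binsize.units)
    # edges == [0., 1., 2.] s; the old code would have produced [0., 1000., 2000.] s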

+ 8 - 12
code/elephant/elephant/current_source_density.py

@@ -68,7 +68,7 @@ def estimate_csd(lfp, coords=None, method=None,
 
     Parameters
     ----------
-    lfp : list(neo.AnalogSignal type objects)
+    lfp : neo.AnalogSignal
         positions of electrodes can be added as neo.RecordingChannel
         coordinate or sent externally as a func argument (See coords)
     coords : [Optional] corresponding spatial coordinates of the electrodes
@@ -95,7 +95,7 @@ def estimate_csd(lfp, coords=None, method=None,
     Returns
     -------
     Estimated CSD
-       neo.AnalogSignal Object
+       neo.AnalogSignal object
        annotated with the spatial coordinates
 
     Raises
@@ -109,12 +109,9 @@ def estimate_csd(lfp, coords=None, method=None,
         Invalid cv_param argument passed
     """
     if not isinstance(lfp, neo.AnalogSignal):
-        raise TypeError('Parameter `lfp` must be a list(neo.AnalogSignal \
-                         type objects')
+        raise TypeError('Parameter `lfp` must be a neo.AnalogSignal object')
     if coords is None:
         coords = lfp.channel_index.coordinates
-        # for ii in lfp:
-        #     coords.append(ii.channel_index.coordinate.rescale(pq.mm))
     else:
         scaled_coords = []
         for coord in coords:
@@ -126,7 +123,7 @@ def estimate_csd(lfp, coords=None, method=None,
         coords = scaled_coords
     if method is None:
         raise ValueError('Must specify a method of CSD implementation')
-    if len(coords) != len(lfp):
+    if len(coords) != lfp.shape[1]:
         raise ValueError('Number of signals and coords is not same')
     for ii in coords:  # CHECK for Dimensionality of electrodes
         if len(ii) > 3:
@@ -148,7 +145,7 @@ def estimate_csd(lfp, coords=None, method=None,
         kernel_method = getattr(KCSD, method)  # fetch the class 'KCSD1D'
         lambdas = kwargs.pop('lambdas', None)
         Rs = kwargs.pop('Rs', None)
-        k = kernel_method(np.array(coords), input_array, **kwargs)
+        k = kernel_method(np.array(coords), input_array.T, **kwargs)
         if process_estimate:
             k.cross_validate(lambdas, Rs)
         estm_csd = k.values()
@@ -187,7 +184,7 @@ def estimate_csd(lfp, coords=None, method=None,
         lfp = neo.AnalogSignal(np.asarray(lfp).T, units=lfp.units,
                                     sampling_rate=lfp.sampling_rate)
         csd_method = getattr(icsd, method)  # fetch class from icsd.py file
-        csd_estimator = csd_method(lfp=lfp.magnitude.T * lfp.units,
+        csd_estimator = csd_method(lfp=lfp.magnitude * lfp.units,
                                    coord_electrode=coords.flatten(),
                                    **kwargs)
         csd_pqarr = csd_estimator.get_csd()
@@ -238,7 +235,7 @@ def generate_lfp(csd_profile, ele_xx, ele_yy=None, ele_zz=None,
 
         Returns
         -------
-        LFP : list(neo.AnalogSignal type objects)
+        LFP : neo.AnalogSignal object
            The potentials created by the csd profile at the electrode positions
           The electrode positions are attached as RecordingChannel's coordinate
     """
@@ -324,8 +321,7 @@ def generate_lfp(csd_profile, ele_xx, ele_yy=None, ele_zz=None,
     ch = neo.ChannelIndex(index=range(len(pots)))
     for ii in range(len(pots)):
         lfp.append(pots[ii])
-    # lfp = neo.AnalogSignal(lfp, sampling_rate=1000*pq.Hz, units='mV')
-    asig = neo.AnalogSignal(lfp, sampling_rate=pq.kHz, units='mV')
+    asig = neo.AnalogSignal(np.array(lfp).T, sampling_rate=pq.kHz, units='mV')
     ch.coordinates = ele_pos
     ch.analogsignals.append(asig)
     ch.create_relationship()
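
With these changes, `estimate_csd()` takes a single multi-channel `neo.AnalogSignal` (time along axis 0, electrodes along axis 1) instead of a list of single-channel signals, and the number of coordinates must equal `lfp.shape[1]`. A minimal sketch of preparing such an input (random data and placeholder geometry; the commented call assumes the kCSD method name referenced above):

    import numpy as np
    import quantities as pq
    import neo

    n_t, n_ch = 1000, 16
    lfp = neo.AnalogSignal(np.random.randn(n_t, n_ch),
                           units='mV', sampling_rate=1 * pq.kHz)
    coords = np.linspace(0., 1.5, n_ch).reshape(-1, 1) * pq.mm  # one coordinate per electrode
    assert len(coords) == lfp.shape[1]
    # csd = estimate_csd(lfp, coords=coords, method='KCSD1D')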

+ 135 - 0
code/elephant/elephant/signal_processing.py

@@ -239,6 +239,141 @@ def butter(signal, highpass_freq=None, lowpass_freq=None, order=4,
         return filtered_data
 
 
+def wavelet_transform(signal, freq, nco=6.0, fs=1.0, zero_padding=True):
+    """
+    Compute the wavelet transform of a given signal with Morlet mother wavelet.
+    The parametrization of the wavelet is based on [1].
+
+    Parameters
+    ----------
+    signal : neo.AnalogSignal or array_like
+        Time series data to be wavelet-transformed. When multi-dimensional
+        array_like is given, the time axis must be the last dimension of
+        the array_like.
+    freq : float or list of floats
+        Center frequency of the Morlet wavelet in Hz. Multiple center
+        frequencies can be given as a list, in which case the function
+        computes the wavelet transforms for all the given frequencies at once.
+    nco : float (optional)
+        Size of the mother wavelet (approximate number of oscillation cycles
+        within a wavelet; related to the wavelet number w as w ~ 2 pi nco / 6),
+        as defined in [1]. A larger nco value leads to a higher frequency
+        resolution and a lower temporal resolution, and vice versa. Typically
+        used values are in a range of 3 - 8, but one should be cautious when
+        using a value smaller than ~ 6, in which case the admissibility of the
+        wavelet is not ensured (cf. [2]). Default value is 6.0.
+    fs : float (optional)
+        Sampling rate of the input data in Hz. When `signal` is given as an
+        AnalogSignal, the sampling frequency is taken from its attribute and
+        this parameter is ignored. Default value is 1.0.
+    zero_padding : bool (optional)
+        Specifies whether the data length is extended to the least power of
+        2 greater than the original length, by padding zeros to the tail, for
+        speeding up the computation. In the case of True, the extended part is
+        cut out from the final result before returned, so that the output
+        has the same length as the input. Default is True.
+
+    Returns
+    -------
+    signal_wt: complex array
+        Wavelet transform of the input data. When `freq` was given as a list,
+        the way how the wavelet transforms for different frequencies are
+        returned depends on the input type. When the input was an AnalogSignal
+        of shape (Nt, Nch), where Nt and Nch are the numbers of time points and
+        channels, respectively, the returned array has a shape (Nt, Nch, Nf),
+        where Nf = `len(freq)`, such that the last dimension indexes the
+        frequencies. When the input was an array_like of shape
+        (a, b, ..., c, Nt), the returned array has a shape
+        (a, b, ..., c, Nf, Nt), such that the second last dimension indexes the
+        frequencies.
+        To summarize, `signal_wt.ndim` = `signal.ndim` + 1, with the additional
+        dimension in the last axis (for AnalogSignal input) or the second last
+        axis (for array_like input) indexing the frequencies.
+
+    Raises
+    ------
+    ValueError
+        If `freq` (or one of the values in `freq` when it is a list) is greater
+        than the half of `fs`, or `nco` is not positive.
+
+    References
+    ----------
+    1. Le van Quyen et al. J Neurosci Meth 111:83-98 (2001)
+    2. Farge, Annu Rev Fluid Mech 24:395-458 (1992)
+    """
+    def _morlet_wavelet_ft(freq, nco, fs, n):
+        # Generate the Fourier transform of Morlet wavelet as defined
+        # in Le van Quyen et al. J Neurosci Meth 111:83-98 (2001).
+        sigma = nco / (6. * freq)
+        freqs = np.fft.fftfreq(n, 1.0 / fs)
+        heaviside = np.array(freqs > 0., dtype=np.float)
+        ft_real = np.sqrt(2 * np.pi * freq) * sigma * np.exp(
+            -2 * (np.pi * sigma * (freqs - freq)) ** 2) * heaviside * fs
+        ft_imag = np.zeros_like(ft_real)
+        return ft_real + 1.0j * ft_imag
+
+    data = np.asarray(signal)
+    # When the input is AnalogSignal, the axis for time index (i.e. the
+    # first axis) needs to be rolled to the last
+    if isinstance(signal, neo.AnalogSignal):
+        data = np.rollaxis(data, 0, data.ndim)
+
+    # When the input is AnalogSignal, use its attribute to specify the
+    # sampling frequency
+    if hasattr(signal, 'sampling_rate'):
+        fs = signal.sampling_rate
+    if isinstance(fs, pq.quantity.Quantity):
+        fs = fs.rescale('Hz').magnitude
+
+    if isinstance(freq, (list, tuple, np.ndarray)):
+        freqs = np.asarray(freq)
+    else:
+        freqs = np.array([freq,])
+    if isinstance(freqs[0], pq.quantity.Quantity):
+        freqs = [f.rescale('Hz').magnitude for f in freqs]
+
+    # check whether the given central frequencies are less than the
+    # Nyquist frequency of the signal
+    if np.any(freqs >= fs / 2):
+        raise ValueError("`freq` must be less than the half of " +
+                         "the sampling rate `fs` = {} Hz".format(fs))
+
+    # check if nco is positive
+    if nco <= 0:
+        raise ValueError("`nco` must be positive")
+
+    n_orig = data.shape[-1]
+    if zero_padding:
+        n = 2 ** (int(np.log2(n_orig)) + 1)
+    else:
+        n = n_orig
+
+    # generate Morlet wavelets (in the frequency domain)
+    wavelet_fts = np.empty([len(freqs), n], dtype=np.complex)
+    for i, f in enumerate(freqs):
+        wavelet_fts[i] = _morlet_wavelet_ft(f, nco, fs, n)
+
+    # perform wavelet transform by convoluting the signal with the wavelets
+    if data.ndim == 1:
+        data = np.expand_dims(data, 0)
+    data = np.expand_dims(data, data.ndim-1)
+    data = np.fft.ifft(np.fft.fft(data, n) * wavelet_fts)
+    signal_wt = data[..., 0:n_orig]
+
+    # reshape the result array according to the input
+    if isinstance(signal, neo.AnalogSignal):
+        signal_wt = np.rollaxis(signal_wt, -1)
+        if not isinstance(freq, (list, tuple, np.ndarray)):
+            signal_wt = signal_wt[..., 0]
+    else:
+        if signal.ndim == 1:
+            signal_wt = signal_wt[0]
+        if not isinstance(freq, (list, tuple, np.ndarray)):
+            signal_wt = signal_wt[..., 0, :]
+
+    return signal_wt
+
+
 def hilbert(signal, N='nextpow'):
     '''
     Apply a Hilbert transform to an AnalogSignal object in order to obtain its
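
A minimal usage sketch for the new `wavelet_transform()` added above (signature as shown for Elephant 0.6; the test signal is made up):

    import numpy as np
    import quantities as pq
    import neo
    from elephant.signal_processing import wavelet_transform

    fs = 1000.0                                   # Hz
    t = np.arange(0, 1.0, 1.0 / fs)
    sig = neo.AnalogSignal(np.sin(2 * np.pi * 30 * t)[:, np.newaxis],
                           units='mV', sampling_rate=fs * pq.Hz)

    # wavelet transform at three centre frequencies; result shape is (Nt, Nch, Nf)
    wt = wavelet_transform(sig, freq=[10.0, 30.0, 50.0], nco=6.0)
    power = np.abs(wt) ** 2                       # time-resolved power per frequency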

+ 225 - 128
code/elephant/elephant/spike_train_correlation.py

@@ -249,8 +249,8 @@ def __calculate_correlation_or_covariance(binned_sts, binary, corrcoef_norm):
 
 
 def cross_correlation_histogram(
-        binned_st1, binned_st2, window='full', border_correction=False, binary=False,
-        kernel=None, method='speed'):
+        binned_st1, binned_st2, window='full', border_correction=False,
+        binary=False, kernel=None, method='speed', cross_corr_coef=False):
     """
     Computes the cross-correlation histogram (CCH) between two binned spike
     trains binned_st1 and binned_st2.
@@ -260,7 +260,7 @@ def cross_correlation_histogram(
     binned_st1, binned_st2 : BinnedSpikeTrain
         Binned spike trains to cross-correlate. The two spike trains must have
         same t_start and t_stop
-    window : string or list (optional)
+    window : string or list of integer (optional)
         ‘full’: This returns the crosscorrelation at each point of overlap,
         with an output shape of (N+M-1,). At the end-points of the
         cross-correlogram, the signals do not overlap completely, and
@@ -269,12 +269,11 @@ def cross_correlation_histogram(
         The cross-correlation product is only given for points where the
         signals overlap completely.
         Values outside the signal boundary have no effect.
+        list of integers (window[0] = minimum lag, window[1] = maximum lag):
+        the entries of window are two integers representing the left and
+        right extremes (expressed as number of bins) at which the
+        cross-correlation is computed
         Default: 'full'
-        list of integer of of quantities (window[0]=minimum, window[1]=maximum
-        lag): The  entries of window can be integer (number of bins) or
-        quantities (time units of the lag), in the second case they have to be
-        a multiple of the binsize
-        Default: 'Full'
     border_correction : bool (optional)
         whether to correct for the border effect. If True, the value of the
         CCH at bin b (for b=-H,-H+1, ...,H, where H is the CCH half-length)
@@ -307,18 +306,24 @@ def cross_correlation_histogram(
         implementation to calculate the correlation based on sparse matrices,
         which is more memory efficient but slower than the "speed" option.
         Default: "speed"
+    cross_corr_coef : bool (optional)
+        Normalizes the CCH to obtain the cross-correlation coefficient
+        function ranging from -1 to 1 according to Equation (5.10) in
+        "Analysis of parallel spike trains", 2010, Gruen & Rotter, Vol 7
 
     Returns
     -------
     cch : AnalogSignal
-        Containing the cross-correlation histogram between binned_st1 and binned_st2.
+        Containing the cross-correlation histogram between binned_st1 and
+        binned_st2.
 
         The central bin of the histogram represents correlation at zero
         delay. Offset bins correspond to correlations at a delay equivalent
-        to the difference between the spike times of binned_st1 and those of binned_st2: an
-        entry at positive lags corresponds to a spike in binned_st2 following a
-        spike in binned_st1 bins to the right, and an entry at negative lags
-        corresponds to a spike in binned_st1 following a spike in binned_st2.
+        to the difference between the spike times of binned_st1 and those of
+        binned_st2: an entry at positive lags corresponds to a spike in
+        binned_st2 following a spike in binned_st1 bins to the right, and an
+        entry at negative lags corresponds to a spike in binned_st1 following
+        a spike in binned_st2.
 
         To illustrate this definition, consider the two spike trains:
         binned_st1: 0 0 0 0 1 0 0 0 0 0 0
@@ -351,7 +356,8 @@ def cross_correlation_histogram(
                     10. * pq.Hz, t_start=0 * pq.ms, t_stop=5000 * pq.ms),
                 binsize=5. * pq.ms)
 
-        >>> cc_hist = elephant.spike_train_correlation.cross_correlation_histogram(
+        >>> cc_hist = \
+            elephant.spike_train_correlation.cross_correlation_histogram(
                 binned_st1, binned_st2, window=[-30,30],
                 border_correction=False,
                 binary=False, kernel=None, method='memory')
@@ -369,6 +375,23 @@ def cross_correlation_histogram(
     -----
     cch
     """
+
+    def _cross_corr_coef(cch_result, binned_st1, binned_st2):
+        # Normalizes the CCH to obtain the cross-correlation
+        # coefficient function ranging from -1 to 1
+        N = max(binned_st1.num_bins, binned_st2.num_bins)
+        Nx = len(binned_st1.spike_indices[0])
+        Ny = len(binned_st2.spike_indices[0])
+        spmat = [binned_st1.to_sparse_array(), binned_st2.to_sparse_array()]
+        bin_counts_unique = []
+        for s in spmat:
+            bin_counts_unique.append(s.data)
+        ii = np.dot(bin_counts_unique[0], bin_counts_unique[0])
+        jj = np.dot(bin_counts_unique[1], bin_counts_unique[1])
+        rho_xy = (cch_result - Nx * Ny / N) / \
+            np.sqrt((ii - Nx**2. / N) * (jj - Ny**2. / N))
+        return rho_xy
+
     def _border_correction(counts, max_num_bins, l, r):
         # Correct the values taking into account lacking contributes
         # at the edges
@@ -394,55 +417,12 @@ def cross_correlation_histogram(
         # Smooth the cross-correlation histogram with the kern
         return np.convolve(counts, kern, mode='same')
 
-    def _cch_memory(binned_st1, binned_st2, win, border_corr, binary, kern):
+    def _cch_memory(binned_st1, binned_st2, left_edge, right_edge,
+                    border_corr, binary, kern):
 
         # Retrieve unclipped matrix
         st1_spmat = binned_st1.to_sparse_array()
         st2_spmat = binned_st2.to_sparse_array()
-        binsize = binned_st1.binsize
-        max_num_bins = max(binned_st1.num_bins, binned_st2.num_bins)
-
-        # Set the time window in which is computed the cch
-        if not isinstance(win, str):
-            # Window parameter given in number of bins (integer)
-            if isinstance(win[0], int) and isinstance(win[1], int):
-                # Check the window parameter values
-                if win[0] >= win[1] or win[0] <= -max_num_bins \
-                        or win[1] >= max_num_bins:
-                    raise ValueError(
-                        "The window exceeds the length of the spike trains")
-                # Assign left and right edges of the cch
-                l, r = win[0], win[1]
-            # Window parameter given in time units
-            else:
-                # Check the window parameter values
-                if win[0].rescale(binsize.units).magnitude % \
-                    binsize.magnitude != 0 or win[1].rescale(
-                        binsize.units).magnitude % binsize.magnitude != 0:
-                    raise ValueError(
-                        "The window has to be a multiple of the binsize")
-                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
-                        or win[1] >= max_num_bins * binsize:
-                    raise ValueError("The window exceeds the length of the"
-                                     " spike trains")
-                # Assign left and right edges of the cch
-                l, r = int(win[0].rescale(binsize.units) / binsize), int(
-                    win[1].rescale(binsize.units) / binsize)
-        # Case without explicit window parameter
-        elif window == 'full':
-            # cch computed for all the possible entries
-            # Assign left and right edges of the cch
-            r = binned_st2.num_bins - 1
-            l = - binned_st1.num_bins + 1
-            # cch compute only for the entries that completely overlap
-        elif window == 'valid':
-            # cch computed only for valid entries
-            # Assign left and right edges of the cch
-            r = max(binned_st2.num_bins - binned_st1.num_bins, 0)
-            l = min(binned_st2.num_bins - binned_st1.num_bins, 0)
-        # Check the mode parameter
-        else:
-            raise KeyError("Invalid window parameter")
 
         # For each row, extract the nonzero column indices
         # and the corresponding # data in the matrix (for performance reasons)
@@ -461,25 +441,28 @@ def cross_correlation_histogram(
         # Initialize the counts to an array of zeroes,
         # and the bin IDs to integers
         # spanning the time axis
-        counts = np.zeros(np.abs(l) + np.abs(r) + 1)
-        bin_ids = np.arange(l, r + 1)
-        # Compute the CCH at lags in l,...,r only
+        counts = np.zeros(np.abs(left_edge) + np.abs(right_edge) + 1)
+        bin_ids = np.arange(left_edge, right_edge + 1)
+        # Compute the CCH at lags in left_edge,...,right_edge only
         for idx, i in enumerate(st1_bin_idx_unique):
-            il = np.searchsorted(st2_bin_idx_unique, l + i)
-            ir = np.searchsorted(st2_bin_idx_unique, r + i, side='right')
+            il = np.searchsorted(st2_bin_idx_unique, left_edge + i)
+            ir = np.searchsorted(st2_bin_idx_unique,
+                                 right_edge + i, side='right')
             timediff = st2_bin_idx_unique[il:ir] - i
-            assert ((timediff >= l) & (timediff <= r)).all(), 'Not all the '
+            assert ((timediff >= left_edge) & (
+                timediff <= right_edge)).all(), 'Not all the '
             'entries of cch lie in the window'
-            counts[timediff + np.abs(l)] += (st1_bin_counts_unique[idx] *
-                                             st2_bin_counts_unique[il:ir])
+            counts[timediff + np.abs(left_edge)] += (
+                    st1_bin_counts_unique[idx] * st2_bin_counts_unique[il:ir])
             st2_bin_idx_unique = st2_bin_idx_unique[il:]
             st2_bin_counts_unique = st2_bin_counts_unique[il:]
         # Border correction
         if border_corr is True:
-            counts = _border_correction(counts, max_num_bins, l, r)
+            counts = _border_correction(
+                counts, max_num_bins, left_edge, right_edge)
         if kern is not None:
             # Smoothing
-            counts = _kernel_smoothing(counts, kern, l, r)
+            counts = _kernel_smoothing(counts, kern, left_edge, right_edge)
         # Transform the array count into an AnalogSignal
         cch_result = neo.AnalogSignal(
             signal=counts.reshape(counts.size, 1),
@@ -490,75 +473,34 @@ def cross_correlation_histogram(
         # central one
         return cch_result, bin_ids
 
-    def _cch_speed(binned_st1, binned_st2, win, border_corr, binary, kern):
+    def _cch_speed(binned_st1, binned_st2, left_edge, right_edge, cch_mode,
+                   border_corr, binary, kern):
 
-        # Retrieve the array of the binne spik train
+        # Retrieve the array of the binned spike train
         st1_arr = binned_st1.to_array()[0, :]
         st2_arr = binned_st2.to_array()[0, :]
-        binsize = binned_st1.binsize
 
         # Convert the to binary version
         if binary:
             st1_arr = np.array(st1_arr > 0, dtype=int)
             st2_arr = np.array(st2_arr > 0, dtype=int)
-        max_num_bins = max(len(st1_arr), len(st2_arr))
-
-        # Cross correlate the spiketrains
-
-        # Case explicit temporal window
-        if not isinstance(win, str):
-            # Window parameter given in number of bins (integer)
-            if isinstance(win[0], int) and isinstance(win[1], int):
-                # Check the window parameter values
-                if win[0] >= win[1] or win[0] <= -max_num_bins \
-                        or win[1] >= max_num_bins:
-                    raise ValueError(
-                        "The window exceed the length of the spike trains")
-                # Assign left and right edges of the cch
-                l, r = win
-            # Window parameter given in time units
-            else:
-                # Check the window parameter values
-                if win[0].rescale(binsize.units).magnitude % \
-                    binsize.magnitude != 0 or win[1].rescale(
-                        binsize.units).magnitude % binsize.magnitude != 0:
-                    raise ValueError(
-                        "The window has to be a multiple of the binsize")
-                if win[0] >= win[1] or win[0] <= -max_num_bins * binsize \
-                        or win[1] >= max_num_bins * binsize:
-                    raise ValueError("The window exceed the length of the"
-                                     " spike trains")
-                # Assign left and right edges of the cch
-                l, r = int(win[0].rescale(binsize.units) / binsize), int(
-                    win[1].rescale(binsize.units) / binsize)
-
-            # Zero padding
-            st1_arr = np.pad(
-                st1_arr, (int(np.abs(np.min([l, 0]))), np.max([r, 0])),
-                mode='constant')
+        if cch_mode == 'pad':
+            # Zero padding to stay between left_edge and right_edge
+            st1_arr = np.pad(st1_arr,
+                             (int(np.abs(np.min([left_edge, 0]))), np.max(
+                                 [right_edge, 0])),
+                             mode='constant')
             cch_mode = 'valid'
-        else:
-            # Assign the edges of the cch for the different mode parameters
-            if win == 'full':
-                # Assign left and right edges of the cch
-                r = binned_st2.num_bins - 1
-                l = - binned_st1.num_bins + 1
-            # cch compute only for the entries that completely overlap
-            elif win == 'valid':
-                # Assign left and right edges of the cch
-                r = max(binned_st2.num_bins - binned_st1.num_bins, 0)
-                l = min(binned_st2.num_bins - binned_st1.num_bins, 0)
-            cch_mode = win
-
         # Cross correlate the spike trains
         counts = np.correlate(st2_arr, st1_arr, mode=cch_mode)
-        bin_ids = np.r_[l:r + 1]
+        bin_ids = np.r_[left_edge:right_edge + 1]
         # Border correction
         if border_corr is True:
-            counts = _border_correction(counts, max_num_bins, l, r)
+            counts = _border_correction(
+                counts, max_num_bins, left_edge, right_edge)
         if kern is not None:
             # Smoothing
-            counts = _kernel_smoothing(counts, kern, l, r)
+            counts = _kernel_smoothing(counts, kern, left_edge, right_edge)
         # Transform the array count into an AnalogSignal
         cch_result = neo.AnalogSignal(
             signal=counts.reshape(counts.size, 1),
@@ -585,17 +527,172 @@ def cross_correlation_histogram(
     if not binned_st1.t_stop == binned_st2.t_stop:
         raise AssertionError("Spike train must have same t stop")
 
+    # The maximum number of bins
+    max_num_bins = max(binned_st1.num_bins, binned_st2.num_bins)
+
+    # Set the time window in which the cch is computed
+    # Window parameter given in number of bins (integer)
+    if isinstance(window[0], int) and isinstance(window[1], int):
+        # Check the window parameter values
+        if window[0] >= window[1] or window[0] <= -max_num_bins \
+                or window[1] >= max_num_bins:
+            raise ValueError(
+                "The window exceeds the length of the spike trains")
+        # Assign left and right edges of the cch
+        left_edge, right_edge = window[0], window[1]
+        # The mode in which to compute the cch for the speed implementation
+        cch_mode = 'pad'
+    # Case without explicit window parameter
+    elif window == 'full':
+        # cch computed for all the possible entries
+        # Assign left and right edges of the cch
+        right_edge = binned_st2.num_bins - 1
+        left_edge = - binned_st1.num_bins + 1
+        cch_mode = window
+        # cch computed only for the entries that completely overlap
+    elif window == 'valid':
+        # cch computed only for valid entries
+        # Assign left and right edges of the cch
+        right_edge = max(binned_st2.num_bins - binned_st1.num_bins, 0)
+        left_edge = min(binned_st2.num_bins - binned_st1.num_bins, 0)
+        cch_mode = window
+    # Check the mode parameter
+    else:
+        raise KeyError("Invalid window parameter")
+
     if method == "memory":
         cch_result, bin_ids = _cch_memory(
-            binned_st1, binned_st2, window, border_correction, binary,
-            kernel)
+            binned_st1, binned_st2, left_edge, right_edge, border_correction,
+            binary, kernel)
     elif method == "speed":
-
         cch_result, bin_ids = _cch_speed(
-            binned_st1, binned_st2, window, border_correction, binary,
-            kernel)
+            binned_st1, binned_st2, left_edge, right_edge, cch_mode,
+            border_correction, binary, kernel)
+
+    if cross_corr_coef:
+        cch_result = _cross_corr_coef(cch_result, binned_st1, binned_st2)
 
     return cch_result, bin_ids
 
+
 # Alias for common abbreviation
 cch = cross_correlation_histogram
+
+
+def spike_time_tiling_coefficient(spiketrain_1, spiketrain_2, dt=0.005 * pq.s):
+    """
+    Calculates the Spike Time Tiling Coefficient (STTC) as described in
+    (Cutts & Eglen, 2014) following Cutts' implementation in C.
+    The STTC is a pairwise measure of correlation between spike trains.
+    It has been proposed as a replacement for the correlation index as it
+    presents several advantages (e.g. it's not confounded by firing rate,
+    appropriately distinguishes lack of correlation from anti-correlation,
+    periods of silence don't add to the correlation, and it is sensitive to
+    firing patterns).
+
+    The STTC is calculated as follows:
+
+    .. math::
+        STTC = \\frac{1}{2} \\left( \\frac{PA - TB}{1 - PA \\cdot TB}
+               + \\frac{PB - TA}{1 - PB \\cdot TA} \\right)
+
+    Where `PA` is the proportion of spikes from train 1 that lie within
+    `[-dt, +dt]` of any spike of train 2 (i.e. their number divided by the
+    total number of spikes in train 1), `PB` is the same proportion for the
+    spikes in train 2; `TA` is the proportion of total recording time within
+    `[-dt, +dt]` of any spike in train 1, and `TB` is the same proportion
+    for train 2.
+
+    This is a Python implementation, compatible with the Elephant library, of
+    the original code by C. Cutts, written in C and available at:
+    (https://github.com/CCutts/Detecting_pairwise_correlations_in_spike_trains/blob/master/spike_time_tiling_coefficient.c)
+
+    Parameters
+    ----------
+    spiketrain_1, spiketrain_2 : neo.SpikeTrain objects to cross-correlate.
+        Must have the same t_start and t_stop.
+    dt : Python Quantity.
+        The synchronicity window used both for the quantification of the
+        proportion of total recording time that lies within `[-dt, +dt]` of
+        each spike in each train, and for the proportion of spikes in
+        `spiketrain_1` that lie within `[-dt, +dt]` of any spike in
+        `spiketrain_2`.
+        Default : 0.005 * pq.s
+
+    Returns
+    -------
+    index:  float
+        The spike time tiling coefficient (STTC). Returns np.nan if any spike
+        train is empty.
+
+    References
+    ----------
+    Cutts, C. S., & Eglen, S. J. (2014). Detecting Pairwise Correlations in
+    Spike Trains: An Objective Comparison of Methods and Application to the
+    Study of Retinal Waves. Journal of Neuroscience, 34(43), 14288–14303.
+    """
+
+    def run_P(spiketrain_1, spiketrain_2, N1, N2, dt):
+        """
+        Check every spike in train 1 to see if there's a spike in train 2
+        within dt
+        """
+        Nab = 0
+        j = 0
+        for i in range(N1):
+            while j < N2:  # don't need to search all j each iteration
+                if np.abs(spiketrain_1[i] - spiketrain_2[j]) <= dt:
+                    Nab = Nab + 1
+                    break
+                elif spiketrain_2[j] > spiketrain_1[i]:
+                    break
+                else:
+                    j = j + 1
+        return Nab
+
+    def run_T(spiketrain, N, dt):
+        """
+        Calculate the proportion of the total recording time 'tiled' by spikes.
+        """
+        time_A = 2 * N * dt  # maximum possible time
+
+        if N == 1:  # for just one spike in train
+            if spiketrain[0] - spiketrain.t_start < dt:
+                time_A = time_A - dt + spiketrain[0] - spiketrain.t_start
+            elif spiketrain[0] + dt > spiketrain.t_stop:
+                time_A = time_A - dt - spiketrain[0] + spiketrain.t_stop
+
+        else:  # if more than one spike in train
+            i = 0
+            while i < (N - 1):
+                diff = spiketrain[i + 1] - spiketrain[i]
+
+                if diff < (2 * dt):  # subtract overlap
+                    time_A = time_A - 2 * dt + diff
+                i += 1
+                # check if spikes are within dt of the start and/or end
+                # if so just need to subtract overlap of first and/or last spike
+            if (spiketrain[0] - spiketrain.t_start) < dt:
+                time_A = time_A + spiketrain[0] - dt - spiketrain.t_start
+
+            if (spiketrain.t_stop - spiketrain[N - 1]) < dt:
+                time_A = time_A - spiketrain[-1] - dt + spiketrain.t_stop
+
+        T = (time_A / (spiketrain.t_stop - spiketrain.t_start)).item()
+        return T
+
+    N1 = len(spiketrain_1)
+    N2 = len(spiketrain_2)
+
+    if N1 == 0 or N2 == 0:
+        index = np.nan
+    else:
+        TA = run_T(spiketrain_1, N1, dt)
+        TB = run_T(spiketrain_2, N2, dt)
+        PA = run_P(spiketrain_1, spiketrain_2, N1, N2, dt)
+        PA = PA / N1
+        PB = run_P(spiketrain_2, spiketrain_1, N2, N1, dt)
+        PB = PB / N2
+        index = 0.5 * (PA - TB) / (1 - PA * TB) + 0.5 * (PB - TA) / (
+            1 - PB * TA)
+    return index
+
+
+sttc = spike_time_tiling_coefficient
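For orientation, a minimal usage sketch of the spike_train_correlation.py additions above. It is only a sketch: it assumes an Elephant build that already contains this commit (i.e. the cross_corr_coef flag of cross_correlation_histogram and the new spike_time_tiling_coefficient), and the spike times, bin size and dt are arbitrary illustration values.

import quantities as pq
import neo
from elephant.conversion import BinnedSpikeTrain
from elephant.spike_train_correlation import (
    cross_correlation_histogram, spike_time_tiling_coefficient)

# Two short, partially overlapping spike trains (illustrative values only)
st1 = neo.SpikeTrain([10, 25, 40, 55] * pq.ms, t_start=0 * pq.ms,
                     t_stop=100 * pq.ms)
st2 = neo.SpikeTrain([12, 26, 70] * pq.ms, t_start=0 * pq.ms,
                     t_stop=100 * pq.ms)

# CCH over lags of -4..+4 bins, normalized to a correlation coefficient
cch_result, lags = cross_correlation_histogram(
    BinnedSpikeTrain(st1, binsize=5 * pq.ms),
    BinnedSpikeTrain(st2, binsize=5 * pq.ms),
    window=[-4, 4], cross_corr_coef=True)

# STTC close to 1 indicates strongly correlated firing within +/- dt
sttc_value = spike_time_tiling_coefficient(st1, st2, dt=5 * pq.ms)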

+ 93 - 0
code/elephant/elephant/spike_train_generation.py

@@ -337,6 +337,99 @@ def homogeneous_poisson_process(rate, t_start=0.0 * ms, t_stop=1000.0 * ms,
         np.random.exponential, (mean_interval,), rate, t_start, t_stop,
         as_array)
 
+def inhomogeneous_poisson_process(rate, as_array=False):
+    """
+    Returns a spike train whose spikes are a realization of an inhomogeneous
+    Poisson process with the given rate profile.
+
+    Parameters
+    ----------
+    rate : neo.AnalogSignal
+        A `neo.AnalogSignal` representing the rate profile evolving over time.
+        All of its values must be `>= 0`. The output spike train will have
+        `t_start = rate.t_start` and `t_stop = rate.t_stop`.
+    as_array : bool
+        If True, a NumPy array of sorted spikes is returned,
+        rather than a SpikeTrain object.
+
+    Raises
+    ------
+    ValueError : If `rate` contains any negative value.
+    """
+    # Check that rate contains only non-negative values
+    if any(rate < 0) or not rate.size:
+        raise ValueError(
+            'rate must be a non-empty signal with non-negative values, '
+            'representing the rate at time t')
+    else:
+        # Generate a hidden homogeneous Poisson process at the peak rate
+        max_rate = np.max(rate)
+        homogeneous_poiss = homogeneous_poisson_process(
+            rate=max_rate, t_stop=rate.t_stop, t_start=rate.t_start)
+        # Compute the rate profile at each spike time by interpolation
+        rate_interpolated = _analog_signal_linear_interp(
+            signal=rate, times=homogeneous_poiss.magnitude *
+                               homogeneous_poiss.units)
+        # Accept each spike at time t with probability rate(t)/max_rate
+        u = np.random.uniform(size=len(homogeneous_poiss)) * max_rate
+        spikes = homogeneous_poiss[u < rate_interpolated.flatten()]
+        if as_array:
+            spikes = spikes.magnitude
+        return spikes
+
+
+def _analog_signal_linear_interp(signal, times):
+    '''
+    Compute the linear interpolation of a signal at desired times.
+
+    Given the `signal` (neo.AnalogSignal) taking values `s0` and `s1` at two
+    consecutive time points `t0` and `t1` (`t0 < t1`), for every time `t` in
+    `times` such that `t0 < t <= t1`, the linearly interpolated value is
+    returned, given by:
+                `s = s0 + ((s1 - s0) / (t1 - t0)) * (t - t0)`.
+
+    Parameters
+    ----------
+    times : Quantity vector(time)
+        The time points for which the interpolation is computed
+
+    signal : neo.core.AnalogSignal
+        The analog signal containing the discretization of the function to
+        interpolate
+
+    Returns
+    -------
+    out : Quantity array
+        The values of the interpolated signal at the times given by `times`.
+
+    Notes
+    -----
+    If `signal` has sampling period `dt = signal.sampling_period`, its values
+    are defined at `t = signal.times`, such that
+    `t[i] = signal.t_start + i * dt`. The last of these times is lower than
+    `signal.t_stop`: `t[-1] = signal.t_stop - dt`.
+    For the interpolation at times t such that `t[-1] <= t <= signal.t_stop`,
+    the value of `signal` at `signal.t_stop` is taken to be that
+    at time `t[-1]`.
+    '''
+    dt = signal.sampling_period
+    t_start = signal.t_start.rescale(signal.times.units)
+    t_stop = signal.t_stop.rescale(signal.times.units)
+
+    # Extend the signal (as a dimensionless array) by copying the last value
+    # once, and extend its times to t_stop
+    signal_extended = np.vstack(
+        [signal.magnitude, signal[-1].magnitude]).flatten()
+    times_extended = np.hstack([signal.times, t_stop]) * signal.times.units
+    time_ids = np.floor(((times - t_start) / dt).rescale(
+        dimensionless).magnitude).astype('i')
+
+    # Compute the slope m of the signal at each time in times
+    y1 = signal_extended[time_ids]
+    y2 = signal_extended[time_ids + 1]
+    m = (y2 - y1) / dt
+
+    # Interpolate the signal at each time in times by linear interpolation
+    out = (y1 + m * (times - times_extended[time_ids])) * signal.units
+    return out.rescale(signal.units)
 
 def homogeneous_gamma_process(a, b, t_start=0.0 * ms, t_stop=1000.0 * ms,
                               as_array=False):

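As a quick, hedged illustration of the inhomogeneous_poisson_process added above: the rate profile below (a 2 Hz sinusoid oscillating between 5 Hz and 15 Hz) and its parameters are placeholders chosen only to show the call pattern.

import numpy as np
import quantities as pq
import neo
from elephant.spike_train_generation import inhomogeneous_poisson_process

# Rate profile sampled at 1 kHz over one second
t = np.arange(0, 1, 0.001)
rate = neo.AnalogSignal((10 + 5 * np.sin(2 * np.pi * 2 * t)) * pq.Hz,
                        sampling_period=1 * pq.ms, t_start=0 * pq.s)

# One realization; spikes of a homogeneous process at the peak rate are
# kept with probability rate(t) / max_rate (thinning)
spiketrain = inhomogeneous_poisson_process(rate)
print(len(spiketrain), spiketrain.t_start, spiketrain.t_stop)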
+ 1 - 1
code/elephant/elephant/sta.py

@@ -226,7 +226,7 @@ def spike_field_coherence(signal, spiketrain, **kwargs):
 
     >>> import numpy as np
     >>> import matplotlib.pyplot as plt
-    >>> from quantities import ms, mV, Hz, kHz
+    >>> from quantities import s, ms, mV, Hz, kHz
     >>> import neo, elephant
 
     >>> t = pq.Quantity(range(10000),units='ms')

+ 117 - 13
code/elephant/elephant/statistics.py

@@ -250,9 +250,71 @@ def lv(v):
     return 3. * np.mean(np.power(np.diff(v) / (v[:-1] + v[1:]), 2))
 
 
+def cv2(v):
+    """
+    Calculate the measure of CV2 for a sequence of time intervals between 
+    events.
+
+    Given a vector v containing a sequence of intervals, the CV2 is
+    defined as:
+
+    .. math::
+        CV2 := \\frac{1}{N-1} \\sum_{i=1}^{N-1}
+               \\frac{2 |isi_{i+1} - isi_i|}{|isi_{i+1} + isi_i|}
+
+    The CV2 is typically computed as a substitute for the classical
+    coefficient of variation (CV) for sequences of events which include
+    some (relatively slow) rate fluctuation.  As with the CV, CV2=1 for
+    a sequence of intervals generated by a Poisson process.
+
+    Parameters
+    ----------
+
+    v : quantity array, numpy array or list
+        Vector of consecutive time intervals
+
+    Returns
+    -------
+    cv2 : float
+       The CV2 of the inter-spike interval of the input sequence.
+
+    Raises
+    ------
+    AttributeError :
+       If an empty list is specified, or if the sequence has less
+       than two entries, an AttributeError will be raised.
+    AttributeError :
+        Only vector inputs are supported.  If a matrix is passed to the
+        function an AttributeError will be raised.
+
+    References
+    ----------
+    .. [1] Holt, G. R., Softky, W. R., Koch, C., & Douglas, R. J. (1996).
+       Comparison of discharge variability in vitro and in vivo in cat visual
+       cortex neurons. Journal of neurophysiology, 75(5), 1806-1814.
+    """
+    # convert to array, cast to float
+    v = np.asarray(v)
+
+    # ensure the input is a vector
+    if len(v.shape) > 1:
+        raise AttributeError("Input shape is larger than 1. Please provide "
+                             "a vector in input.")
+
+    # ensure we have enough entries
+    if v.size < 2:
+        raise AttributeError("Input size is too small. Please provide "
+                             "an input with more than 1 entry.")
+
+    # calculate CV2 and return result
+    return 2. * np.mean(np.absolute(np.diff(v)) / (v[:-1] + v[1:]))
+
+
 # sigma2kw and kw2sigma only needed for oldfct_instantaneous_rate!
 # to finally be taken out of Elephant
-def sigma2kw(form):
+
+def sigma2kw(form): # pragma: no cover
     warnings.simplefilter('always', DeprecationWarning)
     warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
     if form.upper() == 'BOX':
@@ -271,14 +333,14 @@ def sigma2kw(form):
     return coeff
 
 
-def kw2sigma(form):
+def kw2sigma(form): # pragma: no cover
     warnings.simplefilter('always', DeprecationWarning)
     warnings.warn("deprecated", DeprecationWarning, stacklevel=2)
     return 1/sigma2kw(form)
 
 
 # to finally be taken out of Elephant
-def make_kernel(form, sigma, sampling_period, direction=1):
+def make_kernel(form, sigma, sampling_period, direction=1): # pragma: no cover
     """
     Creates kernel functions for convolution.
 
@@ -445,7 +507,7 @@ def make_kernel(form, sigma, sampling_period, direction=1):
 # to finally be taken out of Elephant
 def oldfct_instantaneous_rate(spiketrain, sampling_period, form,
                        sigma='auto', t_start=None, t_stop=None,
-                       acausal=True, trim=False):
+                       acausal=True, trim=False): # pragma: no cover
     """
     Estimate instantaneous firing rate by kernel convolution.
 
@@ -596,7 +658,7 @@ def instantaneous_rate(spiketrain, sampling_period, kernel='auto',
 
     Parameters
     -----------
-    spiketrain : 'neo.SpikeTrain'
+    spiketrain : neo.SpikeTrain or list of neo.SpikeTrain objects
         Neo object that contains spike times, the unit of the time stamps
         and t_start and t_stop of the spike train.
     sampling_period : Time Quantity
@@ -670,6 +732,20 @@ def instantaneous_rate(spiketrain, sampling_period, kernel='auto',
     ..[1] H. Shimazaki, S. Shinomoto, J Comput Neurosci (2010) 29:171–182.
 
     """
+    # Merge spike trains if list of spike trains given:
+    if isinstance(spiketrain, list):
+        _check_consistency_of_spiketrainlist(spiketrain, t_start=t_start, t_stop=t_stop)
+        if t_start is None:
+            t_start = spiketrain[0].t_start
+        if t_stop is None:
+            t_stop = spiketrain[0].t_stop
+        spikes = np.concatenate([st.magnitude for st in spiketrain])
+        merged_spiketrain = SpikeTrain(np.sort(spikes), units=spiketrain[0].units,
+                                       t_start=t_start, t_stop=t_stop)
+        return instantaneous_rate(merged_spiketrain, sampling_period=sampling_period,
+                                  kernel=kernel, cutoff=cutoff, t_start=t_start,
+                                  t_stop=t_stop, trim=trim)
+
     # Checks of input variables:
     if not isinstance(spiketrain, SpikeTrain):
         raise TypeError(
@@ -689,8 +765,12 @@ def instantaneous_rate(spiketrain, sampling_period, kernel='auto',
     if kernel == 'auto':
         kernel_width = sskernel(spiketrain.magnitude, tin=None,
                                 bootstrap=True)['optw']
+        if kernel_width is None:
+            raise ValueError(
+                "Unable to calculate optimal kernel width for "
+                "instantaneous rate from input data.")
         unit = spiketrain.units
-        sigma = 1/(2.0 * 2.7) * kernel_width * unit
+        sigma = 1 / (2.0 * 2.7) * kernel_width * unit
         # factor 2.0 connects kernel width with its half width,
         # factor 2.7 connects half width of Gaussian distribution with
         #             99% probability mass with its standard deviation.
@@ -705,13 +785,13 @@ def instantaneous_rate(spiketrain, sampling_period, kernel='auto',
         raise TypeError("cutoff must be float or integer!")
 
     if not (t_start is None or (isinstance(t_start, pq.Quantity) and
-            t_start.dimensionality.simplified ==
-            pq.Quantity(1, "s").dimensionality)):
+                                t_start.dimensionality.simplified ==
+                                pq.Quantity(1, "s").dimensionality)):
         raise TypeError("t_start must be a time quantity!")
 
     if not (t_stop is None or (isinstance(t_stop, pq.Quantity) and
-            t_stop.dimensionality.simplified ==
-            pq.Quantity(1, "s").dimensionality)):
+                               t_stop.dimensionality.simplified ==
+                               pq.Quantity(1, "s").dimensionality)):
         raise TypeError("t_stop must be a time quantity!")
 
     if not (isinstance(trim, bool)):
@@ -1045,6 +1125,9 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False):
     'C': cost functions of w,
     'confb95': (lower bootstrap confidence level, upper bootstrap confidence level),
     'yb': bootstrap samples.
+    
+    If no optimal kernel could be found, all entries of the dictionary are set
+    to None.
 
 
     Ref: Shimazaki, Hideaki, and Shigeru Shinomoto. 2010. Kernel
@@ -1132,7 +1215,8 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False):
     # Bootstrap confidence intervals
     confb95 = None
     yb = None
-    if bootstrap:
+    # If bootstrap is requested, and an optimal kernel was found
+    if bootstrap and optw:
         nbs = 1000
         yb = np.zeros((nbs, len(tin)))
         for ii in range(nbs):
@@ -1147,11 +1231,31 @@ def sskernel(spiketimes, tin=None, w=None, bootstrap=False):
         y95b = ybsort[np.floor(0.05 * nbs).astype(int), :]
         y95u = ybsort[np.floor(0.95 * nbs).astype(int), :]
         confb95 = (y95b, y95u)
-    ret = np.interp(tin, t, y)
-    return {'y': ret,
+    # Only perform interpolation if y could be calculated
+    if y is not None:
+        y = np.interp(tin, t, y)
+    return {'y': y,
             't': tin,
             'optw': optw,
             'w': w,
             'C': C,
             'confb95': confb95,
             'yb': yb}
+
+
+def _check_consistency_of_spiketrainlist(spiketrainlist, t_start=None, t_stop=None):
+    for spiketrain in spiketrainlist:
+        if not isinstance(spiketrain, SpikeTrain):
+            raise TypeError(
+                "spike train must be instance of :class:`SpikeTrain` of Neo!\n"
+                "    Found: %s, value %s" % (type(spiketrain), str(spiketrain)))
+        if t_start is None and not spiketrain.t_start == spiketrainlist[0].t_start:
+            raise ValueError(
+                "the spike trains must have the same t_start!")
+        if t_stop is None and not spiketrain.t_stop == spiketrainlist[0].t_stop:
+            raise ValueError(
+                "the spike trains must have the same t_stop!")
+        if not spiketrain.units == spiketrainlist[0].units:
+            raise ValueError(
+                "the spike trains must have the same units!")
+    return None

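To round off the statistics.py additions above, a minimal sketch of the new cv2 measure and the list handling of instantaneous_rate. It assumes elephant.kernels.GaussianKernel is available in the installed version; the spike times and kernel width are placeholder values.

import quantities as pq
import neo
from elephant import kernels
from elephant.statistics import isi, cv2, instantaneous_rate

st = neo.SpikeTrain([12, 18, 29, 50, 77] * pq.ms, t_start=0 * pq.ms,
                    t_stop=100 * pq.ms)

# CV2 of the inter-spike intervals; close to 1 for a Poisson process
print(cv2(isi(st)))

# A list of spike trains is merged into one train before rate estimation
rate = instantaneous_rate([st, st], sampling_period=1 * pq.ms,
                          kernel=kernels.GaussianKernel(sigma=10 * pq.ms))
print(rate.shape)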
+ 1 - 1
code/elephant/elephant/test/make_spike_extraction_test_data.py

@@ -1,4 +1,4 @@
-def main():
+def main(): # pragma: no cover
   from brian2 import start_scope,mvolt,ms,NeuronGroup,StateMonitor,run
   import matplotlib.pyplot as plt
   import neo

BIN
code/elephant/elephant/test/spike_extraction_test_data.npz


+ 24 - 1
code/elephant/elephant/test/test_conversion.py

@@ -500,6 +500,29 @@ class TimeHistogramTestCase(unittest.TestCase):
             np.array_equal(xa.bin_edges[:-1],
                            xb.bin_edges[:-1].rescale(binsize.units)))
 
+    def test_binned_spiketrain_rescaling(self):
+        train = neo.SpikeTrain(times=np.array([1.001, 1.002, 1.005]) * pq.s,
+                               t_start=1 * pq.s, t_stop=1.01 * pq.s)
+        bst = cv.BinnedSpikeTrain(train,
+                                  t_start=1 * pq.s, t_stop=1.01 * pq.s,
+                                  binsize=1 * pq.ms)
+        target_edges = np.array([1000, 1001, 1002, 1003, 1004, 1005, 1006,
+                                 1007, 1008, 1009, 1010], dtype=np.float)
+        target_centers = np.array(
+            [1000.5, 1001.5, 1002.5, 1003.5, 1004.5, 1005.5, 1006.5, 1007.5,
+             1008.5, 1009.5], dtype=np.float)
+        self.assertTrue(np.allclose(bst.bin_edges.magnitude, target_edges))
+        self.assertTrue(np.allclose(bst.bin_centers.magnitude, target_centers))
+        self.assertTrue(bst.bin_centers.units == pq.ms)
+        self.assertTrue(bst.bin_edges.units == pq.ms)
+        bst = cv.BinnedSpikeTrain(train,
+                                  t_start=1 * pq.s, t_stop=1010 * pq.ms,
+                                  binsize=1 * pq.ms)
+        self.assertTrue(np.allclose(bst.bin_edges.magnitude, target_edges))
+        self.assertTrue(np.allclose(bst.bin_centers.magnitude, target_centers))
+        self.assertTrue(bst.bin_centers.units == pq.ms)
+        self.assertTrue(bst.bin_edges.units == pq.ms)
+
 
 if __name__ == '__main__':
-    unittest.main()
+    unittest.main()

+ 6 - 6
code/elephant/elephant/test/test_csd.py

@@ -29,21 +29,21 @@ class LFP_TestCase(unittest.TestCase):
         ele_pos = utils.generate_electrodes(dim=1).reshape(5, 1)
         lfp = csd.generate_lfp(utils.gauss_1d_dipole, ele_pos)
         self.assertEqual(ele_pos.shape[1], 1)
-        self.assertEqual(ele_pos.shape[0], len(lfp))
+        self.assertEqual(ele_pos.shape[0], lfp.shape[1])
 
     def test_lfp2d_electrodes(self):
         ele_pos = utils.generate_electrodes(dim=2)
         xx_ele, yy_ele = ele_pos
         lfp = csd.generate_lfp(utils.large_source_2D, xx_ele, yy_ele)
         self.assertEqual(len(ele_pos), 2)
-        self.assertEqual(xx_ele.shape[0], len(lfp))
+        self.assertEqual(xx_ele.shape[0], lfp.shape[1])
 
     def test_lfp3d_electrodes(self):
         ele_pos = utils.generate_electrodes(dim=3, res=3)
         xx_ele, yy_ele, zz_ele = ele_pos
         lfp = csd.generate_lfp(utils.gauss_3d_dipole, xx_ele, yy_ele, zz_ele)
         self.assertEqual(len(ele_pos), 3)
-        self.assertEqual(xx_ele.shape[0], len(lfp))
+        self.assertEqual(xx_ele.shape[0], lfp.shape[1])
 
 
 class CSD1D_TestCase(unittest.TestCase):
@@ -83,7 +83,7 @@ class CSD1D_TestCase(unittest.TestCase):
         result = self.csd_method(self.lfp, method=method)
         self.assertEqual(result.t_start, 0.0 * pq.s)
         self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
-        self.assertEqual(len(result.times), 1)
+        self.assertEqual(result.shape[0], 1)
 
     def test_inputs_deltasplineicsd(self):
         methods = ['DeltaiCSD', 'SplineiCSD']
@@ -94,7 +94,7 @@ class CSD1D_TestCase(unittest.TestCase):
                                      **self.params[method])
             self.assertEqual(result.t_start, 0.0 * pq.s)
             self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
-            self.assertEqual(len(result.times), 1)
+            self.assertEqual(result.times.shape[0], 1)
 
     def test_inputs_stepicsd(self):
         method = 'StepiCSD'
@@ -107,7 +107,7 @@ class CSD1D_TestCase(unittest.TestCase):
                                  **self.params[method])
         self.assertEqual(result.t_start, 0.0 * pq.s)
         self.assertEqual(result.sampling_rate, 1000 * pq.Hz)
-        self.assertEqual(len(result.times), 1)
+        self.assertEqual(result.times.shape[0], 1)
 
     def test_inuts_kcsd(self):
         method = 'KCSD1D'

+ 3 - 3
code/elephant/elephant/test/test_kcsd.py

@@ -30,7 +30,7 @@ class KCSD1D_TestCase(unittest.TestCase):
         temp_signals = []
         for ii in range(len(self.pots)):
             temp_signals.append(self.pots[ii])
-        self.an_sigs = neo.AnalogSignal(temp_signals * pq.mV,
+        self.an_sigs = neo.AnalogSignal(np.array(temp_signals).T * pq.mV,
                                        sampling_rate=1000 * pq.Hz)
         chidx = neo.ChannelIndex(range(len(self.pots)))
         chidx.analogsignals.append(self.an_sigs)
@@ -80,7 +80,7 @@ class KCSD2D_TestCase(unittest.TestCase):
         temp_signals = []
         for ii in range(len(self.pots)):
             temp_signals.append(self.pots[ii])
-        self.an_sigs = neo.AnalogSignal(temp_signals * pq.mV,
+        self.an_sigs = neo.AnalogSignal(np.array(temp_signals).T * pq.mV,
                                        sampling_rate=1000 * pq.Hz)
         chidx = neo.ChannelIndex(range(len(self.pots)))
         chidx.analogsignals.append(self.an_sigs)
@@ -144,7 +144,7 @@ class KCSD3D_TestCase(unittest.TestCase):
         temp_signals = []
         for ii in range(len(self.pots)):
             temp_signals.append(self.pots[ii])
-        self.an_sigs = neo.AnalogSignal(temp_signals * pq.mV,
+        self.an_sigs = neo.AnalogSignal(np.array(temp_signals).T * pq.mV,
                                        sampling_rate=1000 * pq.Hz)
         chidx = neo.ChannelIndex(range(len(self.pots)))
         chidx.analogsignals.append(self.an_sigs)

+ 134 - 45
code/elephant/elephant/test/test_pandas_bridge.py

@@ -13,7 +13,7 @@ from itertools import chain
 
 from neo.test.generate_datasets import fake_neo
 import numpy as np
-from numpy.testing.utils import assert_array_equal
+from numpy.testing import assert_array_equal
 import quantities as pq
 
 try:
@@ -1636,7 +1636,10 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+
 
         assert_frame_equal(targ, res0)
 
@@ -1681,9 +1684,15 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res1.columns.names)
         self.assertCountEqual(keys, res2.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -1733,10 +1742,18 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res2.columns.names)
         self.assertCountEqual(keys, res3.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
-        assert_array_equal(targ.values, res3.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res3.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -1779,8 +1796,12 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res0.columns.names)
         self.assertCountEqual(keys, res1.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -1827,9 +1848,15 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res1.columns.names)
         self.assertCountEqual(keys, res2.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -1880,10 +1907,18 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res2.columns.names)
         self.assertCountEqual(keys, res3.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
-        assert_array_equal(targ.values, res3.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res3.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -1927,8 +1962,12 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res0.columns.names)
         self.assertCountEqual(keys, res1.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -1964,7 +2003,9 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
 
@@ -1998,7 +2039,9 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
 
@@ -2033,7 +2076,9 @@ class MultiEventsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
 
@@ -2152,7 +2197,9 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
 
@@ -2197,9 +2244,15 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res1.columns.names)
         self.assertCountEqual(keys, res2.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -2249,10 +2302,18 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res2.columns.names)
         self.assertCountEqual(keys, res3.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
-        assert_array_equal(targ.values, res3.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res3.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -2295,8 +2356,12 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res0.columns.names)
         self.assertCountEqual(keys, res1.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -2343,9 +2408,15 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res1.columns.names)
         self.assertCountEqual(keys, res2.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -2396,10 +2467,18 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res2.columns.names)
         self.assertCountEqual(keys, res3.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
-        assert_array_equal(targ.values, res2.values)
-        assert_array_equal(targ.values, res3.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res2.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res3.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -2443,8 +2522,12 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, res0.columns.names)
         self.assertCountEqual(keys, res1.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
-        assert_array_equal(targ.values, res1.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res1.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
         assert_frame_equal(targ, res1)
@@ -2480,7 +2563,9 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
 
@@ -2514,7 +2599,9 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
 
@@ -2549,7 +2636,9 @@ class MultiEpochsToDataframeTestCase(unittest.TestCase):
         self.assertCountEqual(keys, targ.columns.names)
         self.assertCountEqual(keys, res0.columns.names)
 
-        assert_array_equal(targ.values, res0.values)
+        assert_array_equal(
+            np.array(targ.values, dtype=np.float),
+            np.array(res0.values, dtype=np.float))
 
         assert_frame_equal(targ, res0)
 

+ 179 - 16
code/elephant/elephant/test/test_signal_processing.py

@@ -46,10 +46,10 @@ class ZscoreTestCase(unittest.TestCase):
                           20, 15, 4, 7, 10, 14, 15, 15, 20, 1]
 
     def test_zscore_single_dup(self):
-        '''
+        """
         Test z-score on a single AnalogSignal, asking to return a
         duplicate.
-        '''
+        """
         signal = neo.AnalogSignal(
             self.test_seq1, units='mV',
             t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
@@ -69,10 +69,10 @@ class ZscoreTestCase(unittest.TestCase):
         self.assertEqual(signal[0].magnitude, self.test_seq1[0])
 
     def test_zscore_single_inplace(self):
-        '''
+        """
         Test z-score on a single AnalogSignal, asking for an inplace
         operation.
-        '''
+        """
         signal = neo.AnalogSignal(
             self.test_seq1, units='mV',
             t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
@@ -92,10 +92,10 @@ class ZscoreTestCase(unittest.TestCase):
         self.assertEqual(signal[0].magnitude, target[0])
 
     def test_zscore_single_multidim_dup(self):
-        '''
+        """
         Test z-score on a single AnalogSignal with multiple dimensions, asking
         to return a duplicate.
-        '''
+        """
         signal = neo.AnalogSignal(
             np.transpose(
                 np.vstack([self.test_seq1, self.test_seq2])), units='mV',
@@ -113,10 +113,10 @@ class ZscoreTestCase(unittest.TestCase):
         self.assertEqual(signal[0, 0].magnitude, self.test_seq1[0])
 
     def test_zscore_single_multidim_inplace(self):
-        '''
+        """
         Test z-score on a single AnalogSignal with multiple dimensions, asking
         for an inplace operation.
-        '''
+        """
         signal = neo.AnalogSignal(
             np.vstack([self.test_seq1, self.test_seq2]), units='mV',
             t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=float)
@@ -133,11 +133,11 @@ class ZscoreTestCase(unittest.TestCase):
         self.assertEqual(signal[0, 0].magnitude, target[0, 0])
 
     def test_zscore_single_dup_int(self):
-        '''
+        """
         Test if the z-score is correctly calculated even if the input is an
         AnalogSignal of type int, asking for a duplicate (duplicate should
         be of type float).
-        '''
+        """
         signal = neo.AnalogSignal(
             self.test_seq1, units='mV',
             t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=int)
@@ -154,10 +154,10 @@ class ZscoreTestCase(unittest.TestCase):
         self.assertEqual(signal.magnitude[0], self.test_seq1[0])
 
     def test_zscore_single_inplace_int(self):
-        '''
+        """
         Test if the z-score is correctly calculated even if the input is an
         AnalogSignal of type int, asking for an inplace operation.
-        '''
+        """
         signal = neo.AnalogSignal(
             self.test_seq1, units='mV',
             t_start=0. * pq.ms, sampling_rate=1000. * pq.Hz, dtype=int)
@@ -174,10 +174,10 @@ class ZscoreTestCase(unittest.TestCase):
         self.assertEqual(signal[0].magnitude, target.astype(int)[0])
 
     def test_zscore_list_dup(self):
-        '''
+        """
         Test zscore on a list of AnalogSignal objects, asking to return a
         duplicate.
-        '''
+        """
         signal1 = neo.AnalogSignal(
             np.transpose(np.vstack([self.test_seq1, self.test_seq1])),
             units='mV',
@@ -212,10 +212,10 @@ class ZscoreTestCase(unittest.TestCase):
         self.assertEqual(signal2.magnitude[0, 1], self.test_seq2[0])
 
     def test_zscore_list_inplace(self):
-        '''
+        """
         Test zscore on a list of AnalogSignal objects, asking for an
         inplace operation.
-        '''
+        """
         signal1 = neo.AnalogSignal(
             np.transpose(np.vstack([self.test_seq1, self.test_seq1])),
             units='mV',
@@ -568,5 +568,168 @@ class HilbertTestCase(unittest.TestCase):
                 decimal=decimal)
 
 
+class WaveletTestCase(unittest.TestCase):
+    def setUp(self):
+        # generate a 10-sec test data of pure 50 Hz cosine wave
+        self.fs = 1000.0
+        self.times = np.arange(0, 10.0, 1/self.fs)
+        self.test_freq1 = 50.0
+        self.test_freq2 = 60.0
+        self.test_data1 = np.cos(2*np.pi*self.test_freq1*self.times)
+        self.test_data2 = np.sin(2*np.pi*self.test_freq2*self.times)
+        self.test_data_arr = np.vstack([self.test_data1, self.test_data2])
+        self.test_data = neo.AnalogSignal(
+            self.test_data_arr.T*pq.mV, t_start=self.times[0]*pq.s,
+            t_stop=self.times[-1]*pq.s, sampling_period=(1/self.fs)*pq.s)
+        self.true_phase1 = np.angle(
+            self.test_data1 + 1j*np.sin(2*np.pi*self.test_freq1*self.times))
+        self.true_phase2 = np.angle(
+            self.test_data2 - 1j*np.cos(2*np.pi*self.test_freq2*self.times))
+        self.wt_freqs = [10, 20, 30]
+
+    def test_wavelet_errors(self):
+        """
+        Tests if errors are raised as expected.
+        """
+        # too high center frequency
+        kwds = {'signal': self.test_data, 'freq': self.fs/2}
+        self.assertRaises(
+            ValueError, elephant.signal_processing.wavelet_transform, **kwds)
+        kwds = {'signal': self.test_data_arr, 'freq': self.fs/2, 'fs': self.fs}
+        self.assertRaises(
+            ValueError, elephant.signal_processing.wavelet_transform, **kwds)
+
+        # too high center frequency in a list
+        kwds = {'signal': self.test_data, 'freq': [self.fs/10, self.fs/2]}
+        self.assertRaises(
+            ValueError, elephant.signal_processing.wavelet_transform, **kwds)
+        kwds = {'signal': self.test_data_arr,
+                'freq': [self.fs/10, self.fs/2], 'fs': self.fs}
+        self.assertRaises(
+            ValueError, elephant.signal_processing.wavelet_transform, **kwds)
+
+        # nco is not positive
+        kwds = {'signal': self.test_data, 'freq': self.fs/10, 'nco': 0}
+        self.assertRaises(
+            ValueError, elephant.signal_processing.wavelet_transform, **kwds)
+
+    def test_wavelet_io(self):
+        """
+        Tests that the data type and data shape of the output are consistent
+        with those of the input, and also tests the consistency between the
+        outputs for different input types.
+        """
+        # check the shape of the result array
+        # --- case of single center frequency
+        wt = elephant.signal_processing.wavelet_transform(self.test_data,
+                                                          self.fs/10)
+        self.assertTrue(wt.ndim == self.test_data.ndim)
+        self.assertTrue(wt.shape[0] == self.test_data.shape[0])  # time axis
+        self.assertTrue(wt.shape[1] == self.test_data.shape[1])  # channel axis
+
+        wt_arr = elephant.signal_processing.wavelet_transform(
+            self.test_data_arr, self.fs/10, fs=self.fs)
+        self.assertTrue(wt_arr.ndim == self.test_data.ndim)
+        # channel axis
+        self.assertTrue(wt_arr.shape[0] == self.test_data_arr.shape[0])
+        # time axis
+        self.assertTrue(wt_arr.shape[1] == self.test_data_arr.shape[1])
+
+        wt_arr1d = elephant.signal_processing.wavelet_transform(
+            self.test_data1, self.fs/10, fs=self.fs)
+        self.assertTrue(wt_arr1d.ndim == self.test_data1.ndim)
+        # time axis
+        self.assertTrue(wt_arr1d.shape[0] == self.test_data1.shape[0])
+
+        # --- case of multiple center frequencies
+        wt = elephant.signal_processing.wavelet_transform(
+            self.test_data, self.wt_freqs)
+        self.assertTrue(wt.ndim == self.test_data.ndim+1)
+        self.assertTrue(wt.shape[0] == self.test_data.shape[0])  # time axis
+        self.assertTrue(wt.shape[1] == self.test_data.shape[1])  # channel axis
+        self.assertTrue(wt.shape[2] == len(self.wt_freqs))  # frequency axis
+
+        wt_arr = elephant.signal_processing.wavelet_transform(
+            self.test_data_arr, self.wt_freqs, fs=self.fs)
+        self.assertTrue(wt_arr.ndim == self.test_data_arr.ndim+1)
+        # channel axis
+        self.assertTrue(wt_arr.shape[0] == self.test_data_arr.shape[0])
+        # frequency axis
+        self.assertTrue(wt_arr.shape[1] == len(self.wt_freqs))
+        # time axis
+        self.assertTrue(wt_arr.shape[2] == self.test_data_arr.shape[1])
+
+        wt_arr1d = elephant.signal_processing.wavelet_transform(
+            self.test_data1, self.wt_freqs, fs=self.fs)
+        self.assertTrue(wt_arr1d.ndim == self.test_data1.ndim+1)
+        # frequency axis
+        self.assertTrue(wt_arr1d.shape[0] == len(self.wt_freqs))
+        # time axis
+        self.assertTrue(wt_arr1d.shape[1] == self.test_data1.shape[0])
+
+        # check that the result does not depend on data type
+        self.assertTrue(np.all(wt[:, 0, :] == wt_arr[0, :, :].T))  # channel 0
+        self.assertTrue(np.all(wt[:, 1, :] == wt_arr[1, :, :].T))  # channel 1
+
+        # check the data contents in the case where freq is given as a list
+        # Note: there seems to be a bug in np.fft since NumPy 1.14.1, which
+        # causes the values of wt_1freq[:, 0] and wt_3freqs[:, 0, 0] to differ
+        # slightly, even though they use the same center frequency for the
+        # wavelet transform (in NumPy 1.13.1 they are identical). Here we
+        # only check that they are almost equal.
+        wt_1freq = elephant.signal_processing.wavelet_transform(
+            self.test_data, self.wt_freqs[0])
+        wt_3freqs = elephant.signal_processing.wavelet_transform(
+            self.test_data, self.wt_freqs)
+        assert_array_almost_equal(wt_1freq[:, 0], wt_3freqs[:, 0, 0],
+                                  decimal=12)
+
+    def test_wavelet_amplitude(self):
+        """
+        Tests amplitude properties of the obtained wavelet transform
+        """
+        # check that the amplitude of WT of a sinusoid is (almost) constant
+        wt = elephant.signal_processing.wavelet_transform(self.test_data,
+                                                          self.test_freq1)
+        # take a middle segment in order to avoid edge effects
+        amp = np.abs(wt[int(len(wt)/3):int(len(wt)//3*2), 0])
+        mean_amp = amp.mean()
+        assert_array_almost_equal((amp - mean_amp) / mean_amp,
+                                  np.zeros_like(amp), decimal=6)
+
+        # check that the amplitude of WT is (almost) zero when center frequency
+        # is considerably different from signal frequency
+        wt_low = elephant.signal_processing.wavelet_transform(
+            self.test_data, self.test_freq1/10)
+        amp_low = np.abs(wt_low[int(len(wt)/3):int(len(wt)//3*2), 0])
+        assert_array_almost_equal(amp_low, np.zeros_like(amp), decimal=6)
+
+        # check that zero padding hardly affects the result
+        wt_padded = elephant.signal_processing.wavelet_transform(
+            self.test_data, self.test_freq1, zero_padding=False)
+        amp_padded = np.abs(wt_padded[int(len(wt)/3):int(len(wt)//3*2), 0])
+        assert_array_almost_equal(amp_padded, amp, decimal=9)
+
+    def test_wavelet_phase(self):
+        """
+        Tests phase properties of the obtained wavelet transform
+        """
+        # check that the phase of WT is (almost) the same as that of the
+        # original sinusoid
+        wt = elephant.signal_processing.wavelet_transform(self.test_data,
+                                                          self.test_freq1)
+        phase = np.angle(wt[int(len(wt)/3):int(len(wt)//3*2), 0])
+        true_phase = self.true_phase1[int(len(wt)/3):int(len(wt)//3*2)]
+        assert_array_almost_equal(np.exp(1j*phase), np.exp(1j*true_phase),
+                                  decimal=6)
+
+        # check that zero padding hardly affects the result
+        wt_padded = elephant.signal_processing.wavelet_transform(
+            self.test_data, self.test_freq1, zero_padding=False)
+        phase_padded = np.angle(wt_padded[int(len(wt)/3):int(len(wt)//3*2), 0])
+        assert_array_almost_equal(np.exp(1j*phase_padded), np.exp(1j*phase),
+                                  decimal=9)
+
+
 if __name__ == '__main__':
     unittest.main()
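
For orientation, the wavelet tests added above exercise the new
elephant.signal_processing.wavelet_transform function roughly as follows; this is
a minimal usage sketch based only on the arguments appearing in the tests, with
illustrative signal and frequency values:

    import numpy as np
    import quantities as pq
    import neo
    from elephant.signal_processing import wavelet_transform

    fs = 1000.0  # assumed sampling rate in Hz
    times = np.arange(0, 1, 1 / fs)
    signal = neo.AnalogSignal(np.sin(2 * np.pi * 20 * times)[:, np.newaxis],
                              units='mV', sampling_rate=fs * pq.Hz)

    # a single center frequency keeps the shape of the input (time x channel)
    wt = wavelet_transform(signal, freq=20)
    amplitude, phase = np.abs(wt), np.angle(wt)

    # a list of center frequencies appends a frequency axis to the output
    wt_multi = wavelet_transform(signal, freq=[10, 20, 30])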

+ 101 - 56
code/elephant/elephant/test/test_spike_train_correlation.py

@@ -244,6 +244,10 @@ class cross_correlation_histogram_TestCase(unittest.TestCase):
         self.binned_st2 = conv.BinnedSpikeTrain(
             [self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
             binsize=1 * pq.ms)
+        self.binned_sts = conv.BinnedSpikeTrain(
+            [self.st_1, self.st_2], t_start=0 * pq.ms, t_stop=50. * pq.ms,
+            binsize=1 * pq.ms)
+
         # Binned sts to check errors raising
         self.st_check_binsize = conv.BinnedSpikeTrain(
             [self.st_1], t_start=0 * pq.ms, t_stop=50. * pq.ms,
@@ -274,9 +278,10 @@ class cross_correlation_histogram_TestCase(unittest.TestCase):
         cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
             self.binned_st1, self.binned_st2, window='full',
             binary=True, method='memory')
-        cch_unclipped_mem, bin_ids_unclipped_mem = sc.cross_correlation_histogram(
-            self.binned_st1, self.binned_st2, window='full',
-            binary=False, method='memory')
+        cch_unclipped_mem, bin_ids_unclipped_mem = \
+            sc.cross_correlation_histogram(
+                self.binned_st1, self.binned_st2, window='full',
+                binary=False, method='memory')
         # Check consistency two methods
         assert_array_equal(
             np.squeeze(cch_clipped.magnitude), np.squeeze(
@@ -302,6 +307,46 @@ class cross_correlation_histogram_TestCase(unittest.TestCase):
         assert_array_equal(
             target_numpy, np.squeeze(cch_unclipped.magnitude))
 
+        # Check cross correlation function for several displacements tau
+        # Note: use Elephant's corrcoef to verify the result
+        tau = [-25.0, 0.0, 13.0]  # in ms
+        for t in tau:
+            # adjust t_start, t_stop to shift by tau
+            t0 = np.min([self.st_1.t_start + t * pq.ms, self.st_2.t_start])
+            t1 = np.max([self.st_1.t_stop + t * pq.ms, self.st_2.t_stop])
+            st1 = neo.SpikeTrain(self.st_1.magnitude + t, units='ms',
+                                 t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
+            st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
+                                 t_start=t0 * pq.ms, t_stop=t1 * pq.ms)
+            binned_sts = conv.BinnedSpikeTrain([st1, st2],
+                                               binsize=1 * pq.ms,
+                                               t_start=t0 * pq.ms,
+                                               t_stop=t1 * pq.ms)
+            # calculate corrcoef
+            corrcoef = sc.corrcoef(binned_sts)[1, 0]
+
+            # expand t_stop to have two spike trains with same length as st1,
+            # st2
+            st1 = neo.SpikeTrain(self.st_1.magnitude, units='ms',
+                                 t_start=self.st_1.t_start,
+                                 t_stop=self.st_1.t_stop + np.abs(t) * pq.ms)
+            st2 = neo.SpikeTrain(self.st_2.magnitude, units='ms',
+                                 t_start=self.st_2.t_start,
+                                 t_stop=self.st_2.t_stop + np.abs(t) * pq.ms)
+            binned_st1 = conv.BinnedSpikeTrain(
+                st1, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
+                binsize=1 * pq.ms)
+            binned_st2 = conv.BinnedSpikeTrain(
+                st2, t_start=0 * pq.ms, t_stop=(50 + np.abs(t)) * pq.ms,
+                binsize=1 * pq.ms)
+            # calculate CCHcoef and take value at t=tau
+            CCHcoef, _ = sc.cch(binned_st1, binned_st2,
+                                cross_corr_coef=True)
+            left_edge = - binned_st1.num_bins + 1
+            tau_bin = int(t / float(binned_st1.binsize.magnitude))
+            assert_array_equal(
+                corrcoef, CCHcoef[tau_bin - left_edge].magnitude)
+
         # Check correlation using binary spike trains
         mat1 = np.array(self.binned_st1.to_bool_array()[0], dtype=int)
         mat2 = np.array(self.binned_st2.to_bool_array()[0], dtype=int)
@@ -329,9 +374,10 @@ class cross_correlation_histogram_TestCase(unittest.TestCase):
         cch_clipped_mem, bin_ids_clipped_mem = sc.cross_correlation_histogram(
             self.binned_st1, self.binned_st2, window='valid',
             binary=True, method='memory')
-        cch_unclipped_mem, bin_ids_unclipped_mem = sc.cross_correlation_histogram(
-            self.binned_st1, self.binned_st2, window='valid',
-            binary=False, method='memory')
+        cch_unclipped_mem, bin_ids_unclipped_mem = \
+            sc.cross_correlation_histogram(
+                self.binned_st1, self.binned_st2, window='valid',
+                binary=False, method='memory')
 
         # Check consistency two methods
         assert_array_equal(
@@ -424,22 +470,6 @@ class cross_correlation_histogram_TestCase(unittest.TestCase):
             self.binned_st1, self.binned_st2, window='full', binary=False)
         assert_array_equal(cch_win, cch_unclipped[19:80])
 
-        cch_win, bin_ids = sc.cch(
-            self.binned_st1, self.binned_st2, window=[-25*pq.ms, 25*pq.ms])
-        cch_win_mem, bin_ids_mem = sc.cch(
-            self.binned_st1, self.binned_st2, window=[-25*pq.ms, 25*pq.ms],
-            method='memory')
-
-        assert_array_equal(bin_ids, np.arange(-25, 26, 1))
-        assert_array_equal(
-            (bin_ids - 0.5) * self.binned_st1.binsize, cch_win.times)
-
-        assert_array_equal(bin_ids_mem, np.arange(-25, 26, 1))
-        assert_array_equal(
-            (bin_ids_mem - 0.5) * self.binned_st1.binsize, cch_win.times)
-
-        assert_array_equal(cch_win, cch_win_mem)
-
         _, bin_ids = sc.cch(
             self.binned_st1, self.binned_st2, window=[20, 30])
         _, bin_ids_mem = sc.cch(
@@ -458,48 +488,21 @@ class cross_correlation_histogram_TestCase(unittest.TestCase):
         assert_array_equal(bin_ids, np.arange(-30, -19, 1))
         assert_array_equal(bin_ids_mem, np.arange(-30, -19, 1))
 
-        # Cehck for wrong assignments to the window parameter
+        # Check for wrong assignments to the window parameter
+        # Test for window longer than the total length of the spike trains
         self.assertRaises(
             ValueError, sc.cross_correlation_histogram, self.binned_st1,
             self.binned_st2, window=[-60, 50])
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-60, 50], method='memory')
-
         self.assertRaises(
             ValueError, sc.cross_correlation_histogram, self.binned_st1,
             self.binned_st2, window=[-50, 60])
+        # Test for non-integer or wrong string input
         self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-50, 60], method='memory')
-
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-25.5*pq.ms, 25*pq.ms])
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-25.5*pq.ms, 25*pq.ms], method='memory')
-
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-25*pq.ms, 25.5*pq.ms])
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-25*pq.ms, 25.5*pq.ms], method='memory')
-
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-60*pq.ms, 50*pq.ms])
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-60*pq.ms, 50*pq.ms], method='memory')
-
-        self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-50*pq.ms, 60*pq.ms])
+            KeyError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window=[-25.5, 25.5])
         self.assertRaises(
-            ValueError, sc.cross_correlation_histogram, self.binned_st1,
-            self.binned_st2, window=[-50*pq.ms, 60*pq.ms], method='memory')
+            KeyError, sc.cross_correlation_histogram, self.binned_st1,
+            self.binned_st2, window='test')
 
     def test_border_correction(self):
         '''Test if the border correction for bins at the edges is correctly
@@ -557,5 +560,47 @@ class cross_correlation_histogram_TestCase(unittest.TestCase):
         '''
         self.assertEqual(sc.cross_correlation_histogram, sc.cch)
 
+
+class SpikeTimeTilingCoefficientTestCase(unittest.TestCase):
+
+    def setUp(self):
+        # These two arrays must be such that they do not have coincidences
+        # spanning across two neighboring bins, assuming ms bins [0,1),[1,2),...
+        self.test_array_1d_1 = [
+            1.3, 7.56, 15.87, 28.23, 30.9, 34.2, 38.2, 43.2]
+        self.test_array_1d_2 = [
+            1.02, 2.71, 18.82, 28.46, 28.79, 43.6]
+
+        # Build spike trains
+        self.st_1 = neo.SpikeTrain(
+            self.test_array_1d_1, units='ms', t_stop=50.)
+        self.st_2 = neo.SpikeTrain(
+            self.test_array_1d_2, units='ms', t_stop=50.)
+
+    def test_sttc(self):
+        # test for result
+        target = 0.8748350567
+        self.assertAlmostEqual(target, sc.sttc(self.st_1, self.st_2,
+                                               0.005 * pq.s))
+        # test no spiketrains
+        self.assertTrue(np.isnan(sc.sttc([], [])))
+
+        # test one spiketrain
+        self.assertTrue(np.isnan(sc.sttc(self.st_1, [])))
+
+        # test for one spike in a spiketrain
+        st1 = neo.SpikeTrain([1], units='ms', t_stop=1.)
+        st2 = neo.SpikeTrain([5], units='ms', t_stop=10.)
+        self.assertEqual(sc.sttc(st1, st2), 1.0)
+        self.assertTrue(bool(sc.sttc(st1, st2, 0.1 * pq.ms) < 0))
+
+        # test for high value of dt
+        self.assertEqual(sc.sttc(self.st_1, self.st_2, dt=5 * pq.s), 1.0)
+
+    def test_exist_alias(self):
+        # Test if the alias sttc still exists.
+        self.assertEqual(sc.spike_time_tiling_coefficient, sc.sttc)
+
+
 if __name__ == '__main__':
     unittest.main()
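
The SpikeTimeTilingCoefficientTestCase added above is driven entirely through
sc.sttc and its long-form alias sc.spike_time_tiling_coefficient; a minimal,
self-contained sketch of that call (spike times and dt are illustrative):

    import quantities as pq
    import neo
    import elephant.spike_train_correlation as sc

    st_a = neo.SpikeTrain([1.3, 7.56, 15.87, 28.23] * pq.ms, t_stop=50 * pq.ms)
    st_b = neo.SpikeTrain([1.02, 2.71, 18.82, 28.46] * pq.ms, t_stop=50 * pq.ms)

    # dt is the coincidence window placed around each spike
    tiling = sc.spike_time_tiling_coefficient(st_a, st_b, 0.005 * pq.s)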

+ 77 - 15
code/elephant/elephant/test/test_spike_train_generation.py

@@ -14,11 +14,11 @@ import warnings
 import neo
 import numpy as np
 from numpy.testing.utils import assert_array_almost_equal
-from scipy.stats import kstest, expon
-from quantities import ms, second, Hz, kHz, mV, dimensionless
+from scipy.stats import kstest, expon, poisson
+from quantities import V, s, ms, second, Hz, kHz, mV, dimensionless
 import elephant.spike_train_generation as stgen
 from elephant.statistics import isi
-
+from scipy.stats import expon
 
 def pdiff(a, b):
     """Difference between a and b as a fraction of a
@@ -40,10 +40,12 @@ class AnalogSignalThresholdDetectionTestCase(unittest.TestCase):
         # Load membrane potential simulated using Brian2
         # according to make_spike_extraction_test_data.py.
         curr_dir = os.path.dirname(os.path.realpath(__file__))
-        npz_file_loc = os.path.join(curr_dir,'spike_extraction_test_data.npz')
-        iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
-        data = iom2.read()
-        vm = data[0].segments[0].analogsignals[0]
+        raw_data_file_loc = os.path.join(curr_dir,'spike_extraction_test_data.txt')
+        raw_data = []
+        with open(raw_data_file_loc, 'r') as f:
+            for x in (f.readlines()):
+                raw_data.append(float(x))
+        vm = neo.AnalogSignal(raw_data, units=V, sampling_period=0.1*ms)
         spike_train = stgen.threshold_detection(vm)
         try:
             len(spike_train)
@@ -73,10 +75,12 @@ class AnalogSignalPeakDetectionTestCase(unittest.TestCase):
 
     def setUp(self):
         curr_dir = os.path.dirname(os.path.realpath(__file__))
-        npz_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.npz')
-        iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
-        data = iom2.read()
-        self.vm = data[0].segments[0].analogsignals[0]
+        raw_data_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.txt')
+        raw_data = []
+        with open(raw_data_file_loc, 'r') as f:
+            for x in (f.readlines()):
+                raw_data.append(float(x))
+        self.vm = neo.AnalogSignal(raw_data, units=V, sampling_period=0.1*ms)
         self.true_time_stamps = [0.0124,  0.0354,  0.0713,  0.1192,  0.1695,
                                  0.2201,  0.2711] * second
 
@@ -100,10 +104,12 @@ class AnalogSignalSpikeExtractionTestCase(unittest.TestCase):
     
     def setUp(self):
         curr_dir = os.path.dirname(os.path.realpath(__file__))
-        npz_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.npz')
-        iom2 = neo.io.PyNNNumpyIO(npz_file_loc)
-        data = iom2.read()
-        self.vm = data[0].segments[0].analogsignals[0]
+        raw_data_file_loc = os.path.join(curr_dir, 'spike_extraction_test_data.txt')
+        raw_data = []
+        with open(raw_data_file_loc, 'r') as f:
+            for x in (f.readlines()):
+                raw_data.append(float(x))
+        self.vm = neo.AnalogSignal(raw_data, units=V, sampling_period=0.1*ms)
         self.first_spike = np.array([-0.04084546, -0.03892033, -0.03664779,
                                      -0.03392689, -0.03061474, -0.02650277,
                                      -0.0212756, -0.01443531, -0.00515365,
@@ -181,6 +187,62 @@ class HomogeneousPoissonProcessTestCase(unittest.TestCase):
         self.assertLess(expected_last_spike - spiketrain[-1], 4*expected_mean_isi)
 
 
+class InhomogeneousPoissonProcessTestCase(unittest.TestCase):
+    def setUp(self):
+        rate_list = [[20] for i in range(1000)] + [[200] for i in range(1000)]
+        self.rate_profile = neo.AnalogSignal(
+            rate_list * Hz, sampling_period=0.001*s)
+        rate_0 = [[0] for i in range(1000)]
+        self.rate_profile_0 = neo.AnalogSignal(
+            rate_0 * Hz, sampling_period=0.001*s)
+        rate_negative = [[-1] for i in range(1000)]
+        self.rate_profile_negative = neo.AnalogSignal(
+            rate_negative * Hz, sampling_period=0.001 * s)
+        pass
+
+    def test_statistics(self):
+        # This is a statistical test that has a non-zero chance of failure
+        # during normal operation. Thus, we set the random seed to a value that
+        # creates a realization passing the test.
+        np.random.seed(seed=12345)
+
+        for rate in [self.rate_profile, self.rate_profile.rescale(kHz)]:
+            spiketrain = stgen.inhomogeneous_poisson_process(rate)
+            intervals = isi(spiketrain)
+
+            # Computing expected statistics and percentiles
+            expected_spike_count = (np.sum(
+                rate) * rate.sampling_period).simplified
+            percentile_count = poisson.ppf(.999, expected_spike_count)
+            expected_min_isi = (1 / np.min(rate))
+            expected_max_isi = (1 / np.max(rate))
+            percentile_min_isi = expon.ppf(.999, expected_min_isi)
+            percentile_max_isi = expon.ppf(.999, expected_max_isi)
+
+            # Testing (each should fail 1 every 1000 times)
+            self.assertLess(spiketrain.size, percentile_count)
+            self.assertLess(np.min(intervals), percentile_min_isi)
+            self.assertLess(np.max(intervals), percentile_max_isi)
+
+            # Testing t_start t_stop
+            self.assertEqual(rate.t_stop, spiketrain.t_stop)
+            self.assertEqual(rate.t_start, spiketrain.t_start)
+
+        # Testing type
+        spiketrain_as_array = stgen.inhomogeneous_poisson_process(
+            rate, as_array=True)
+        self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
+        self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))
+
+    def test_low_rates(self):
+        spiketrain = stgen.inhomogeneous_poisson_process(self.rate_profile_0)
+        self.assertEqual(spiketrain.size, 0)
+        
+    def test_negative_rates(self):
+        self.assertRaises(
+            ValueError, stgen.inhomogeneous_poisson_process,
+            self.rate_profile_negative)
+
 class HomogeneousGammaProcessTestCase(unittest.TestCase):
 
     def setUp(self):
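
The InhomogeneousPoissonProcessTestCase added above boils down to calling
stgen.inhomogeneous_poisson_process with a firing-rate profile given as a
neo.AnalogSignal; a condensed sketch mirroring the setUp and tests above:

    import neo
    import quantities as pq
    import elephant.spike_train_generation as stgen

    # 1 s at 20 Hz followed by 1 s at 200 Hz, sampled every millisecond
    rate_profile = neo.AnalogSignal([[20]] * 1000 + [[200]] * 1000,
                                    units=pq.Hz, sampling_period=1 * pq.ms)

    spiketrain = stgen.inhomogeneous_poisson_process(rate_profile)
    spike_times = stgen.inhomogeneous_poisson_process(rate_profile, as_array=True)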

+ 2 - 2
code/elephant/elephant/test/test_sta.py

@@ -185,7 +185,7 @@ class sta_TestCase(unittest.TestCase):
             units='mV', sampling_rate=10 / ms)
         cmp_array = cmp_array / 0.
         cmp_array.t_start = -1 * ms
-        assert_array_equal(STA[:, 1], cmp_array[:, 0])
+        assert_array_equal(STA.magnitude[:, 1], cmp_array.magnitude[:, 0])
 
     def test_all_spiketrains_empty(self):
         st = SpikeTrain([], units='ms', t_stop=self.asiga1.t_stop)
@@ -201,7 +201,7 @@ class sta_TestCase(unittest.TestCase):
             nan_array.fill(np.nan)
             cmp_array = AnalogSignal(np.array([nan_array, nan_array]).T,
                 units='mV', sampling_rate=10 / ms)
-            assert_array_equal(STA, cmp_array)
+            assert_array_equal(STA.magnitude, cmp_array.magnitude)
 
 
 # =========================================================================

+ 106 - 35
code/elephant/elephant/test/test_statistics.py

@@ -5,6 +5,7 @@ Unit tests for the statistics module.
 :copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
 :license: Modified BSD, see LICENSE.txt for details.
 """
+from __future__ import division
 
 import unittest
 
@@ -18,6 +19,7 @@ import elephant.statistics as es
 import elephant.kernels as kernels
 import warnings
 
+
 class isi_TestCase(unittest.TestCase):
     def setUp(self):
         self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
@@ -339,6 +341,40 @@ class LVTestCase(unittest.TestCase):
         self.assertRaises(ValueError, es.lv, np.array([seq, seq]))
 
 
+class CV2TestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_seq = [1, 28,  4, 47,  5, 16,  2,  5, 21, 12,
+                         4, 12, 59,  2,  4, 18, 33, 25,  2, 34,
+                         4,  1,  1, 14,  8,  1, 10,  1,  8, 20,
+                         5,  1,  6,  5, 12,  2,  8,  8,  2,  8,
+                         2, 10,  2,  1,  1,  2, 15,  3, 20,  6,
+                         11, 6, 18,  2,  5, 17,  4,  3, 13,  6,
+                         1, 18,  1, 16, 12,  2, 52,  2,  5,  7,
+                         6, 25,  6,  5,  3, 15,  4,  3, 16,  3,
+                         6,  5, 24, 21,  3,  3,  4,  8,  4, 11,
+                         5,  7,  5,  6,  8, 11, 33, 10,  7,  4]
+
+        self.target = 1.0022235296529176
+
+    def test_cv2_with_quantities(self):
+        seq = pq.Quantity(self.test_seq, units='ms')
+        assert_array_almost_equal(es.cv2(seq), self.target, decimal=9)
+
+    def test_cv2_with_plain_array(self):
+        seq = np.array(self.test_seq)
+        assert_array_almost_equal(es.cv2(seq), self.target, decimal=9)
+
+    def test_cv2_with_list(self):
+        seq = self.test_seq
+        assert_array_almost_equal(es.cv2(seq), self.target, decimal=9)
+
+    def test_cv2_raise_error(self):
+        seq = self.test_seq
+        self.assertRaises(AttributeError, es.cv2, [])
+        self.assertRaises(AttributeError, es.cv2, 1)
+        self.assertRaises(AttributeError, es.cv2, np.array([seq, seq]))
+
+
 class RateEstimationTestCase(unittest.TestCase):
 
     def setUp(self):
@@ -348,8 +384,10 @@ class RateEstimationTestCase(unittest.TestCase):
         self.st_margin = 5.0  # seconds
         self.st_rate = 10.0  # Hertz
 
-        st_num_spikes = np.random.poisson(self.st_rate*(self.st_dur-2*self.st_margin))
-        spike_train = np.random.rand(st_num_spikes) * (self.st_dur-2*self.st_margin) + self.st_margin
+        st_num_spikes = np.random.poisson(
+            self.st_rate*(self.st_dur-2*self.st_margin))
+        spike_train = np.random.rand(
+            st_num_spikes) * (self.st_dur-2*self.st_margin) + self.st_margin
         spike_train.sort()
 
         # convert spike train into neo objects
@@ -358,7 +396,7 @@ class RateEstimationTestCase(unittest.TestCase):
                                           t_stop=self.st_tr[1]*pq.s)
 
         # generation of a multiply used specific kernel
-        self.kernel = kernels.TriangularKernel(sigma = 0.03*pq.s)
+        self.kernel = kernels.TriangularKernel(sigma=0.03*pq.s)
 
     def test_instantaneous_rate_and_warnings(self):
         st = self.spike_train
@@ -366,24 +404,27 @@ class RateEstimationTestCase(unittest.TestCase):
         with warnings.catch_warnings(record=True) as w:
             inst_rate = es.instantaneous_rate(
                 st, sampling_period, self.kernel, cutoff=0)
-            self.assertEqual("The width of the kernel was adjusted to a minimally "
-                             "allowed width.", str(w[-2].message))
-            self.assertEqual("Instantaneous firing rate approximation contains "
-                             "negative values, possibly caused due to machine "
-                             "precision errors.", str(w[-1].message))
+            message1 = "The width of the kernel was adjusted to a minimally " \
+                       "allowed width."
+            message2 = "Instantaneous firing rate approximation contains " \
+                       "negative values, possibly caused due to machine " \
+                       "precision errors."
+            warning_message = [str(m.message) for m in w]
+            self.assertTrue(message1 in warning_message)
+            self.assertTrue(message2 in warning_message)
         self.assertIsInstance(inst_rate, neo.core.AnalogSignal)
-        self.assertEquals(
+        self.assertEqual(
             inst_rate.sampling_period.simplified, sampling_period.simplified)
-        self.assertEquals(inst_rate.simplified.units, pq.Hz)
-        self.assertEquals(inst_rate.t_stop.simplified, st.t_stop.simplified)
-        self.assertEquals(inst_rate.t_start.simplified, st.t_start.simplified)
+        self.assertEqual(inst_rate.simplified.units, pq.Hz)
+        self.assertEqual(inst_rate.t_stop.simplified, st.t_stop.simplified)
+        self.assertEqual(inst_rate.t_start.simplified, st.t_start.simplified)
 
     def test_error_instantaneous_rate(self):
         self.assertRaises(
-            TypeError, es.instantaneous_rate, spiketrain=[1,2,3]*pq.s,
+            TypeError, es.instantaneous_rate, spiketrain=[1, 2, 3]*pq.s,
             sampling_period=0.01*pq.ms, kernel=self.kernel)
         self.assertRaises(
-            TypeError, es.instantaneous_rate, spiketrain=[1,2,3],
+            TypeError, es.instantaneous_rate, spiketrain=[1, 2, 3],
             sampling_period=0.01*pq.ms, kernel=self.kernel)
         st = self.spike_train
         self.assertRaises(
@@ -396,9 +437,9 @@ class RateEstimationTestCase(unittest.TestCase):
             TypeError, es.instantaneous_rate, spiketrain=st,
             sampling_period=0.01*pq.ms, kernel='NONE')
         self.assertRaises(TypeError, es.instantaneous_rate, self.spike_train,
-            sampling_period=0.01*pq.s, kernel='wrong_string',
-            t_start=self.st_tr[0]*pq.s, t_stop=self.st_tr[1]*pq.s,
-            trim=False)
+                          sampling_period=0.01*pq.s, kernel='wrong_string',
+                          t_start=self.st_tr[0]*pq.s, t_stop=self.st_tr[1]*pq.s,
+                          trim=False)
         self.assertRaises(
             TypeError, es.instantaneous_rate, spiketrain=st,
             sampling_period=0.01*pq.ms, kernel=self.kernel, cutoff=20*pq.ms)
@@ -428,30 +469,30 @@ class RateEstimationTestCase(unittest.TestCase):
         kernel_resolution = 0.01*pq.s
         for kernel in kernel_list:
             rate_estimate_a0 = es.instantaneous_rate(self.spike_train,
-                                            sampling_period=kernel_resolution,
-                                            kernel='auto',
-                                            t_start=self.st_tr[0]*pq.s,
-                                            t_stop=self.st_tr[1]*pq.s,
-                                            trim=False)
+                                                     sampling_period=kernel_resolution,
+                                                     kernel='auto',
+                                                     t_start=self.st_tr[0]*pq.s,
+                                                     t_stop=self.st_tr[1]*pq.s,
+                                                     trim=False)
 
             rate_estimate0 = es.instantaneous_rate(self.spike_train,
-                                            sampling_period=kernel_resolution,
-                                            kernel=kernel)
+                                                   sampling_period=kernel_resolution,
+                                                   kernel=kernel)
 
             rate_estimate1 = es.instantaneous_rate(self.spike_train,
-                                            sampling_period=kernel_resolution,
-                                            kernel=kernel,
-                                            t_start=self.st_tr[0]*pq.s,
-                                            t_stop=self.st_tr[1]*pq.s,
-                                            trim=False)
+                                                   sampling_period=kernel_resolution,
+                                                   kernel=kernel,
+                                                   t_start=self.st_tr[0]*pq.s,
+                                                   t_stop=self.st_tr[1]*pq.s,
+                                                   trim=False)
 
             rate_estimate2 = es.instantaneous_rate(self.spike_train,
-                                            sampling_period=kernel_resolution,
-                                            kernel=kernel,
-                                            t_start=self.st_tr[0]*pq.s,
-                                            t_stop=self.st_tr[1]*pq.s,
-                                            trim=True)
-            ### test consistency
+                                                   sampling_period=kernel_resolution,
+                                                   kernel=kernel,
+                                                   t_start=self.st_tr[0]*pq.s,
+                                                   t_stop=self.st_tr[1]*pq.s,
+                                                   trim=True)
+            # test consistency
             rate_estimate_list = [rate_estimate0, rate_estimate1,
                                   rate_estimate2, rate_estimate_a0]
 
@@ -461,6 +502,36 @@ class RateEstimationTestCase(unittest.TestCase):
                                      x=rate_estimate.times.rescale('s').magnitude)[-1]
                 self.assertAlmostEqual(num_spikes, auc, delta=0.05*num_spikes)
 
+    def test_instantaneous_rate_spiketrainlist(self):
+        st_num_spikes = np.random.poisson(
+            self.st_rate*(self.st_dur-2*self.st_margin))
+        spike_train2 = np.random.rand(
+            st_num_spikes) * (self.st_dur - 2 * self.st_margin) + self.st_margin
+        spike_train2.sort()
+        spike_train2 = neo.SpikeTrain(spike_train2 * pq.s,
+                                      t_start=self.st_tr[0] * pq.s,
+                                      t_stop=self.st_tr[1] * pq.s)
+        st_rate_1 = es.instantaneous_rate(self.spike_train,
+                                          sampling_period=0.01*pq.s,
+                                          kernel=self.kernel)
+        st_rate_2 = es.instantaneous_rate(spike_train2,
+                                          sampling_period=0.01*pq.s,
+                                          kernel=self.kernel)
+        combined_rate = es.instantaneous_rate([self.spike_train, spike_train2],
+                                              sampling_period=0.01*pq.s,
+                                              kernel=self.kernel)
+        summed_rate = st_rate_1 + st_rate_2  # equivalent for identical kernels
+        for a, b in zip(combined_rate.magnitude, summed_rate.magnitude):
+            self.assertAlmostEqual(a, b, delta=0.0001)
+
+    # Regression test for #144
+    def test_instantaneous_rate_regression_144(self):
+        # The following spike train contains spikes that are so close to each
+        # other that the optimal kernel cannot be detected. Therefore, the
+        # function should raise a ValueError.
+        st = neo.SpikeTrain([2.12, 2.13, 2.15] * pq.s, t_stop=10 * pq.s)
+        self.assertRaises(ValueError, es.instantaneous_rate, st, 1 * pq.ms)
+
 
 class TimeHistogramTestCase(unittest.TestCase):
     def setUp(self):
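
The rate-estimation tests added above all reduce to the same call shape for
es.instantaneous_rate; a minimal sketch with an illustrative spike train and the
kernel used in the test setUp:

    import neo
    import quantities as pq
    import elephant.statistics as es
    import elephant.kernels as kernels

    st = neo.SpikeTrain([5.1, 5.9, 7.4, 9.8] * pq.s, t_start=0 * pq.s,
                        t_stop=10 * pq.s)
    kernel = kernels.TriangularKernel(sigma=0.03 * pq.s)

    # returns a neo.AnalogSignal holding the estimated firing rate in Hz
    rate = es.instantaneous_rate(st, sampling_period=0.01 * pq.s, kernel=kernel)

    # a list of spike trains yields the summed rate of the individual trains
    # (for identical kernels), as asserted in the spiketrainlist test above
    rate_sum = es.instantaneous_rate([st, st], sampling_period=0.01 * pq.s,
                                     kernel=kernel)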

+ 153 - 0
code/elephant/elephant/test/test_unitary_event_analysis.py

@@ -11,6 +11,16 @@ import quantities as pq
 import types
 import elephant.unitary_event_analysis as ue
 import neo
+import sys
+import os
+
+from distutils.version import StrictVersion
+
+
+def _check_for_incompatibilty():
+    smaller_version = StrictVersion(np.__version__) < '1.10.0'
+    return sys.version_info >= (3, 0) and smaller_version
+
 
 class UETestCase(unittest.TestCase):
 
@@ -338,6 +348,149 @@ class UETestCase(unittest.TestCase):
             UE_dic['indices']['trial26'],expected_indecis_tril26))
         self.assertTrue(np.allclose(
             UE_dic['indices']['trial4'],expected_indecis_tril4))
+        
+    @staticmethod    
+    def load_gdf2Neo(fname, trigger, t_pre, t_post):
+        """
+        load and convert the gdf file to Neo format by
+        cutting and aligning around a given trigger
+        # codes for trigger events (extracted from a
+        # documentation of an old file after
+        # contacting Dr. Alexa Riehle)
+        # 700 : ST (correct) 701, 702, 703, 704*
+        # 500 : ST (error =5) 501, 502, 503, 504*
+        # 1000: ST (if no selec) 1001,1002,1003,1004*
+        # 11  : PS 111, 112, 113, 114
+        # 12  : RS 121, 122, 123, 124
+        # 13  : RT 131, 132, 133, 134
+        # 14  : MT 141, 142, 143, 144
+        # 15  : ES 151, 152, 153, 154
+        # 16  : ES 161, 162, 163, 164
+        # 17  : ES 171, 172, 173, 174
+        # 19  : RW 191, 192, 193, 194
+        # 20  : ET 201, 202, 203, 204
+        """
+        data = np.loadtxt(fname)
+    
+        if trigger == 'PS_4':
+            trigger_code = 114
+        if trigger == 'RS_4':
+            trigger_code = 124
+        if trigger == 'RS':
+            trigger_code = 12
+        if trigger == 'ES':
+            trigger_code = 15
+        # specify units
+        units_id = np.unique(data[:, 0][data[:, 0] < 7])
+        # indices of the trigger
+        sel_tr_idx = np.where(data[:, 0] == trigger_code)[0]
+        # cutting the data by aligning on the trigger
+        data_tr = []
+        for id_tmp in units_id:
+            data_sel_units = []
+            for i_cnt, i in enumerate(sel_tr_idx):
+                start_tmp = data[i][1] - t_pre.magnitude
+                stop_tmp = data[i][1] + t_post.magnitude
+                sel_data_tmp = np.array(
+                    data[np.where((data[:, 1] <= stop_tmp) &
+                                     (data[:, 1] >= start_tmp))])
+                sp_units_tmp = sel_data_tmp[:, 1][
+                    np.where(sel_data_tmp[:, 0] == id_tmp)[0]]
+                if len(sp_units_tmp) > 0:
+                    aligned_time = sp_units_tmp - start_tmp
+                    data_sel_units.append(neo.SpikeTrain(
+                        aligned_time * pq.ms, t_start=0 * pq.ms,
+                        t_stop=t_pre + t_post))
+                else:
+                    data_sel_units.append(neo.SpikeTrain(
+                        [] * pq.ms, t_start=0 * pq.ms,
+                        t_stop=t_pre + t_post))
+            data_tr.append(data_sel_units)
+        data_tr.reverse()
+        spiketrain = np.vstack([i for i in data_tr]).T
+        return spiketrain
+
+    # test if the result of the newly implemented Unitary Events analysis
+    # in Elephant is consistent with the result of
+    # Riehle et al 1997 Science
+    # (see Rostami et al (2016) [Re] Science, 3(1):1-17)
+    @unittest.skipIf(_check_for_incompatibilty(),
+                     'Incompatible package versions')
+    def test_Riehle_et_al_97_UE(self):      
+        from neo.rawio.tests.tools import (download_test_file,
+                                           create_local_temp_dir,
+                                           make_all_directories)
+        from neo.test.iotest.tools import (cleanup_test_file)
+        url = [
+            "https://raw.githubusercontent.com/ReScience-Archives/" +
+            "Rostami-Ito-Denker-Gruen-2017/master/data",
+            "https://raw.githubusercontent.com/ReScience-Archives/" +
+            "Rostami-Ito-Denker-Gruen-2017/master/data"]
+        shortname = "unitary_event_analysis_test_data"
+        local_test_dir = create_local_temp_dir(
+            shortname, os.environ.get("ELEPHANT_TEST_FILE_DIR"))
+        files_to_download = ["extracted_data.npy", "winny131_23.gdf"]
+        make_all_directories(files_to_download,
+                             local_test_dir)
+        for f_cnt, f in enumerate(files_to_download):
+            download_test_file(f, local_test_dir, url[f_cnt])
+
+        # load spike data of figure 2 of Riehle et al 1997
+        sys.path.append(local_test_dir)
+        file_name = '/winny131_23.gdf'
+        trigger = 'RS_4'
+        t_pre = 1799 * pq.ms
+        t_post = 300 * pq.ms
+        spiketrain = self.load_gdf2Neo(local_test_dir + file_name,
+                                       trigger, t_pre, t_post)
+
+        # calculating UE ...
+        winsize = 100 * pq.ms
+        binsize = 5 * pq.ms
+        winstep = 5 * pq.ms
+        pattern_hash = [3]
+        method = 'analytic_TrialAverage'
+        t_start = spiketrain[0][0].t_start
+        t_stop = spiketrain[0][0].t_stop
+        t_winpos = ue._winpos(t_start, t_stop, winsize, winstep)
+        significance_level = 0.05
+
+        UE = ue.jointJ_window_analysis(
+            spiketrain, binsize, winsize, winstep,
+            pattern_hash, method=method)
+        # load extracted data from figure 2 of Riehle et al 1997
+        try:
+            extracted_data = np.load(
+                local_test_dir + '/extracted_data.npy').item()
+        except UnicodeError:
+            extracted_data = np.load(
+                local_test_dir + '/extracted_data.npy', encoding='latin1').item()
+        Js_sig = ue.jointJ(significance_level)
+        sig_idx_win = np.where(UE['Js'] >= Js_sig)[0]
+        diff_UE_rep = []
+        y_cnt = 0
+        for tr in range(len(spiketrain)):
+            x_idx = np.sort(
+                np.unique(UE['indices']['trial' + str(tr)],
+                          return_index=True)[1])
+            x = UE['indices']['trial' + str(tr)][x_idx]
+            if len(x) > 0:
+                # choose only the significant coincidences
+                xx = []
+                for j in sig_idx_win:
+                    xx = np.append(xx, x[np.where(
+                        (x * binsize >= t_winpos[j]) &
+                        (x * binsize < t_winpos[j] + winsize))])
+                x_tmp = np.unique(xx) * binsize.magnitude
+                if len(x_tmp) > 0:
+                    ue_trial = np.sort(extracted_data['ue'][y_cnt])
+                    diff_UE_rep = np.append(
+                        diff_UE_rep, x_tmp - ue_trial)
+                    y_cnt += 1
+        np.testing.assert_array_less(np.abs(diff_UE_rep), 0.3)
+        cleanup_test_file('dir', local_test_dir)
+
+        
 def suite():
     suite = unittest.makeSuite(UETestCase, 'test')
     return suite
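
The Riehle et al. (1997) regression test added above ultimately makes a single
call into the unitary-events pipeline; the sketch below reproduces that call
shape with toy random spike trains in place of the downloaded reference data.
The analysis parameters mirror the test, the data are purely illustrative:

    import numpy as np
    import quantities as pq
    import neo
    import elephant.unitary_event_analysis as ue

    n_trials, n_units = 10, 2
    spiketrains = np.empty((n_trials, n_units), dtype=object)
    for i in range(n_trials):
        for j in range(n_units):
            spike_times = np.sort(np.random.uniform(0, 2099, size=40))
            spiketrains[i, j] = neo.SpikeTrain(spike_times * pq.ms,
                                               t_stop=2099 * pq.ms)

    # same binsize, winsize, winstep and pattern hash as in the test above
    UE = ue.jointJ_window_analysis(spiketrains, 5 * pq.ms, 100 * pq.ms,
                                   5 * pq.ms, [3],
                                   method='analytic_TrialAverage')
    Js_sig = ue.jointJ(0.05)  # surprise value at the 5% significance level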

+ 1 - 1
code/elephant/elephant/unitary_event_analysis.py

@@ -564,7 +564,7 @@ def gen_pval_anal(
             if len(n_emp) > 1:
                 raise ValueError(
                     'in surrogate method the p_value can be calculated only for one pattern!')
-            return np.sum(exp_dist[n_emp[0]:])
+            return np.sum(exp_dist[int(n_emp[0]):])
 
     return pval, n_exp
 

+ 2 - 11
code/elephant/requirements.txt

@@ -1,14 +1,5 @@
-# essential
--e git+https://github.com/NeuralEnsemble/python-neo.git@snapshot-20150821#egg=neo-snapshot-20150821
+neo>=0.5.0
 numpy>=1.8.2
 quantities>=0.10.1
 scipy>=0.14.0
-six
-# optional
-#pandas>=0.14.1
-# for building documentation
-#numpydoc>=0.5
-#sklearn>=0.15.1
-#sphinx>=1.2.2
-# for running tests
-nose>=1.3.3
+six>=1.10.0

+ 44 - 17
code/elephant/setup.py

@@ -2,38 +2,65 @@
 
 from setuptools import setup
 import os
+import sys
+try:
+    from urllib.request import urlretrieve
+except ImportError:
+    from urllib import urlretrieve
 
 long_description = open("README.rst").read()
-install_requires = ['neo>0.3.3',
-                    'numpy>=1.8.2',
-                    'quantities>=0.10.1',
-                    'scipy>=0.14.0',
-                    'six>=1.10.0']
-extras_require = {'pandas': ['pandas>=0.14.1'],
-                  'docs': ['numpydoc>=0.5',
-                           'sphinx>=1.2.2'],
-                  'tests': ['nose>=1.3.3']}
+with open('requirements.txt', 'r') as fp:
+    install_requires = fp.read()
+extras_require = {}
+for extra in ['extras', 'docs', 'tests']:
+    with open('requirements-{0}.txt'.format(extra), 'r') as fp:
+        extras_require[extra] = fp.read()
+
+# spade specific
+is_64bit = sys.maxsize > 2 ** 32
+is_python3 = float(sys.version[0:3]) > 2.7
+
+if is_python3:
+    if is_64bit:
+        urlretrieve('http://www.borgelt.net/bin64/py3/fim.so',
+                    'elephant/spade_src/fim.so')
+    else:
+        urlretrieve('http://www.borgelt.net/bin32/py3/fim.so',
+                    'elephant/spade_src/fim.so')
+else:
+    if is_64bit:
+        urlretrieve('http://www.borgelt.net/bin64/py2/fim.so',
+                    'elephant/spade_src/fim.so')
+    else:
+        urlretrieve('http://www.borgelt.net/bin32/py2/fim.so',
+                    'elephant/spade_src/fim.so')
 
 setup(
     name="elephant",
-    version='0.4.1',
+    version='0.6.0',
     packages=['elephant', 'elephant.test'],
-    package_data = {'elephant' : [os.path.join('current_source_density_src', 'test_data.mat'),
-                                  os.path.join('current_source_density_src', 'LICENSE'),
-                                  os.path.join('current_source_density_src', 'README.md'),
-                                  os.path.join('current_source_density_src', '*.py')]},
-    
+    package_data={'elephant': [
+        os.path.join('current_source_density_src', 'test_data.mat'),
+        os.path.join('current_source_density_src', 'LICENSE'),
+        os.path.join('current_source_density_src', 'README.md'),
+        os.path.join('current_source_density_src', '*.py'),
+        os.path.join('spade_src', '*.py'),
+        os.path.join('spade_src', 'LICENSE'),
+        os.path.join('spade_src', '*.so')
+    ]},
+
     install_requires=install_requires,
     extras_require=extras_require,
 
     author="Elephant authors and contributors",
     author_email="andrew.davison@unic.cnrs-gif.fr",
-    description="Elephant is a package for analysis of electrophysiology data in Python",
+    description="Elephant is a package for analysis of electrophysiology"
+                " data in Python",
     long_description=long_description,
     license="BSD",
     url='http://neuralensemble.org/elephant',
     classifiers=[
-        'Development Status :: 4 - Beta',
+        'Development Status :: 5 - Production/Stable',
         'Intended Audience :: Science/Research',
         'License :: OSI Approved :: BSD License',
         'Natural Language :: English',

+ 7 - 7
code/odml_utils.py

@@ -57,16 +57,16 @@ def get_TrialCount(doc, trialtype=None, performance_code=None):
     sec = doc['Recording']['TaskSettings']
 
     if performance_code == 255:
-        output = sec.properties['CorrectTrialCount'].value.data
+        output = sec.properties['CorrectTrialCount'].values[0]
 
     elif performance_code == 191:
-        output = sec.properties['GripErrorTrialCount'].value.data
+        output = sec.properties['GripErrorTrialCount'].values[0]
 
     elif performance_code in [0, 159, 161, 163, 167, 175]:
         subsec = sec['TrialTypeSettings']
 
     else:
-        output = sec.properties['TotalTrialCount'].value.data
+        output = sec.properties['TotalTrialCount'].values[0]
 
     # TODO: extend to trial types and other performance codes
 
@@ -93,7 +93,7 @@ def get_TrialIDs(doc, idtype='TrialID'):
     for trsec in sec.itersections(filter_func=ff):
         def FF(x): return x.name == idtype
         output.append(
-            [p for p in trsec.iterproperties(filter_func=FF)][0].value.data)
+            [p for p in trsec.iterproperties(filter_func=FF)][0].values[0])
 
     return sorted(output)
 
@@ -118,7 +118,7 @@ def get_TrialType(doc, trialid, code=True):
     def ff(x): return x.name == 'Trial_%03i' % trialid
     sec = [s for s in doc.itersections(filter_func=ff)][0]
 
-    output = sec.properties['TrialType'].value.data
+    output = sec.properties['TrialType'].values[0]
 
     return output
 
@@ -145,7 +145,7 @@ def get_PerformanceCode(doc, trialid, code=True):
     sec = [s for s in doc.itersections(filter_func=ff1)][0]
 
     def ff2(x): return x.name == 'PerformanceCode'
-    output = [p for p in sec.iterproperties(filter_func=ff2)][0].value.data
+    output = [p for p in sec.iterproperties(filter_func=ff2)][0].values[0]
 
     if code:
         return output
@@ -155,7 +155,7 @@ def get_PerformanceCode(doc, trialid, code=True):
         sec = [s for s in doc.itersections(filter_func=ff3)][0]
 
         def ff4(x): return x.name == 'pc_%i' % output
-        output = [p for p in sec.iterproperties(filter_func=ff4)][0].value.data
+        output = [p for p in sec.iterproperties(filter_func=ff4)][0].values[0]
 
         return output
 

+ 2 - 0
code/python-neo/.gitignore

@@ -13,6 +13,8 @@
 .settings
 *.tmp*
 .idea
+*.swp
+*.swo
 
 # Compiled source #
 ###################

+ 2 - 4
code/python-neo/.travis.yml

@@ -3,15 +3,13 @@ python:
   - "2.7"
   - "3.4"
   - "3.5"
-  - "3.6"  
+  - "3.6"
 
 # command to install dependencies
 install:
   - pip install -r requirements.txt
   - pip install coveralls
-  - pip install . 
+  - pip install .
 # command to run tests, e.g. python setup.py test
 script:
   nosetests --with-coverage --cover-package=neo
-after_success:
-  coveralls

+ 17 - 17
code/python-neo/CITATION.txt

@@ -1,21 +1,21 @@
 To cite Neo in publications, please use:
 
-Garcia S., Guarino D., Jaillet F., Jennings T.R., Pröpper R., Rautenberg P.L.,
-Rodgers C., Sobolev A.,Wachtler T., Yger P. and Davison A.P. (2014)
-Neo: an object model for handling electrophysiology data in multiple formats.
-Frontiers in Neuroinformatics 8:10: doi:10.3389/fninf.2014.00010
+    Garcia S., Guarino D., Jaillet F., Jennings T.R., Pröpper R., Rautenberg P.L.,
+    Rodgers C., Sobolev A.,Wachtler T., Yger P. and Davison A.P. (2014)
+    Neo: an object model for handling electrophysiology data in multiple formats.
+    Frontiers in Neuroinformatics 8:10: doi:10.3389/fninf.2014.00010
 
-A BibTeX entry for LaTeX users is
+A BibTeX entry for LaTeX users is::
 
-@article{neo09,
- author = {Garcia S. and Guarino D. and Jaillet F. and Jennings T.R. and Pröpper R. and
-           Rautenberg P.L. and Rodgers C. and Sobolev A. and Wachtler T. and Yger P.
-           and Davison A.P.},
- doi = {10.3389/fninf.2014.00010},
- full_text = {http://www.frontiersin.org/Journal/10.3389/fninf.2014.00010/abstract},
- journal = {Frontiers in Neuroinformatics},
- month = {February},
- title = {Neo: an object model for handling electrophysiology data in multiple formats},
- volume = {8:10},
- year = {2014}
-}
+    @article{neo09,
+        author = {Garcia S. and Guarino D. and Jaillet F. and Jennings T.R. and Pröpper R. and
+                  Rautenberg P.L. and Rodgers C. and Sobolev A. and Wachtler T. and Yger P.
+                  and Davison A.P.},
+        doi = {10.3389/fninf.2014.00010},
+        full_text = {http://www.frontiersin.org/Journal/10.3389/fninf.2014.00010/abstract},
+        journal = {Frontiers in Neuroinformatics},
+        month = {February},
+        title = {Neo: an object model for handling electrophysiology data in multiple formats},
+        volume = {8:10},
+        year = {2014}
+    }

+ 1 - 1
code/python-neo/LICENSE.txt

@@ -1,4 +1,4 @@
-Copyright (c) 2010-2016, Neo authors and contributors
+Copyright (c) 2010-2018, Neo authors and contributors
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

+ 2 - 1
code/python-neo/MANIFEST.in

@@ -1,8 +1,9 @@
 include README.rst
+include LICENSE.txt
 include CITATION.rst
 prune drafts
 include examples/*.py
 recursive-include doc *
 prune doc/build
 exclude doc/source/images/*.svg
-exclude doc/source/images/*.dia
+exclude doc/source/images/*.dia

+ 14 - 5
code/python-neo/README.rst

@@ -13,9 +13,11 @@ shared object model. In order to be as lightweight a dependency as possible,
 Neo is deliberately limited to represention of data, with no functions for data
 analysis or visualization.
 
-Neo is used by a number of other software tools, including OpenElectrophy_
-and SpykeViewer_ (data analysis and visualization), Elephant_ (data analysis),
-the G-node_ suite (databasing) and PyNN_ (simulations).
+Neo is used by a number of other software tools, including 
+SpykeViewer_ (data analysis and visualization), Elephant_ (data analysis),
+the G-node_ suite (databasing), PyNN_ (simulations), tridesclous_ (spike sorting)
+and ephyviewer_ (data visualization).
+OpenElectrophy_ (data analysis and visualization) uses an older version of neo.
 
 Neo implements a hierarchical data model well adapted to intracellular and
 extracellular electrophysiology and EEG data with support for multi-electrodes
@@ -31,7 +33,10 @@ Code status
 
 .. image:: https://travis-ci.org/NeuralEnsemble/python-neo.png?branch=master
    :target: https://travis-ci.org/NeuralEnsemble/python-neo
-   :alt: Unit Test Status
+   :alt: Unit Test Status (TravisCI)
+.. image:: https://circleci.com/gh/NeuralEnsemble/python-neo.svg?style=svg
+    :target: https://circleci.com/gh/NeuralEnsemble/python-neo
+    :alt: Unit Test Status (CircleCI)
 .. image:: https://coveralls.io/repos/NeuralEnsemble/python-neo/badge.png
    :target: https://coveralls.io/r/NeuralEnsemble/python-neo
    :alt: Unit Test Coverage
@@ -49,7 +54,9 @@ More information
 
 For installation instructions, see doc/source/install.rst
 
-:copyright: Copyright 2010-2016 by the Neo team, see doc/source/authors.rst.
+To cite Neo in publications, see CITATION.txt
+
+:copyright: Copyright 2010-2018 by the Neo team, see doc/source/authors.rst.
 :license: 3-Clause Revised BSD License, see LICENSE.txt for details.
 
 
@@ -63,3 +70,5 @@ For installation instructions, see doc/source/install.rst
 .. _quantities: http://pypi.python.org/pypi/quantities
 .. _`NeuralEnsemble mailing list`: http://groups.google.com/group/neuralensemble
 .. _`issue tracker`: https://github.c
+.. _tridesclous: https://github.com/tridesclous/tridesclous
+.. _ephyviewer: https://github.com/NeuralEnsemble/ephyviewer

+ 13 - 2
code/python-neo/doc/source/authors.rst

@@ -36,7 +36,15 @@ and may not be the current affiliation of a contributor.
 * Mieszko Grodzicki
 * Rick Gerkin [15]
 * Matthieu Sénoville [2]
-
+* Chadwick Boulay [16]
+* Björn Müller [13]
+* William Hart [17]
+* erikli(github)
+* Jeffrey Gill [18]
+* Lucas (lkoelman@github)
+* Mark Histed
+* Mike Sintsov
+* Scott W Harden [19]
 
 1. Centre de Recherche en Neuroscience de Lyon, CNRS UMR5292 - INSERM U1028 - Universite Claude Bernard Lyon 1
 2. Unité de Neuroscience, Information et Complexité, CNRS UPR 3293, Gif-sur-Yvette, France
@@ -53,6 +61,9 @@ and may not be the current affiliation of a contributor.
 13. INM-6, Forschungszentrum Jülich, Germany
 14. University of Texas at Austin
 15. Arizona State University
-
+16. Ottawa Hospital Research Institute, Canada
+17. Swinburne University of Technology, Australia
+18. Case Western Reserve University (CWRU) · Department of Biology
+19. Harden Technologies, LLC
 
 If we've somehow missed you off the list we're very sorry - please let us know.

+ 53 - 39
code/python-neo/doc/source/conf.py

@@ -15,12 +15,22 @@
 import os
 import sys
 
+from distutils.version import LooseVersion
+
+with open("../../neo/version.py") as fp:
+    d = {}
+    exec(fp.read(), d)
+    neo_release = d['version']
+
+neo_version = '.'.join(str(e) for e in LooseVersion(neo_release).version[:2])
+
+
 AUTHORS = u'Neo authors and contributors <neuralensemble@googlegroups.com>'
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
+# sys.path.append(os.path.abspath('.'))
 
 # -- General configuration ----------------------------------------------------
 
@@ -35,36 +45,36 @@ templates_path = ['_templates']
 source_suffix = '.rst'
 
 # The encoding of source files.
-#source_encoding = 'utf-8'
+# source_encoding = 'utf-8'
 
 # The master toctree document.
 master_doc = 'index'
 
 # General information about the project.
 project = u'Neo'
-copyright = u'2010-2017, ' + AUTHORS
+copyright = u'2010-2018, ' + AUTHORS
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '0.5'
+version = neo_version
 # The full version, including alpha/beta/rc tags.
-release = '0.5.2'
+release = neo_release
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of documents that shouldn't be included in the build.
-#unused_docs = []
+# unused_docs = []
 
 # List of directories, relative to source directory, that shouldn't be searched
 # for source files.
@@ -72,50 +82,50 @@ exclude_trees = []
 
 # The reST default role (used for this markup: `text`)
 # to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = 'sphinx'
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 
 # -- Options for HTML output --------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  Major themes that come with
 # Sphinx are currently 'default' and 'sphinxdoc'.
-#html_theme = 'default'
+# html_theme = 'default'
 html_theme = 'sphinxdoc'
-#html_theme = 'haiku'
-#html_theme = 'scrolls'
-#html_theme = 'agogo'
+# html_theme = 'haiku'
+# html_theme = 'scrolls'
+# html_theme = 'agogo'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -135,38 +145,38 @@ html_static_path = ['_static']
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_use_modindex = True
+# html_use_modindex = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
+# html_file_suffix = ''
 
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'neodoc'
@@ -175,10 +185,10 @@ htmlhelp_basename = 'neodoc'
 # -- Options for LaTeX output -------------------------------------------------
 
 # The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
+# latex_paper_size = 'letter'
 
 # The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
+# latex_font_size = '10pt'
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author,
@@ -188,19 +198,23 @@ latex_documents = [('index', 'neo.tex', u'Neo Documentation',
 
 # The name of an image file (relative to this directory) to place at the
 # top of the title page.
-#latex_logo = None
+# latex_logo = None
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
 
 # Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
+# latex_preamble = ''
 
 # Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
 
 # If false, no module index is generated.
-#latex_use_modindex = True
+# latex_use_modindex = True
 
 todo_include_todos = True  # set to False before releasing documentation
+
+rst_epilog = """
+.. |neo_github_url| replace:: https://github.com/NeuralEnsemble/python-neo/archive/neo-{0}.zip
+""".format(neo_release)

+ 31 - 13
code/python-neo/doc/source/core.rst

@@ -73,6 +73,10 @@ and an array, giving you access to all of the methods available for those object
 For example, you can pass a :py:class:`SpikeTrain` directly to the :py:func:`numpy.histogram`
 function, or an :py:class:`AnalogSignal` directly to the :py:func:`numpy.std` function.
 
+If you want a plain :class:`numpy.ndarray`, use the ``magnitude`` property and the ``rescale`` method from quantities::
+
+   >>> np_sig = neo_analogsignal.rescale('mV').magnitude
+   >>> np_times = neo_analogsignal.times.rescale('s').magnitude
 
 Relationships between objects
 =============================
@@ -80,17 +84,17 @@ Relationships between objects
 Container objects like :py:class:`Block` or :py:class:`Segment` are gateways to
 access other objects. For example, a :class:`Block` can access a :class:`Segment`
 with::
-     
+
     >>> bl = Block()
     >>> bl.segments
     # gives a list of segments
 
 A :class:`Segment` can access the :class:`AnalogSignal` objects that it contains with::
-    
+
     >>> seg = Segment()
     >>> seg.analogsignals
     # gives a list of AnalogSignals
-    
+
 In the :ref:`neo_diagram` below, these *one to many* relationships are represented by cyan arrows.
 In general, an object can access its children with an attribute *childname+s* in lower case, e.g.
 
@@ -127,7 +131,7 @@ In some cases, a one-to-many relationship is sufficient. Here is a simple exampl
 
     from neo import Block, ChannelIndex
     bl = Block()
-    
+
     # the four tetrodes
     for i in range(4):
         chx = ChannelIndex(name='Tetrode %d' % i,
@@ -183,11 +187,6 @@ Relationship:
 
 For more details, see the :doc:`api_reference`.
 
-    
-
-
-
-
 Initialization
 ==============
 
@@ -195,7 +194,8 @@ Neo objects are initialized with "required", "recommended", and "additional" arg
 
     - Required arguments MUST be provided at the time of initialization. They are used in the construction of the object.
     - Recommended arguments may be provided at the time of initialization. They are accessible as Python attributes. They can also be set or modified after initialization.
-    - Additional arguments are defined by the user and are not part of the Neo object model. A primary goal of the Neo project is extensibility. These additional arguments are entries in an attribute of the object: a Python dict called :py:attr:`annotations`.
+    - Additional arguments are defined by the user and are not part of the Neo object model. A primary goal of the Neo project is extensibility. These additional arguments are entries in an attribute of the object: a Python dict called :py:attr:`annotations`.
+      Note: Neo annotations are not the same as the *__annotations__* attribute introduced in Python 3.6.
 
 Example: SpikeTrain
 -------------------
@@ -231,7 +231,7 @@ Finally, let's consider "additional arguments". These are the ones you define fo
     >>> st = neo.SpikeTrain(times=[3, 4, 5], units='sec', t_stop=10.0, rat_name='Fred')
     >>> print(st.annotations)
     {'rat_name': 'Fred'}
-    
+
 Because ``rat_name`` is not part of the Neo object model, it is placed in the dict :py:attr:`annotations`. This dict can be modified as necessary by your code.
 
 Annotations
@@ -248,8 +248,26 @@ possessed by all Neo core objects, e.g.::
 
 Since annotations may be written to a file or database, there are some
 limitations on the data types of annotations: they must be "simple" types or
-containers (lists, dicts, NumPy arrays) of simple types, where the simple types
+containers (lists, dicts, tuples, NumPy arrays) of simple types, where the simple types
 are ``integer``, ``float``, ``complex``, ``Quantity``, ``string``, ``date``, ``time`` and
 ``datetime``.
 
-See :ref:`specific_annotations`
+Array Annotations
+-----------------
+
+Next to "regular" annotations there is also a way to annotate arrays of values
+in order to create annotations with one value per data point. Using this feature,
+called Array Annotations, the consistency of those annotations with the actual data
+is ensured.
+Apart from adding those on object construction, Array Annotations can also be added
+using the :meth:`array_annotate` method provided by all Neo data objects, e.g.::
+
+    >>> sptr = SpikeTrain(times=[1, 2, 3]*pq.s, t_stop=3*pq.s)
+    >>> sptr.array_annotate(index=[0, 1, 2], relevant=[True, False, True])
+    >>> print(sptr.array_annotations)
+    {'index': array([0, 1, 2]), 'relevant': array([ True, False,  True])}
+
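Array Annotations can also be supplied at construction time via the ``array_annotations``
keyword argument documented for the data objects. A minimal sketch (the values are purely
illustrative)::

    >>> import quantities as pq
    >>> from neo import SpikeTrain
    >>> sptr = SpikeTrain(times=[1, 2, 3]*pq.s, t_stop=3*pq.s,
    ...                   array_annotations={'index': [0, 1, 2]})
    >>> print(sptr.array_annotations)
    {'index': array([0, 1, 2])}
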
+Since Array Annotations may be written to a file or database, there are some
+limitations on the data types of arrays: they must be 1-dimensional (i.e. not nested)
+and contain the same types as annotations:
+``integer``, ``float``, ``complex``, ``Quantity``, ``string``, ``date``, ``time`` and ``datetime``.

+ 20 - 21
code/python-neo/doc/source/developers_guide.rst

@@ -38,11 +38,9 @@ a GitHub account and then set to watch the repository at `GitHub Repository`_
 Requirements
 ------------
 
-    * Python_ 2.6, 2.7, 3.3-3.5
+    * Python_ 2.7, 3.4 or later
     * numpy_ >= 1.7.1
     * quantities_ >= 0.9.0
-    * if using Python 2.6, unittest2_ >= 0.5.1
-    * Setuptools >= 0.7
     * nose_ >= 0.11.1 (for running tests)
     * Sphinx_ >= 0.6.4 (for building documentation)
     * (optional) tox_ >= 0.9 (makes it easier to test with multiple Python versions)
@@ -102,7 +100,7 @@ on your system::
 
     $ cd neo/test
 
-With Python 2.7 or 3.3::
+With Python 2.7 or 3.x::
 
     $ python -m unittest discover
     $ python3 -m unittest discover
@@ -198,13 +196,12 @@ open a pull request on GitHub
 (see https://help.github.com/articles/using-pull-requests).
 
 
-Python 3
---------
+Python version
+--------------
 
-Neo core should work with both recent versions of Python 2 (versions 2.6 and
-2.7) and Python 3 (version 3.3 or newer). Neo IO modules should ideally work with both
-Python 2 and 3, but certain modules may only work with one or the other
-(see :doc:`install`).
+Neo core should work with both Python 2.7 and Python 3 (version 3.4 or newer).
+Neo IO modules should ideally work with both Python 2 and 3, but certain
+modules may only work with one or the other (see :doc:`install`).
 
 So far, we have managed to write code that works with both Python 2 and 3.
 Mainly this involves avoiding the ``print`` statement (use ``logging.info``
@@ -225,7 +222,7 @@ Coding standards and style
 --------------------------
 
 All code should conform as much as possible to `PEP 8`_, and should run with
-Python 2.6, 2.7, and 3.3 or newer.
+Python 2.7, and 3.4 or newer.
 
 You can use the `pep8`_ program to check the code for PEP 8 conformity.
 You can also use `flake8`_, which combines pep8 and pyflakes.
@@ -245,25 +242,27 @@ Making a release
 
 Add a section in :file:`/doc/source/whatisnew.rst` for the release.
 
-First check that the version string (in :file:`neo/version.py`,
-:file:`setup.py`, :file:`doc/conf.py` and :file:`doc/install.rst`) is correct.
+First check that the version string (in :file:`neo/version.py`) is correct.
 
 To build a source package::
 
     $ python setup.py sdist
 
-To upload the package to `PyPI`_ (currently Samuel Garcia and Andrew Davison
-have the necessary permissions to do this)::
 
-    $ python setup.py sdist upload
-    $ python setup.py upload_docs --upload-dir=doc/build/html
-
-.. talk about readthedocs
-
-Finally, tag the release in the Git repository and push it::
+Tag the release in the Git repository and push it::
 
     $ git tag <version>
     $ git push --tags origin
+    $ git push --tags upstream
+
+
+To upload the package to `PyPI`_ (currently Samuel Garcia,  Andrew Davison,
+Michael Denker and Julia Sprenger have the necessary permissions to do this)::
+
+    $ twine upload dist/neo-0.X.Y.tar.gz
+
+.. talk about readthedocs
+
     
 
 .. make a release branch

+ 40 - 41
code/python-neo/doc/source/images/generate_diagram.py

@@ -32,20 +32,20 @@ def get_rect_height(name, obj):
     nlines += len(getattr(obj, '_single_child_objects', []))
     nlines += len(getattr(obj, '_multi_child_objects', []))
     nlines += len(getattr(obj, '_multi_parent_objects', []))
-    return nlines*line_heigth
+    return nlines * line_heigth
 
 
 def annotate(ax, coord1, coord2, connectionstyle, color, alpha):
     arrowprops = dict(arrowstyle='fancy',
-                      #~ patchB=p,
+                      # ~ patchB=p,
                       shrinkA=.3, shrinkB=.3,
                       fc=color, ec=color,
                       connectionstyle=connectionstyle,
                       alpha=alpha)
     bbox = dict(boxstyle="square", fc="w")
     a = ax.annotate('', coord1, coord2,
-                    #xycoords="figure fraction",
-                    #textcoords="figure fraction",
+                    # xycoords="figure fraction",
+                    # textcoords="figure fraction",
                     ha="right", va="center",
                     size=fontsize,
                     arrowprops=arrowprops,
@@ -55,7 +55,7 @@ def annotate(ax, coord1, coord2, connectionstyle, color, alpha):
 
 def calc_coordinates(pos, height):
     x = pos[0]
-    y = pos[1] + height - line_heigth*.5
+    y = pos[1] + height - line_heigth * .5
 
     return pos[0], y
 
@@ -102,45 +102,44 @@ def generate_diagram(filename, rect_pos, rect_width, figsize):
     for name, pos in rect_pos.items():
         htotal = all_h[name]
         obj = objs[name]
-        allrelationship = (list(getattr(obj, '_child_containers', [])) +
-                           list(getattr(obj, '_multi_parent_containers', [])))
+        allrelationship = (list(getattr(obj, '_child_containers', []))
+                           + list(getattr(obj, '_multi_parent_containers', [])))
 
         rect = Rectangle(pos, rect_width, htotal,
                          facecolor='w', edgecolor='k', linewidth=2.)
         ax.add_patch(rect)
 
         # title green
-        pos2 = pos[0], pos[1]+htotal - line_heigth*1.5
-        rect = Rectangle(pos2, rect_width, line_heigth*1.5,
+        pos2 = pos[0], pos[1] + htotal - line_heigth * 1.5
+        rect = Rectangle(pos2, rect_width, line_heigth * 1.5,
                          facecolor='g', edgecolor='k', alpha=.5, linewidth=2.)
         ax.add_patch(rect)
 
         # single relationship
         relationship = getattr(obj, '_single_child_objects', [])
-        pos2 = pos[1] + htotal - line_heigth*(1.5+len(relationship))
-        rect_height = len(relationship)*line_heigth
+        pos2 = pos[1] + htotal - line_heigth * (1.5 + len(relationship))
+        rect_height = len(relationship) * line_heigth
 
         rect = Rectangle((pos[0], pos2), rect_width, rect_height,
                          facecolor='c', edgecolor='k', alpha=.5)
         ax.add_patch(rect)
 
         # multi relationship
-        relationship = (list(getattr(obj, '_multi_child_objects', [])) +
-                        list(getattr(obj, '_multi_parent_containers', [])))
-        pos2 = (pos[1]+htotal - line_heigth*(1.5+len(relationship)) -
-                rect_height)
-        rect_height = len(relationship)*line_heigth
+        relationship = (list(getattr(obj, '_multi_child_objects', []))
+                        + list(getattr(obj, '_multi_parent_containers', [])))
+        pos2 = (pos[1] + htotal - line_heigth * (1.5 + len(relationship))
+                - rect_height)
+        rect_height = len(relationship) * line_heigth
 
         rect = Rectangle((pos[0], pos2), rect_width, rect_height,
                          facecolor='m', edgecolor='k', alpha=.5)
         ax.add_patch(rect)
 
         # necessary attr
-        pos2 = (pos[1]+htotal -
-                line_heigth*(1.5+len(allrelationship) +
-                             len(obj._necessary_attrs)))
+        pos2 = (pos[1] + htotal
+                - line_heigth * (1.5 + len(allrelationship) + len(obj._necessary_attrs)))
         rect = Rectangle((pos[0], pos2), rect_width,
-                         line_heigth*len(obj._necessary_attrs),
+                         line_heigth * len(obj._necessary_attrs),
                          facecolor='r', edgecolor='k', alpha=.5)
         ax.add_patch(rect)
 
@@ -149,17 +148,17 @@ def generate_diagram(filename, rect_pos, rect_width, figsize):
             post = '* '
         else:
             post = ''
-        ax.text(pos[0]+rect_width/2., pos[1]+htotal - line_heigth*1.5/2.,
-                name+post,
+        ax.text(pos[0] + rect_width / 2., pos[1] + htotal - line_heigth * 1.5 / 2.,
+                name + post,
                 horizontalalignment='center', verticalalignment='center',
-                fontsize=fontsize+2,
+                fontsize=fontsize + 2,
                 fontproperties=FontProperties(weight='bold'),
                 )
 
-        #relationship
+        # relationship
         for i, relat in enumerate(allrelationship):
-            ax.text(pos[0]+left_text_shift, pos[1]+htotal - line_heigth*(i+2),
-                    relat+': list',
+            ax.text(pos[0] + left_text_shift, pos[1] + htotal - line_heigth * (i + 2),
+                    relat + ': list',
                     horizontalalignment='left', verticalalignment='center',
                     fontsize=fontsize,
                     )
@@ -167,9 +166,9 @@ def generate_diagram(filename, rect_pos, rect_width, figsize):
         for i, attr in enumerate(obj._all_attrs):
             attrname, attrtype = attr[0], attr[1]
             t1 = attrname
-            if (hasattr(obj, '_quantity_attr') and
-                    obj._quantity_attr == attrname):
-                t1 = attrname+'(object itself)'
+            if (hasattr(obj, '_quantity_attr')
+                    and obj._quantity_attr == attrname):
+                t1 = attrname + '(object itself)'
             else:
                 t1 = attrname
 
@@ -185,9 +184,9 @@ def generate_diagram(filename, rect_pos, rect_width, figsize):
             else:
                 t2 = attrtype.__name__
 
-            t = t1+' :  '+t2
-            ax.text(pos[0]+left_text_shift,
-                    pos[1]+htotal - line_heigth*(i+len(allrelationship)+2),
+            t = t1 + ' :  ' + t2
+            ax.text(pos[0] + left_text_shift,
+                    pos[1] + htotal - line_heigth * (i + len(allrelationship) + 2),
                     t,
                     horizontalalignment='left', verticalalignment='center',
                     fontsize=fontsize,
@@ -206,15 +205,15 @@ def generate_diagram_simple():
     figsize = (18, 12)
     rw = rect_width = 3.
     bf = blank_fact = 1.2
-    rect_pos = {'Block': (.5+rw*bf*0, 4),
-                'Segment': (.5+rw*bf*1, .5),
-                'Event': (.5+rw*bf*4, 3.0),
-                'Epoch': (.5+rw*bf*4, 1.0),
-                'ChannelIndex': (.5+rw*bf*1, 7.5),
-                'Unit': (.5+rw*bf*2., 9.9),
-                'SpikeTrain': (.5+rw*bf*3, 7.5),
-                'IrregularlySampledSignal': (.5+rw*bf*3, 0.5),
-                'AnalogSignal': (.5+rw*bf*3, 4.9),
+    rect_pos = {'Block': (.5 + rw * bf * 0, 4),
+                'Segment': (.5 + rw * bf * 1, .5),
+                'Event': (.5 + rw * bf * 4, 3.0),
+                'Epoch': (.5 + rw * bf * 4, 1.0),
+                'ChannelIndex': (.5 + rw * bf * 1, 7.5),
+                'Unit': (.5 + rw * bf * 2., 9.9),
+                'SpikeTrain': (.5 + rw * bf * 3, 7.5),
+                'IrregularlySampledSignal': (.5 + rw * bf * 3, 0.5),
+                'AnalogSignal': (.5 + rw * bf * 3, 4.9),
                 }
     generate_diagram('simple_generated_diagram.svg',
                      rect_pos, rect_width, figsize)

+ 17 - 5
code/python-neo/doc/source/index.rst

@@ -6,7 +6,7 @@
 Neo is a Python package for working with electrophysiology data in Python, together
 with support for reading a wide range of neurophysiology file formats, including
 Spike2, NeuroExplorer, AlphaOmega, Axon, Blackrock, Plexon, Tdt, Igor Pro, and support for
-writing to a subset of these formats plus non-proprietary formats including Klustakwik and HDF5.
+writing to a subset of these formats plus non-proprietary formats including Kwik and HDF5.
 
 The goal of Neo is to improve interoperability between Python tools for
 analyzing, visualizing and generating electrophysiology data, by providing a common,
@@ -14,9 +14,12 @@ shared object model. In order to be as lightweight a dependency as possible,
 Neo is deliberately limited to represention of data, with no functions for data
 analysis or visualization.
 
-Neo is used by a number of other software tools, including OpenElectrophy_
-and SpykeViewer_ (data analysis and visualization), Elephant_ (data analysis),
-the G-node_ suite (databasing) and PyNN_ (simulations).
+Neo is used by a number of other software tools, including 
+SpykeViewer_ (data analysis and visualization), Elephant_ (data analysis),
+the G-node_ suite (databasing), PyNN_ (simulations), tridesclous_ (spike sorting)
+and ephyviewer_ (data visualization).
+OpenElectrophy_ (data analysis and visualization) used an older version of Neo.
+
 
 Neo implements a hierarchical data model well adapted to intracellular and
 extracellular electrophysiology and EEG data with support for multi-electrodes
@@ -38,6 +41,7 @@ Documentation
    core
    usecases
    io
+   rawio
    examples
    api_reference
    whatisnew
@@ -68,6 +72,12 @@ and all contributions are welcomed - see the :doc:`developers_guide` for more in
 `Source code <https://github.com/NeuralEnsemble/python-neo>`_ is on GitHub.
 
 
+Citation
+--------
+
+.. include:: ../../CITATION.txt
+
+
 .. _OpenElectrophy: https://github.com/OpenElectrophy/OpenElectrophy
 .. _Elephant: http://neuralensemble.org/elephant
 .. _G-node: http://www.g-node.org/
@@ -77,4 +87,6 @@ and all contributions are welcomed - see the :doc:`developers_guide` for more in
 .. _PyNN: http://neuralensemble.org/PyNN
 .. _quantities: http://pypi.python.org/pypi/quantities
 .. _`NeuralEnsemble mailing list`: http://groups.google.com/group/neuralensemble
-.. _`issue tracker`: https://github.com/NeuralEnsemble/python-neo/issues
+.. _`issue tracker`: https://github.com/NeuralEnsemble/python-neo/issues
+.. _tridesclous: https://github.com/tridesclous/tridesclous
+.. _ephyviewer: https://github.com/NeuralEnsemble/ephyviewer

+ 12 - 7
code/python-neo/doc/source/install.rst

@@ -10,7 +10,7 @@ Dependencies
   
     * Python_ >= 2.7
     * numpy_ >= 1.7.1
-    * quantities_ >= 0.9.0
+    * quantities_ >= 0.12.1
 
 For Debian/Ubuntu, you can install these using::
 
@@ -28,7 +28,7 @@ Neo will still install but the IO module that uses them will fail on loading:
    * h5py >= 2.5 for Hdf5IO, KwikIO
    * klusta for KwikIO
    * igor >= 0.2 for IgorIO
-   * nixio >= 1.2 for NixIO
+   * nixio >= 1.5 for NixIO
    * stfio for StimfitIO
 
 
@@ -47,14 +47,19 @@ on).
     
 To download and install manually, download:
 
-    https://github.com/NeuralEnsemble/python-neo/archive/neo-0.5.2.zip
+    |neo_github_url|
+    
 
-Then::
+Then:
 
-    $ unzip neo-0.5.2.zip
-    $ cd neo-0.5.2
-    $ python setup.py install
+.. parsed-literal::
     
+    $ unzip neo-|release|.zip
+    $ cd neo-|release|
+    $ python setup.py install
+
+
+
 or::
 
     $ python3 setup.py install

+ 15 - 50
code/python-neo/doc/source/io.rst

@@ -15,7 +15,7 @@ It is not only file-oriented, it can also read/write objects from a database.
 
 At the moment, there are 3 families of IO modules:
     1. for reading closed manufacturers' formats (Spike2, Plexon, AlphaOmega, BlackRock, Axon, ...)
-    2. for reading(/writing) formats from open source tools (KlustaKwik, Elan, WinEdr, WinWcp, PyNN, ...)
+    2. for reading(/writing) formats from open source tools (KlustaKwik, Elan, WinEdr, WinWcp, ...)
     3. for reading/writing Neo structure in neutral formats (HDF5, .mat, ...) but with Neo structure inside (NeoHDF5, NeoMatlab, ...)
 
 Combining **1** for reading and **3** for writing is a good example of use: converting your datasets
@@ -33,7 +33,7 @@ Depending on the file format, i.e. if it is streamable or not, the whole :class:
 particular :class:`Segment` objects can be accessed individually.
 Within a :class:`Segment`, the same hierarchical organisation applies.
 A :class:`Segment` embeds several objects, such as :class:`SpikeTrain`,
-:class:`AnalogSignal`, :class:`AnaloSignalArray`, :class:`EpochArray`, :class:`EventArray`
+:class:`AnalogSignal`, :class:`IrregularlySampledSignal`, :class:`Epoch`, :class:`Event`
 (basically, all the different Neo objects).
 
 Depending on the file format, these objects can sometimes be loaded separately, without the need to load the whole file.
@@ -109,60 +109,26 @@ All IOs have a read() method that returns a list of :class:`Block` objects (repr
     neo.core.Segment
 
 
-Lazy and cascade options
+Lazy option (deprecated)
 ========================
 
 In some cases you may not want to load everything in memory because it could be too big.
-For this scenario, two options are available:
+For this scenario, some IOs implement ``lazy=True/False``. With ``lazy=True`` all arrays will have a size of zero,
+but all the metadata will be loaded. The *lazy_shape* attribute is added to all array-like objects
+(AnalogSignal, IrregularlySampledSignal, SpikeTrain, Epoch, Event).
+In this case, *lazy_shape* is a tuple that has the same value as *shape* with ``lazy=False``.
+To know whether a class supports lazy mode, use ``ClassIO.support_lazy``.
+By default (if not specified), ``lazy=False``, i.e. all data is loaded.
+The lazy option will be removed in future Neo versions. Similar functionality will be
+implemented using proxy objects.
 
-  * ``lazy=True/False``. With ``lazy=True`` all arrays will have a size of zero, but all the metadata will be loaded. lazy_shape attribute is added to all object that
-    inheritate Quantitities or numpy.ndarray (AnalogSignal, AnalogSignalArray, SpikeTrain)  and to object that have array like attributes (EpochArray, EventArray)
-    In that cases, lazy_shape is a tuple that have the same shape with lazy=False.
-  * ``cascade=True/False``. With ``cascade=False`` only one object is read (and *one_to_many* and *many_to_many* relationship are not read).
-
-By default (if they are not specified), ``lazy=False`` and ``cascade=True``, i.e. all data is loaded.
-
-Example cascade::
-
-    >>> seg = reader.read_segment( cascade=True)
-    >>> print(len(seg.analogsignals))  # this is N
-    >>> seg = reader.read_segment(cascade=False)
-    >>> print(len(seg.analogsignals))  # this is zero
-
-Example lazy::
+Example of lazy loading::
 
     >>> seg = reader.read_segment(lazy=False)
-    >>> print(seg.analogsignals[0].shape)  # this is N
+    >>> print(seg.analogsignals[0].shape)  # this is (N, M)
     >>> seg = reader.read_segment(lazy=True)
-    >>> print(seg.analogsignals[0].shape)  # this is zero, the AnalogSignal is empty
-    >>> print(seg.analogsignals[0].lazy_shape)  # this is N
-
-Some IOs support advanced forms of lazy loading, cascading or both (these features are currently limited to the HDF5 IO, which supports both forms).
-
-* For lazy loading, these IOs have a :meth:`load_lazy_object` method that takes a single parameter: a data object previously loaded by the same IO
-  in lazy mode. It returns the fully loaded object, without links to container objects (Segment etc.). Continuing the lazy example above::
-
-    >>> lazy_sig = seg.analogsignals[0]  # Empty signal
-    >>> full_sig = reader.load_lazy_object(lazy_sig)
-    >>> print(lazy_sig.lazy_shape, full_sig.shape)  # Identical
-    >>> print(lazy_sig.segment)  # Has the link to the object "seg"
-    >>> print(full_sig.segment)  # Does not have the link: None
-
-* For lazy cascading, IOs have a :meth:`load_lazy_cascade` method. This method is not called directly when interacting with the IO, but its
-  presence can be used to check if an IO supports lazy cascading. To use lazy cascading, the cascade parameter is set to ``'lazy'``::
-
-    >>> block = reader.read(cascade='lazy')
-
-  You do not have to do anything else, lazy cascading is now active for the object you just loaded. You can interact with the object in the same way
-  as if it was loaded with ``cascade=True``. However, only the objects that are actually accessed are loaded as soon as they are needed::
-
-    >>> print(block.channelindexes[0].name)  # The first ChannelIndex is loaded
-    >>> print(block.segments[0].analogsignals[1])  # The first Segment and its second AnalogSignal are loaded
-
-  Once an object has been loaded with lazy cascading, it stays in memory::
-
-    >>> print(block.segments[0].analogsignals[0])  # The first Segment is already in memory, its first AnalogSignal is loaded
-
+    >>> print(seg.analogsignals[0].shape)  # this is 0, the AnalogSignal is empty
+    >>> print(seg.analogsignals[0].lazy_shape)  # this is (N, M)
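Since not every IO implements the lazy option, it is safer to check ``support_lazy``
before requesting it. A minimal sketch, reusing the Plexon reader from the examples
(the file name is only illustrative)::

    >>> import neo
    >>> reader = neo.io.PlexonIO(filename='File_plexon_3.plx')
    >>> if reader.support_lazy:
    ...     seg = reader.read_segment(lazy=True)   # metadata only
    ... else:
    ...     seg = reader.read_segment(lazy=False)  # full load
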
 
 .. _neo_io_API:
 
@@ -177,7 +143,6 @@ The :mod:`neo.io` API is designed to be simple and intuitive:
     - each IO class has a :meth:`read()` method that returns a list of :class:`Block` objects. If the IO only supports :class:`Segment` reading, the list will contain one block with all segments from the file.
     - each IO class that supports writing has a :meth:`write()` method that takes as a parameter a list of blocks, a single block or a single segment, depending on the IO's :attr:`writable_objects`.
     - each IO is able to do a *lazy* load: all metadata (e.g. :attr:`sampling_rate`) are read, but not the actual numerical data. lazy_shape attribute is added to provide information on real size.
-    - each IO is able to do a *cascade* load: if ``True`` (default) all child objects are loaded, otherwise only the top level object is loaded.
     - each IO is able to save and load all required attributes (metadata) of the objects it supports.
     - each IO can freely add user-defined or manufacturer-defined metadata to the :attr:`annotations` attribute of an object.
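As suggested above, combining a reader for a proprietary format with a writer for a
neutral format gives a compact conversion script. A hedged sketch (file names are
illustrative, and :class:`NeoMatlabIO` is assumed here to accept a filename argument
and a single :class:`Block` in its :meth:`write` method)::

    >>> import neo
    >>> reader = neo.io.PlexonIO(filename='File_plexon_3.plx')
    >>> blocks = reader.read(lazy=False)                       # a list of Block objects
    >>> writer = neo.io.NeoMatlabIO(filename='File_plexon_3.mat')
    >>> writer.write(blocks[0])                                # write a single Block
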
 

+ 39 - 53
code/python-neo/doc/source/io_developers_guide.rst

@@ -10,86 +10,68 @@ IO developers' guide
 Guidelines for IO implementation
 ================================
 
+There are two ways to add a new IO module:
+  * By directly adding a new IO class in a module within :mod:`neo.io`: the reader/writer will deal directly with Neo objects
+  * By adding a RawIO class in a module within :mod:`neo.rawio`: the reader should work with raw buffers from the file and provide
+    some internal headers for the scale/units/name/... 
+    You can then generate an IO module simply by inheriting from your RawIO class and from :class:`neo.io.BaseFromRaw`
+
+For read-only classes, we encourage you to write a :class:`RawIO` class, because it allows slice reading
+and is generally much quicker and easier to implement (although only for reading) than a full IO class.
+For read/write classes you can mix the two levels: :mod:`neo.rawio` for reading and :mod:`neo.io` for writing.
+
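As a rough illustration of this two-level approach (every name below is a placeholder,
not part of Neo, and the import paths follow the references used in this guide)::

    from neo.io import BaseFromRaw              # qualified as neo.io.BaseFromRaw above
    from neo.rawio.baserawio import BaseRawIO

    class MyFormatRawIO(BaseRawIO):
        """Low-level reader for the (hypothetical) MyFormat file format.

        Implements the methods of neo.rawio.baserawio that raise
        NotImplementedError, returning None for unsupported objects.
        """
        # ... low-level parsing and buffer-reading methods go here ...

    class MyFormatIO(MyFormatRawIO, BaseFromRaw):
        """Full IO class generated by inheriting from the RawIO class."""
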
 Recipe to develop an IO module for a new data format:
     1. Fully understand the object model. See :doc:`core`. If in doubt ask the `mailing list`_.
-    2. Fully understand :mod:`neo.io.exampleio`, It is a fake IO to explain the API. If in doubt ask the list.
-    3. Copy/paste ``exampleio.py`` and choose clear file and class names for your IO.
-    4. Decide which **supported objects** and **readable objects** your IO will deal with. This is the crucial point.
-    5. Implement all methods :meth:`read_XXX` related to **readable objects**.
-    6. Optional: If your IO supports reading multiple blocks from one file, implement a :meth:`read_all_blocks` method.
-    7. Do not forget all lazy and cascade combinations.
-    8. Optional: Support loading lazy objects by implementing a :meth:`load_lazy_object` method and / or lazy cascading by
-       implementing a :meth:`load_lazy_cascade` method.
-    9. Write good docstrings. List dependencies, including minimum version numbers.
-    10. Add your class to :mod:`neo.io.__init__`. Keep the import inside try/except for dependency reasons.
-    11. Contact the Neo maintainers to put sample files for testing on the G-Node server (write access is not public).
-    12. Write tests in ``neo/test/io/test_xxxxxio.py``. You must at least pass the standard tests (inherited from :class:`BaseTestIO`).
-    13. Commit or send a patch only if all tests pass.
+    2. Fully understand :mod:`neo.rawio.examplerawio`, a fake IO that explains the API. If in doubt ask the list.
+    3. Copy/paste ``examplerawio.py`` and choose clear file and class names for your IO.
+    4. Implement all methods that **raise(NotImplementedError)** in :mod:`neo.rawio.baserawio`. Return ``None`` when the object is not supported (spike/waveform).
+    5. Write good docstrings. List dependencies, including minimum version numbers.
+    6. Add your class to :mod:`neo.rawio.__init__`. Keep imports inside ``try/except`` for dependency reasons.
+    7. Create a class in :file:`neo/io/`
+    8. Add your class to :mod:`neo.io.__init__`. Keep imports inside ``try/except`` for dependency reasons.
+    9. Create an account at https://gin.g-node.org and deposit files in :file:`NeuralEnsemble/ephy_testing_data`.
+    10. Write tests in :file:`neo/rawio/test_xxxxxrawio.py`. You must at least pass the standard tests (inherited from :class:`BaseTestRawIO`). See :file:`test_examplerawio.py`
+    11. Write a similar test in :file:`neo/test/iotest/test_xxxxxio.py`. See :file:`test_exampleio.py`.
+    12. Make a pull request when all tests pass.
 
 Miscellaneous
 =============
 
-    * If your IO supports several version of a format (like ABF1, ABF2), upload to G-node test file repository all file version possible. (for utest coverage).
+    * If your IO supports several versions of a format (like ABF1, ABF2), upload all possible file versions to the gin.g-node.org test file repository (for test coverage).
     * :py:func:`neo.core.Block.create_many_to_one_relationship` offers a utility to complete the hierarchy when all one-to-many relationships have been created.
-    * :py:func:`neo.io.tools.populate_RecordingChannel` offers a utility to
-      create inside a :class:`Block` all :class:`RecordingChannel` objects and links to :class:`AnalogSignal`, :class:`SpikeTrain`, ...
     * In the docstring, explain where you obtained the file format specification if it is a closed one.
     * If your IO is based on a database mapper, keep in mind that the returned object MUST be detached,
       because this object can be written to another url for copying.
 
-Advanced lazy loading
-=====================
-
-If your IO supports a format that might take a long time to load or require lots of memory, consider implementing one or both of the following methods to
-enable advanced lazy loading:
-
-* ``load_lazy_object(self, obj)``: This method takes a lazily loaded object and returns the corresponding fully loaded object.
-  It does not set any links of the newly loaded object (e.g. the segment attribute of a SpikeTrain). The information needed to fully load the
-  lazy object should usually be stored in the IO object (e.g. in a dictionary with lazily loaded objects as keys and the address
-  in the file as values).
-* ``load_lazy_cascade(self, address, lazy)``: This method takes two parameters: The information required by your IO to load an object and a boolean that
-  indicates if data objects should be lazy loaded (in the same way as with regular :meth:`read_XXX` methods). The method should return a loaded
-  objects, including all the links for one-to-many and many-to-many relationships (lists of links should be replaced by ``LazyList`` objects,
-  see below).
-
-  To implement lazy cascading, your read methods need to react when a user calls them with the ``cascade`` parameter set to ``lazy``.
-  In this case, you have to replace all the link lists of your loaded objects with instances of :class:`neo.io.tools.LazyList`. Instead
-  of the actual objects that your IO would load at this point, fill the list with items that ``load_lazy_cascade`` needs to load the
-  object.
-
-  Because the links of objects can point to previously loaded objects, you need to cache all loaded objects in the IO. If :meth:`load_lazy_cascade`
-  is called with the address of a previously loaded object, return the object instead of loading it again. Also, a call to :meth:`load_lazy_cascade`
-  might require you to load additional objects further up in the hierarchy. For example, if a :class:`SpikeTrain` is accessed through a
-  :class:`Segment`, its :class:`Unit` and the :class:`ChannelIndex` of the :class:`Unit` might have to be loaded at that point as well
-  if they have not been accessed before.
-
-  Note that you are free to restrict lazy cascading to certain objects. For example, you could use the ``LazyList`` only for the ``analogsignals``
-  property of :class:`Segment` and :class:`RecordingChannel` objects and load the rest of file immediately.
 
 Tests
 =====
 
-:py:class:`neo.test.io.commun_io_test.BaseTestIO` provide standard tests.
-To use these you need to upload some sample data files at the `G-Node portal`_. They will be publicly accessible for testing Neo.
+:py:class:`neo.rawio.tests.common_rawio_test.BaseTestRawIO` and :py:class:`neo.test.iotest.common_io_test.BaseTestIO` provide standard tests.
+To use these you need to upload some sample data files at `gin-gnode`_. They will be publicly accessible for testing Neo.
 These tests:
 
   * check the compliance with the schema: hierarchy, attribute types, ...
-  * check if the IO respects the *lazy* and *cascade* keywords.
-  * For IO able to both write and read data, it compares a generated dataset with the same data after a write/read cycle.
+  * For IO modules able to both write and read data, it compares a generated dataset with the same data after a write/read cycle.
 
-The test scripts download all files from the `G-Node portal`_ and store them locally in ``neo/test/io/files_for_tests/``.
+The test scripts download all files from `gin-gnode`_ and store them locally in ``/tmp/files_for_tests/``.
 Subsequent test runs use the previously downloaded files, rather than trying to download them each time.
 
-Here is an example test script taken from the distribution: ``test_axonio.py``:
+Each test must have at least one class that inherits ``BaseTestRawIO`` and that has 3 attributes:
+  * ``rawioclass``: the class
+  * ``entities_to_test``: a list of files (or directories) to be tested one by one
+  * ``files_to_download``: a list of files to download (sometimes bigger than ``entities_to_test``)
+
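A minimal sketch of such a test module, written here against the built-in
:class:`ExampleRawIO` (a real IO would list its own test files and downloads;
the import paths follow the references in this guide)::

    import unittest

    from neo.rawio import ExampleRawIO
    from neo.rawio.tests.common_rawio_test import BaseTestRawIO

    class TestExampleRawIO(BaseTestRawIO, unittest.TestCase):
        rawioclass = ExampleRawIO
        entities_to_test = ['fake1']   # files (or directories) tested one by one
        files_to_download = []         # nothing to fetch for this fake format

    if __name__ == '__main__':
        unittest.main()
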
+Here is an example test script taken from the distribution: :file:`test_axonrawio.py`:
 
-.. literalinclude:: ../../neo/test/iotest/test_axonio.py
+.. literalinclude:: ../../neo/rawio/tests/test_axonrawio.py
 
 
 Logging
 =======
 
 All IO classes have logging already set up by default, using the standard :mod:`logging` module.
-The logger name is the same as the full qualified class name, e.g. :class:`neo.io.hdf5io.NeoHdf5IO`.
+The logger name is the same as the fully qualified class name, e.g. :class:`neo.io.hdf5io.NeoHdf5IO`.
 The :attr:`class.logger` attribute holds the logger for easy access.
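For example, inside an IO method a message can be emitted through that logger
(a sketch; the message text is made up)::

    self.logger.warning('Unsupported section encountered, skipping it')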
 
 There are generally 3 types of situations in which an IO class should use a logger
@@ -112,12 +94,16 @@ In the tests for the io class, if you intentionally test broken files, please di
 ExampleIO
 =========
 
+.. autoclass:: neo.rawio.ExampleRawIO
+
 .. autoclass:: neo.io.ExampleIO
 
 Here is the entire file:
 
+.. literalinclude:: ../../neo/rawio/examplerawio.py
+
 .. literalinclude:: ../../neo/io/exampleio.py
 
 
 .. _`mailing list`: http://groups.google.com/group/neuralensemble
-.. _G-node portal: https://portal.g-node.org/neo/
+.. _gin-gnode: https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data

+ 7 - 3
code/python-neo/doc/source/whatisnew.rst

@@ -5,10 +5,14 @@ Release notes
 
 .. toctree::
    :maxdepth: 1
-
-   releases/0.5.0.rst
-   releases/0.5.1.rst
+   
+   releases/0.7.0.rst
+   releases/0.6.0.rst
    releases/0.5.2.rst
+   releases/0.5.1.rst
+   releases/0.5.0.rst
+   
+   
 
 ..   releases/0.2.0.rst
 ..   releases/0.2.1.rst

+ 30 - 36
code/python-neo/examples/generated_data.py

@@ -12,7 +12,7 @@ from matplotlib import pyplot as plt
 import neo
 
 
-def generate_block(n_segments=3, n_channels=8, n_units=3,
+def generate_block(n_segments=3, n_channels=4, n_units=3,
                    data_samples=1000, feature_samples=100):
     """
     Generate a block with a single recording channel group and a number of
@@ -21,53 +21,48 @@ def generate_block(n_segments=3, n_channels=8, n_units=3,
     """
     feature_len = feature_samples / data_samples
 
-    # Create container and grouping objects
-    segments = [neo.Segment(index=i) for i in range(n_segments)]
-
-    chx = neo.ChannelIndex(index =1, name='T0')
-
-    for i in range(n_channels):
-        rc = neo.RecordingChannel(name='C%d' % i, index=i)
-        rc.channelindexes = [chx]
-        chx.recordingchannels.append(rc)
+    # Create Block to contain all generated data
+    block = neo.Block()
 
-    units = [neo.Unit('U%d' % i) for i in range(n_units)]
-    chx.units = units
+    # Create multiple Segments
+    block.segments = [neo.Segment(index=i) for i in range(n_segments)]
+    # Create multiple ChannelIndexes
+    block.channel_indexes = [neo.ChannelIndex(name='C%d' % i, index=i) for i in range(n_channels)]
 
-    block = neo.Block()
-    block.segments = segments
-    block.channel_indexes = [chx]
+    # Attach multiple Units to each ChannelIndex
+    for channel_idx in block.channel_indexes:
+        channel_idx.units = [neo.Unit('U%d' % i) for i in range(n_units)]
 
     # Create synthetic data
-    for seg in segments:
+    for seg in block.segments:
         feature_pos = np.random.randint(0, data_samples - feature_samples)
 
         # Analog signals: Noise with a single sinewave feature
         wave = 3 * np.sin(np.linspace(0, 2 * np.pi, feature_samples))
-        for rc in chx.recordingchannels:
+        for channel_idx in block.channel_indexes:
             sig = np.random.randn(data_samples)
             sig[feature_pos:feature_pos + feature_samples] += wave
 
             signal = neo.AnalogSignal(sig * pq.mV, sampling_rate=1 * pq.kHz)
             seg.analogsignals.append(signal)
-            rc.analogsignals.append(signal)
+            channel_idx.analogsignals.append(signal)
 
-        # Spike trains: Random spike times with elevated rate in short period
-        feature_time = feature_pos / data_samples
-        for u in units:
-            random_spikes = np.random.rand(20)
-            feature_spikes = np.random.rand(5) * feature_len + feature_time
-            spikes = np.hstack([random_spikes, feature_spikes])
+            # Spike trains: Random spike times with elevated rate in short period
+            feature_time = feature_pos / data_samples
+            for u in channel_idx.units:
+                random_spikes = np.random.rand(20)
+                feature_spikes = np.random.rand(5) * feature_len + feature_time
+                spikes = np.hstack([random_spikes, feature_spikes])
 
-            train = neo.SpikeTrain(spikes * pq.s, 1 * pq.s)
-            seg.spiketrains.append(train)
-            u.spiketrains.append(train)
+                train = neo.SpikeTrain(spikes * pq.s, 1 * pq.s)
+                seg.spiketrains.append(train)
+                u.spiketrains.append(train)
 
     block.create_many_to_one_relationship()
     return block
 
-block = generate_block()
 
+block = generate_block()
 
 # In this example, we treat each segment in turn, averaging over the channels
 # in each:
@@ -86,22 +81,21 @@ for seg in block.segments:
 # averaging over trials. For example, perhaps you wish to see which physical
 # location produces the strongest response, and each stimulus was the same:
 
-# We assume that our block has only 1 ChannelIndex and each
-# RecordingChannel only has 1 AnalogSignal.
-chx = block.channel_indexes[0]
-for rc in chx.recordingchannels:
-    print("Analysing channel %d: %s" % (rc.index, rc.name))
+# There are multiple ChannelIndex objects connected to the block, each
+# corresponding to a physical electrode
+for channel_idx in block.channel_indexes:
+    print("Analysing channel %d: %s" % (channel_idx.index, channel_idx.name))
 
-    siglist = rc.analogsignals
+    siglist = channel_idx.analogsignals
     time_points = siglist[0].times
     avg = np.mean(siglist, axis=0)  # Average over signals of RecordingChannel
 
     plt.figure()
     plt.plot(time_points, avg)
-    plt.title("Average response on channel %d" % rc.index)
+    plt.title("Average response on channel %d" % channel_idx.index)
 
 # There are three ways to access the spike train data: by Segment,
-# by RecordingChannel or by Unit.
+# by ChannelIndex or by Unit.
 
 # By Segment. In this example, each Segment represents data from one trial,
 # and we want a peristimulus time histogram (PSTH) for each trial from all

+ 0 - 47
code/python-neo/examples/read_files.py

@@ -1,47 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-This is an example for reading files with neo.io
-"""
-
-import urllib
-
-import neo
-
-
-# Plexon files
-distantfile = 'https://portal.g-node.org/neo/plexon/File_plexon_3.plx'
-localfile = './File_plexon_3.plx'
-urllib.request.urlretrieve(distantfile, localfile)
-
-# create a reader
-reader = neo.io.PlexonIO(filename='File_plexon_3.plx')
-# read the blocks
-blks = reader.read(cascade=True, lazy=False)
-print (blks)
-# access to segments
-for blk in blks:
-    for seg in blk.segments:
-        print (seg)
-        for asig in seg.analogsignals:
-            print (asig)
-        for st in seg.spiketrains:
-            print (st)
-
-
-# CED Spike2 files
-distantfile = 'https://portal.g-node.org/neo/spike2/File_spike2_1.smr'
-localfile = './File_spike2_1.smr'
-urllib.request.urlretrieve(distantfile, localfile)
-
-# create a reader
-reader = neo.io.Spike2IO(filename='File_spike2_1.smr')
-# read the block
-bl = reader.read(cascade=True, lazy=False)[0]
-print (bl)
-# access to segments
-for seg in bl.segments:
-    print (seg)
-    for asig in seg.analogsignals:
-        print (asig)
-    for st in seg.spiketrains:
-        print (st)

+ 5 - 7
code/python-neo/examples/simple_plot_with_matplotlib.py

@@ -18,26 +18,24 @@ url = 'https://portal.g-node.org/neo/'
 distantfile = 'https://portal.g-node.org/neo/plexon/File_plexon_3.plx'
 localfile = './File_plexon_3.plx'
 
-
 urllib.request.urlretrieve(distantfile, localfile)
 
-
 # reader = neo.io.NeuroExplorerIO(filename='File_neuroexplorer_2.nex')
 reader = neo.io.PlexonIO(filename='File_plexon_3.plx')
 
-
-bl = reader.read(cascade=True, lazy=False)[0]
+bl = reader.read(lazy=False)[0]
 for seg in bl.segments:
-    print ("SEG: "+str(seg.file_origin))
+    print("SEG: " + str(seg.file_origin))
     fig = pyplot.figure()
     ax1 = fig.add_subplot(2, 1, 1)
     ax2 = fig.add_subplot(2, 1, 2)
     ax1.set_title(seg.file_origin)
+    ax1.set_ylabel('arbitrary units')
     mint = 0 * pq.s
     maxt = np.inf * pq.s
     for i, asig in enumerate(seg.analogsignals):
-        times = asig.times.rescale('s').magnitude        
-        asig = asig.rescale('mV').magnitude
+        times = asig.times.rescale('s').magnitude
+        asig = asig.magnitude
         ax1.plot(times, asig)
 
     trains = [st.rescale('s').magnitude for st in seg.spiketrains]

+ 2 - 0
code/python-neo/neo/__init__.py

@@ -9,5 +9,7 @@ import logging
 logging_handler = logging.StreamHandler()
 
 from neo.core import *
+# ~ import neo.rawio
 from neo.io import *
+
 from neo.version import version as __version__

+ 135 - 222
code/python-neo/neo/core/analogsignal.py

@@ -2,8 +2,10 @@
 '''
 This module implements :class:`AnalogSignal`, an array of analog signals.
 
-:class:`AnalogSignal` inherits from :class:`basesignal.BaseSignal` and 
-:class:`quantities.Quantity`, which inherits from :class:`numpy.array`.
+:class:`AnalogSignal` inherits from :class:`basesignal.BaseSignal`, which
+derives from :class:`BaseNeo`, and from :class:`quantities.Quantity`, which
+in turn inherits from :class:`numpy.array`.
+
 Inheritance from :class:`numpy.array` is explained here:
 http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
 
@@ -25,12 +27,13 @@ import numpy as np
 import quantities as pq
 
 from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
+from neo.core.dataobject import DataObject
 from neo.core.channelindex import ChannelIndex
 from copy import copy, deepcopy
 
-logger = logging.getLogger("Neo")
+from neo.core.basesignal import BaseSignal
 
-from neo.core import basesignal
+logger = logging.getLogger("Neo")
 
 
 def _get_sampling_rate(sampling_rate, sampling_period):
@@ -40,8 +43,7 @@ def _get_sampling_rate(sampling_rate, sampling_period):
     '''
     if sampling_period is None:
         if sampling_rate is None:
-            raise ValueError("You must provide either the sampling rate or " +
-                             "sampling period")
+            raise ValueError("You must provide either the sampling rate or " + "sampling period")
     elif sampling_rate is None:
         sampling_rate = 1.0 / sampling_period
     elif sampling_period != 1.0 / sampling_rate:
@@ -51,11 +53,10 @@ def _get_sampling_rate(sampling_rate, sampling_period):
     return sampling_rate
 
 
-def _new_AnalogSignalArray(cls, signal, units=None, dtype=None, copy=True,
-                          t_start=0*pq.s, sampling_rate=None,
-                          sampling_period=None, name=None, file_origin=None,
-                          description=None, annotations=None,
-                          channel_index=None, segment=None):
+def _new_AnalogSignalArray(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
+                           sampling_rate=None, sampling_period=None, name=None, file_origin=None,
+                           description=None, array_annotations=None, annotations=None,
+                           channel_index=None, segment=None):
     '''
     A function to map AnalogSignal.__new__ to function that
         does not do the unit checking. This is needed for pickle to work.
@@ -64,13 +65,13 @@ def _new_AnalogSignalArray(cls, signal, units=None, dtype=None, copy=True,
               t_start=t_start, sampling_rate=sampling_rate,
               sampling_period=sampling_period, name=name,
               file_origin=file_origin, description=description,
-              **annotations)
+              array_annotations=array_annotations, **annotations)
     obj.channel_index = channel_index
     obj.segment = segment
     return obj
 
 
-class AnalogSignal(basesignal.BaseSignal):
+class AnalogSignal(BaseSignal):
     '''
     Array of one or more continuous analog signals.
 
@@ -119,6 +120,8 @@ class AnalogSignal(basesignal.BaseSignal):
     *Optional attributes/properties*:
         :dtype: (numpy dtype or str) Override the dtype of the signal array.
         :copy: (bool) True by default.
+        :array_annotations: (dict) Dict mapping strings to numpy arrays containing annotations \
+                                   for all data points
 
     Note: Any other additional arguments are assumed to be user-specific
     metadata and stored in :attr:`annotations`.
@@ -146,6 +149,9 @@ class AnalogSignal(basesignal.BaseSignal):
         Otherwise an :class:`AnalogSignal` (actually a view) is
         returned, with the same metadata, except that :attr:`t_start`
         is changed if the start index along dimension 1 is greater than 1.
+        Note that slicing an :class:`AnalogSignal` may give a different
+        result to slicing the underlying NumPy array since signals
+        are always two-dimensional.
 
     *Operations available on this object*:
         == != + * /
@@ -159,10 +165,9 @@ class AnalogSignal(basesignal.BaseSignal):
                         ('t_start', pq.Quantity, 0))
     _recommended_attrs = BaseNeo._recommended_attrs
 
-    def __new__(cls, signal, units=None, dtype=None, copy=True,
-                t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
-                name=None, file_origin=None, description=None,
-                **annotations):
+    def __new__(cls, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
+                sampling_rate=None, sampling_period=None, name=None, file_origin=None,
+                description=None, array_annotations=None, **annotations):
         '''
         Constructs new :class:`AnalogSignal` from data.
 
@@ -171,14 +176,7 @@ class AnalogSignal(basesignal.BaseSignal):
 
         __array_finalize__ is called on the new object.
         '''
-        if units is None:
-            if not hasattr(signal, "units"):
-                raise ValueError("Units must be specified")
-        elif isinstance(signal, pq.Quantity):
-            # could improve this test, what if units is a string?
-            if units != signal.units:
-                signal = signal.rescale(units)
-
+        signal = cls._rescale(signal, units=units)
         obj = pq.Quantity(signal, units=units, dtype=dtype, copy=copy).view(cls)
 
         if obj.ndim == 1:
@@ -194,10 +192,9 @@ class AnalogSignal(basesignal.BaseSignal):
         obj.channel_index = None
         return obj
 
-    def __init__(self, signal, units=None, dtype=None, copy=True,
-                 t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
-                 name=None, file_origin=None, description=None,
-                 **annotations):
+    def __init__(self, signal, units=None, dtype=None, copy=True, t_start=0 * pq.s,
+                 sampling_rate=None, sampling_period=None, name=None, file_origin=None,
+                 description=None, array_annotations=None, **annotations):
         '''
         Initializes a newly constructed :class:`AnalogSignal` instance.
         '''
@@ -208,78 +205,56 @@ class AnalogSignal(basesignal.BaseSignal):
 
         # Calls parent __init__, which grabs universally recommended
         # attributes and sets up self.annotations
-        BaseNeo.__init__(self, name=name, file_origin=file_origin,
-                         description=description, **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)
 
     def __reduce__(self):
         '''
         Map the __new__ function onto _new_AnalogSignalArray, so that pickle
         works
         '''
-        return _new_AnalogSignalArray, (self.__class__,
-                                        np.array(self),
-                                        self.units,
-                                        self.dtype,
-                                        True,
-                                        self.t_start,
-                                        self.sampling_rate,
-                                        self.sampling_period,
-                                        self.name,
-                                        self.file_origin,
-                                        self.description,
-                                        self.annotations,
-                                        self.channel_index,
-                                        self.segment)
-    def __deepcopy__(self, memo):
-        cls = self.__class__
-        new_AS = cls(np.array(self), units=self.units, dtype=self.dtype,
-               t_start=self.t_start, sampling_rate=self.sampling_rate,
-               sampling_period=self.sampling_period, name=self.name,
-               file_origin=self.file_origin, description=self.description)
-        new_AS.__dict__.update(self.__dict__)
-        memo[id(self)] = new_AS
-        for k, v in self.__dict__.items():
-            try:
-                setattr(new_AS, k, deepcopy(v, memo))
-            except:
-                setattr(new_AS, k, v)
-        return new_AS
+        return _new_AnalogSignalArray, (self.__class__, np.array(self), self.units, self.dtype,
+                                        True, self.t_start, self.sampling_rate,
+                                        self.sampling_period, self.name, self.file_origin,
+                                        self.description, self.array_annotations,
+                                        self.annotations, self.channel_index, self.segment)
 
-    def __array_finalize__(self, obj):
+    def _array_finalize_spec(self, obj):
         '''
-        This is called every time a new :class:`AnalogSignal` is created.
-
-        It is the appropriate place to set default values for attributes
-        for :class:`AnalogSignal` constructed by slicing or viewing.
+        Set default values for attributes specific to :class:`AnalogSignal`.
 
-        User-specified values are only relevant for construction from
-        constructor, and these are set in __new__. Then they are just
-        copied over here.
+        Common attributes are defined in
+        :meth:`__array_finalize__` in :class:`basesignal.BaseSignal`),
+        which is called every time a new signal is created
+        and calls this method.
         '''
-        super(AnalogSignal, self).__array_finalize__(obj)
         self._t_start = getattr(obj, '_t_start', 0 * pq.s)
         self._sampling_rate = getattr(obj, '_sampling_rate', None)
-       
-        # The additional arguments
-        self.annotations = getattr(obj, 'annotations', {})
-
-        # Globally recommended attributes
-        self.name = getattr(obj, 'name', None)
-        self.file_origin = getattr(obj, 'file_origin', None)
-        self.description = getattr(obj, 'description', None)
+        return obj
 
-        # Parent objects
-        self.segment = getattr(obj, 'segment', None)
-        self.channel_index = getattr(obj, 'channel_index', None)
+    def __deepcopy__(self, memo):
+        cls = self.__class__
+        new_signal = cls(np.array(self), units=self.units, dtype=self.dtype, t_start=self.t_start,
+                         sampling_rate=self.sampling_rate, sampling_period=self.sampling_period,
+                         name=self.name, file_origin=self.file_origin,
+                         description=self.description)
+        new_signal.__dict__.update(self.__dict__)
+        memo[id(self)] = new_signal
+        for k, v in self.__dict__.items():
+            try:
+                setattr(new_signal, k, deepcopy(v, memo))
+            except TypeError:
+                setattr(new_signal, k, v)
+        return new_signal
 
     def __repr__(self):
         '''
         Returns a string representing the :class:`AnalogSignal`.
         '''
-        return ('<%s(%s, [%s, %s], sampling rate: %s)>' %
-                (self.__class__.__name__,
-                 super(AnalogSignal, self).__repr__(), self.t_start,
-                 self.t_stop, self.sampling_rate))
+        return ('<%s(%s, [%s, %s], sampling rate: %s)>' % (self.__class__.__name__,
+                                                           super(AnalogSignal, self).__repr__(),
+                                                           self.t_start, self.t_stop,
+                                                           self.sampling_rate))
 
     def get_channel_index(self):
         """
@@ -289,46 +264,54 @@ class AnalogSignal(basesignal.BaseSignal):
         else:
             return None
 
-    def __getslice__(self, i, j):
-        '''
-        Get a slice from :attr:`i` to :attr:`j`.
-
-        Doesn't get called in Python 3, :meth:`__getitem__` is called instead
-        '''
-        return self.__getitem__(slice(i, j))
-
     def __getitem__(self, i):
         '''
         Get the item or slice :attr:`i`.
         '''
-        obj = super(AnalogSignal, self).__getitem__(i)
         if isinstance(i, (int, np.integer)):  # a single point in time across all channels
+            obj = super(AnalogSignal, self).__getitem__(i)
             obj = pq.Quantity(obj.magnitude, units=obj.units)
         elif isinstance(i, tuple):
+            obj = super(AnalogSignal, self).__getitem__(i)
             j, k = i
             if isinstance(j, (int, np.integer)):  # extract a quantity array
                 obj = pq.Quantity(obj.magnitude, units=obj.units)
             else:
                 if isinstance(j, slice):
                     if j.start:
-                        obj.t_start = (self.t_start +
-                                       j.start * self.sampling_period)
+                        obj.t_start = (self.t_start + j.start * self.sampling_period)
                     if j.step:
                         obj.sampling_period *= j.step
                 elif isinstance(j, np.ndarray):
-                    raise NotImplementedError("Arrays not yet supported")
-                    # in the general case, would need to return IrregularlySampledSignal(Array)
+                    # in the general case, would need to return IrregularlySampledSignal(Array)
+                    raise NotImplementedError("Arrays not yet supported")
                 else:
                     raise TypeError("%s not supported" % type(j))
                 if isinstance(k, (int, np.integer)):
                     obj = obj.reshape(-1, 1)
                 if self.channel_index:
                     obj.channel_index = self.channel_index.__getitem__(k)
+                obj.array_annotate(**deepcopy(self.array_annotations_at_index(k)))
         elif isinstance(i, slice):
+            obj = super(AnalogSignal, self).__getitem__(i)
             if i.start:
                 obj.t_start = self.t_start + i.start * self.sampling_period
+            obj.array_annotations = deepcopy(self.array_annotations)
+        elif isinstance(i, np.ndarray):
+            # Indexing of an AnalogSignal is only consistent if the resulting number of
+            # samples is the same for each trace. The time axis for these samples is not
+            # guaranteed to be continuous, so returning a Quantity instead of an AnalogSignal here.
+            new_time_dims = np.sum(i, axis=0)
+            if len(new_time_dims) and all(new_time_dims == new_time_dims[0]):
+                obj = np.asarray(self).T.__getitem__(i.T)
+                obj = obj.T.reshape(self.shape[1], -1).T
+                obj = pq.Quantity(obj, units=self.units)
+            else:
+                raise IndexError("indexing of an AnalogSignals needs to keep the same number of "
+                                 "sample for each trace contained")
         else:
-            raise IndexError("index should be an integer, tuple or slice")
+            raise IndexError("index should be an integer, tuple, slice or boolean numpy array")
         return obj
 
     def __setitem__(self, i, value):
@@ -432,52 +415,12 @@ class AnalogSignal(basesignal.BaseSignal):
         '''
         return self.t_start + np.arange(self.shape[0]) / self.sampling_rate
 
-    def rescale(self, units):
-        '''
-        Return a copy of the AnalogSignal converted to the specified
-        units
-        '''
-        to_dims = pq.quantity.validate_dimensionality(units)
-        if self.dimensionality == to_dims:
-            to_u = self.units
-            signal = np.array(self)
-        else:
-            to_u = pq.Quantity(1.0, to_dims)
-            from_u = pq.Quantity(1.0, self.dimensionality)
-            try:
-                cf = pq.quantity.get_conversion_factor(from_u, to_u)
-            except AssertionError:
-                raise ValueError('Unable to convert between units of "%s" \
-                                 and "%s"' % (from_u._dimensionality,
-                                              to_u._dimensionality))
-            signal = cf * self.magnitude
-        new = self.__class__(signal=signal, units=to_u,
-                             sampling_rate=self.sampling_rate)
-        new._copy_data_complement(self)
-        new.channel_index = self.channel_index
-        new.segment = self.segment
-        new.annotations.update(self.annotations)
-
-        return new
-
-    def duplicate_with_new_array(self, signal):
-        '''
-        Create a new :class:`AnalogSignal` with the same metadata
-        but different data
-        '''
-        #signal is the new signal
-        new = self.__class__(signal=signal, units=self.units,
-                             sampling_rate=self.sampling_rate)
-        new._copy_data_complement(self)
-        new.annotations.update(self.annotations)
-        return new
-
     def __eq__(self, other):
         '''
         Equality test (==)
         '''
-        if (self.t_start != other.t_start or
-                self.sampling_rate != other.sampling_rate):
+        if (isinstance(other, AnalogSignal) and (
+                self.t_start != other.t_start or self.sampling_rate != other.sampling_rate)):
             return False
         return super(AnalogSignal, self).__eq__(other)
 
@@ -489,34 +432,19 @@ class AnalogSignal(basesignal.BaseSignal):
         if isinstance(other, AnalogSignal):
             for attr in "t_start", "sampling_rate":
                 if getattr(self, attr) != getattr(other, attr):
-                    raise ValueError("Inconsistent values of %s" % attr)
-            # how to handle name and annotations?
-
-    def _copy_data_complement(self, other):
-        '''
-        Copy the metadata from another :class:`AnalogSignal`.
-        '''
-        for attr in ("t_start", "sampling_rate", "name", "file_origin",
-                     "description", "annotations"):
-            setattr(self, attr, getattr(other, attr, None))
-
-    def __rsub__(self, other, *args):
-        '''
-        Backwards subtraction (other-self)
-        '''
-        return self.__mul__(-1, *args) + other
+                    raise ValueError("Inconsistent values of %s" % attr)
+            # how to handle name and annotations?
 
     def _repr_pretty_(self, pp, cycle):
         '''
         Handle pretty-printing the :class:`AnalogSignal`.
         '''
         pp.text("{cls} with {channels} channels of length {length}; "
-                "units {units}; datatype {dtype} ".format(
-                    cls=self.__class__.__name__,
-                    channels=self.shape[1],
-                    length=self.shape[0],
-                    units=self.units.dimensionality.string,
-                    dtype=self.dtype))
+                "units {units}; datatype {dtype} ".format(cls=self.__class__.__name__,
+                                                          channels=self.shape[1],
+                                                          length=self.shape[0],
+                                                          units=self.units.dimensionality.string,
+                                                          dtype=self.dtype))
         if self._has_repr_pretty_attrs_():
             pp.breakable()
             self._repr_pretty_attrs_(pp, cycle)
@@ -525,11 +453,17 @@ class AnalogSignal(basesignal.BaseSignal):
             pp.breakable()
             with pp.group(indent=1):
                 pp.text(line)
+
         for line in ["sampling rate: {0}".format(self.sampling_rate),
-                     "time: {0} to {1}".format(self.t_start, self.t_stop)
-                     ]:
+                     "time: {0} to {1}".format(self.t_start, self.t_stop)]:
             _pp(line)
 
+    def time_index(self, t):
+        """Return the array index corresponding to the time `t`"""
+        t = t.rescale(self.sampling_period.units)
+        i = (t - self.t_start) / self.sampling_period
+        i = int(np.rint(i.magnitude))
+        return i
 
     def time_slice(self, t_start, t_stop):
         '''
@@ -544,17 +478,13 @@ class AnalogSignal(basesignal.BaseSignal):
         if t_start is None:
             i = 0
         else:
-            t_start = t_start.rescale(self.sampling_period.units)
-            i = (t_start - self.t_start) / self.sampling_period
-            i = int(np.rint(i.magnitude))
+            i = self.time_index(t_start)
 
         # checking stop time and transforming to stop index
         if t_stop is None:
             j = len(self)
         else:
-            t_stop = t_stop.rescale(self.sampling_period.units)
-            j = (t_stop - self.t_start) / self.sampling_period
-            j = int(np.rint(j.magnitude))
+            j = self.time_index(t_stop)
 
         if (i < 0) or (j > len(self)):
             raise ValueError('t_start, t_stop have to be within the analog \
@@ -563,63 +493,46 @@ class AnalogSignal(basesignal.BaseSignal):
         # we're going to send the list of indicies so that we get *copy* of the
         # sliced data
         obj = super(AnalogSignal, self).__getitem__(np.arange(i, j, 1))
+
+        # If there is any data remaining, there will be data for every channel
+        # In this case, array_annotations need to stay available
+        # super.__getitem__ cannot do this, so it needs to be done here
+        if len(obj) > 0:
+            obj.array_annotations = self.array_annotations
+
         obj.t_start = self.t_start + i * self.sampling_period
 
         return obj
 
-    def merge(self, other):
-        '''
-        Merge another :class:`AnalogSignal` into this one.
+    def splice(self, signal, copy=False):
+        """
+        Replace part of the current signal by a new piece of signal.
 
-        The :class:`AnalogSignal` objects are concatenated horizontally
-        (column-wise, :func:`np.hstack`).
+        The new piece of signal will overwrite part of the current signal
+        starting at the time given by the new piece's `t_start` attribute.
 
-        If the attributes of the two :class:`AnalogSignal` are not
-        compatible, an Exception is raised.
-        '''
-        if self.sampling_rate != other.sampling_rate:
-            raise MergeError("Cannot merge, different sampling rates")
-        if self.t_start != other.t_start:
-            raise MergeError("Cannot merge, different t_start")
-        if self.segment != other.segment:
-            raise MergeError("Cannot merge these two signals as they belong to different segments.")
-        if hasattr(self, "lazy_shape"):
-            if hasattr(other, "lazy_shape"):
-                if self.lazy_shape[0] != other.lazy_shape[0]:
-                    raise MergeError("Cannot merge signals of different length.")
-                merged_lazy_shape = (self.lazy_shape[0], self.lazy_shape[1] + other.lazy_shape[1])
-            else:
-                raise MergeError("Cannot merge a lazy object with a real object.")
-        if other.units != self.units:
-            other = other.rescale(self.units)
-        stack = np.hstack(map(np.array, (self, other)))
-        kwargs = {}
-        for name in ("name", "description", "file_origin"):
-            attr_self = getattr(self, name)
-            attr_other = getattr(other, name)
-            if attr_self == attr_other:
-                kwargs[name] = attr_self
-            else:
-                kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
-        kwargs.update(merged_annotations)
-        signal = AnalogSignal(stack, units=self.units, dtype=self.dtype,
-                              copy=False, t_start=self.t_start,
-                              sampling_rate=self.sampling_rate,
-                              **kwargs)
-        signal.segment = self.segment
-        # merge channel_index (move to ChannelIndex.merge()?)
-        if self.channel_index and other.channel_index:
-            signal.channel_index = ChannelIndex(
-                    index=np.arange(signal.shape[1]),
-                    channel_ids=np.hstack([self.channel_index.channel_ids,
-                                           other.channel_index.channel_ids]),
-                    channel_names=np.hstack([self.channel_index.channel_names,
-                                             other.channel_index.channel_names]))
-        else:
-            signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]))
+        The signal to be spliced in must have the same physical dimensions,
+        sampling rate, and number of channels as the current signal and
+        fit within it.
 
-        if hasattr(self, "lazy_shape"):
-            signal.lazy_shape = merged_lazy_shape
-        return signal
+        If `copy` is False (the default), modify the current signal in place.
+        If `copy` is True, return a new signal and leave the current one untouched.
+        In this case, the new signal will not be linked to any parent objects.
+        """
+        if signal.t_start < self.t_start:
+            raise ValueError("Cannot splice earlier than the start of the signal")
+        if signal.t_stop > self.t_stop:
+            raise ValueError("Splice extends beyond signal")
+        if signal.sampling_rate != self.sampling_rate:
+            raise ValueError("Sampling rates do not match")
+        i = self.time_index(signal.t_start)
+        j = i + signal.shape[0]
+        if copy:
+            new_signal = deepcopy(self)
+            new_signal.segment = None
+            new_signal.channel_index = None
+            new_signal[i:j, :] = signal
+            return new_signal
+        else:
+            self[i:j, :] = signal
+            return self
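
The new `time_index` and `splice` methods above can be exercised as in the following sketch (not part of the commit; it assumes a neo checkout containing these changes plus numpy and quantities, and the signal shapes and values are made up):

    import numpy as np
    import quantities as pq
    from neo.core import AnalogSignal

    # a 2 s, 2-channel signal sampled at 1 kHz
    sig = AnalogSignal(np.random.randn(2000, 2), units='mV',
                       sampling_rate=1 * pq.kHz, t_start=0 * pq.s)

    # time_index maps a time point onto the corresponding sample index
    i = sig.time_index(0.5 * pq.s)   # -> 500

    # splice overwrites samples starting at the patch's t_start;
    # with copy=True the original signal is left untouched
    patch = AnalogSignal(np.zeros((100, 2)), units='mV',
                         sampling_rate=1 * pq.kHz, t_start=0.5 * pq.s)
    spliced = sig.splice(patch, copy=True)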

+ 2 - 2
code/python-neo/neo/core/baseneo.py

@@ -108,8 +108,8 @@ def merge_annotations(A, B):
             try:
                 merged[name] = merge_annotation(A[name], B[name])
             except BaseException as exc:
-                #exc.args += ('key %s' % name,)
-                #raise
+                # exc.args += ('key %s' % name,)
+                # raise
                 merged[name] = "MERGE CONFLICT"  # temporary hack
         else:
             merged[name] = A[name]

+ 214 - 24
code/python-neo/neo/core/basesignal.py

@@ -1,36 +1,121 @@
 # -*- coding: utf-8 -*-
 '''
 This module implements :class:`BaseSignal`, an array of signals.
+This is a parent class from which all signal objects inherit:
+    :class:`AnalogSignal` and :class:`IrregularlySampledSignal`
 
-:class:`BaseSignal` inherits from :class:`quantites.Quantity`, which
+:class:`BaseSignal` inherits from :class:`quantities.Quantity`, which
 inherits from :class:`numpy.array`.
 Inheritance from :class:`numpy.array` is explained here:
 http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
 
 In brief:
-* Initialization of a new object from constructor happens in :meth:`__new__`.
-This is where user-specified attributes are set.
-
-* :meth:`__array_finalize__` is called for all new objects, including those
-created by slicing. This is where attributes are copied over from
-the old object.
+* Constructor :meth:`__new__` for :class:`BaseSignal` doesn't exist.
+Only child objects :class:`AnalogSignal` and :class:`IrregularlySampledSignal`
+can be created.
 '''
 
-# needed for python 3 compatibility
+# needed for Python 3 compatibility
 from __future__ import absolute_import, division, print_function
 
+import copy
 import logging
 
 import numpy as np
 import quantities as pq
 
 from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
+from neo.core.dataobject import DataObject, ArrayDict
 from neo.core.channelindex import ChannelIndex
 
 logger = logging.getLogger("Neo")
 
-class BaseSignal(BaseNeo, pq.Quantity):    
-    
+
+class BaseSignal(DataObject):
+    '''
+    This is the base class from which all signal objects inherit:
+    :class:`AnalogSignal` and :class:`IrregularlySampledSignal`.
+
+    This class contains all common methods of both child classes.
+    It uses the following child class attributes:
+
+        :_necessary_attrs: a list of the attributes that the class must have.
+
+        :_recommended_attrs: a list of the attributes that the class may
+        optionally have.
+    '''
+
+    def _array_finalize_spec(self, obj):
+        '''
+        Called by :meth:`__array_finalize__`, used to customize behaviour of sub-classes.
+        '''
+        return obj
+
+    def __array_finalize__(self, obj):
+        '''
+        This is called every time a new signal is created.
+
+        It is the appropriate place to set default values for attributes
+        for a signal constructed by slicing or viewing.
+
+        User-specified values are only relevant for construction via the
+        constructor; these are set in :meth:`__new__` of the child class
+        and are simply copied over here. Default values for the attributes
+        specific to the subclasses (:class:`AnalogSignal`
+        and :class:`IrregularlySampledSignal`) are set in
+        :meth:`_array_finalize_spec`.
+        '''
+        super(BaseSignal, self).__array_finalize__(obj)
+        self._array_finalize_spec(obj)
+
+        # The additional arguments
+        self.annotations = getattr(obj, 'annotations', {})
+        # Add empty array annotations, because they cannot always be copied,
+        # but do not overwrite existing ones from slicing etc.
+        # This ensures the attribute exists
+        if not hasattr(self, 'array_annotations'):
+            self.array_annotations = ArrayDict(self._get_arr_ann_length())
+
+        # Globally recommended attributes
+        self.name = getattr(obj, 'name', None)
+        self.file_origin = getattr(obj, 'file_origin', None)
+        self.description = getattr(obj, 'description', None)
+
+        # Parent objects
+        self.segment = getattr(obj, 'segment', None)
+        self.channel_index = getattr(obj, 'channel_index', None)
+
+    @classmethod
+    def _rescale(self, signal, units=None):
+        '''
+        Check that units are present, and rescale the signal if necessary.
+        This is called whenever a new signal is
+        created from the constructor. See :meth:`__new__` in
+        :class:`AnalogSignal` and :class:`IrregularlySampledSignal`
+        '''
+        if units is None:
+            if not hasattr(signal, "units"):
+                raise ValueError("Units must be specified")
+        elif isinstance(signal, pq.Quantity):
+            # This test always returns True, i.e. rescaling is always executed if one of the units
+            # is a pq.CompoundUnit. This is fine because rescaling is correct anyway.
+            if pq.quantity.validate_dimensionality(units) != signal.dimensionality:
+                signal = signal.rescale(units)
+        return signal
+
+    def rescale(self, units):
+        obj = super(BaseSignal, self).rescale(units)
+        obj.channel_index = self.channel_index
+        return obj
+
+    def __getslice__(self, i, j):
+        '''
+        Get a slice from :attr:`i` to :attr:`j`.
+
+        Doesn't get called in Python 3, :meth:`__getitem__` is called instead
+        '''
+        return self.__getitem__(slice(i, j))
+
     def __ne__(self, other):
         '''
         Non-equality test (!=)
@@ -39,15 +124,74 @@ class BaseSignal(BaseNeo, pq.Quantity):
 
     def _apply_operator(self, other, op, *args):
         '''
-        Handle copying metadata to the new :class:`BaseSignal`
+        Handle copying metadata to the new signal
         after a mathematical operation.
         '''
         self._check_consistency(other)
         f = getattr(super(BaseSignal, self), op)
         new_signal = f(other, *args)
         new_signal._copy_data_complement(self)
+        # _copy_data_complement can't always copy array annotations,
+        # so this needs to be done locally
+        new_signal.array_annotations = copy.deepcopy(self.array_annotations)
         return new_signal
 
+    def _get_required_attributes(self, signal, units):
+        '''
+        Return a list of the required attributes for a signal as a dictionary
+        '''
+        required_attributes = {}
+        for attr in self._necessary_attrs:
+            if 'signal' == attr[0]:
+                required_attributes[str(attr[0])] = signal
+            else:
+                required_attributes[str(attr[0])] = getattr(self, attr[0], None)
+        required_attributes['units'] = units
+        return required_attributes
+
+    def duplicate_with_new_data(self, signal, units=None):
+        '''
+        Create a new signal with the same metadata but different data.
+        Required attributes of the signal are used.
+        Note: Array annotations can not be copied here because length of data can change
+        '''
+        if units is None:
+            units = self.units
+        # else:
+        #     units = pq.quantity.validate_dimensionality(units)
+
+        # signal is the new signal
+        required_attributes = self._get_required_attributes(signal, units)
+        new = self.__class__(**required_attributes)
+        new._copy_data_complement(self)
+        new.annotations.update(self.annotations)
+        # Note: Array annotations are not copied here, because it is not ensured
+        # that the same number of signals is used and they would possibly make no sense
+        # when combined with another signal
+        return new
+
+    def _copy_data_complement(self, other):
+        '''
+        Copy the metadata from another signal.
+        Required and recommended attributes of the signal are used.
+        Note: Array annotations can not be copied here because length of data can change
+        '''
+        all_attr = {self._recommended_attrs, self._necessary_attrs}
+        for sub_at in all_attr:
+            for attr in sub_at:
+                if attr[0] != 'signal':
+                    setattr(self, attr[0], getattr(other, attr[0], None))
+        setattr(self, 'annotations', getattr(other, 'annotations', None))
+
+        # Note: Array annotations cannot be copied here because length of data
+        # can be changed, which would cause inconsistencies
+
+    def __rsub__(self, other, *args):
+        '''
+        Backwards subtraction (other-self)
+        '''
+        return self.__mul__(-1, *args) + other
+
     def __add__(self, other, *args):
         '''
         Addition (+)
@@ -81,19 +225,65 @@ class BaseSignal(BaseNeo, pq.Quantity):
     __radd__ = __add__
     __rmul__ = __mul__
 
-    def as_array(self, units=None):
-        """
-        Return the signal as a plain NumPy array.
+    def merge(self, other):
+        '''
+        Merge another signal into this one.
+
+        The signal objects are concatenated horizontally
+        (column-wise, :func:`np.hstack`).
+
+        If the attributes of the two signals are not
+        compatible, an Exception is raised.
+
+        Required attributes of the signal are used.
+        '''
+
+        for attr in self._necessary_attrs:
+            if 'signal' != attr[0]:
+                if getattr(self, attr[0], None) != getattr(other, attr[0], None):
+                    raise MergeError("Cannot merge these two signals as the %s differ." % attr[0])
+
+        if self.segment != other.segment:
+            raise MergeError(
+                "Cannot merge these two signals as they belong to different segments.")
+        if hasattr(self, "lazy_shape"):
+            if hasattr(other, "lazy_shape"):
+                if self.lazy_shape[0] != other.lazy_shape[0]:
+                    raise MergeError("Cannot merge signals of different length.")
+                merged_lazy_shape = (self.lazy_shape[0], self.lazy_shape[1] + other.lazy_shape[1])
+            else:
+                raise MergeError("Cannot merge a lazy object with a real object.")
+        if other.units != self.units:
+            other = other.rescale(self.units)
+        stack = np.hstack(map(np.array, (self, other)))
+        kwargs = {}
+        for name in ("name", "description", "file_origin"):
+            attr_self = getattr(self, name)
+            attr_other = getattr(other, name)
+            if attr_self == attr_other:
+                kwargs[name] = attr_self
+            else:
+                kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
+        kwargs.update(merged_annotations)
+
+        kwargs['array_annotations'] = self._merge_array_annotations(other)
+
+        signal = self.__class__(stack, units=self.units, dtype=self.dtype, copy=False,
+                                t_start=self.t_start, sampling_rate=self.sampling_rate, **kwargs)
+        signal.segment = self.segment
+
+        if hasattr(self, "lazy_shape"):
+            signal.lazy_shape = merged_lazy_shape
 
-        If `units` is specified, first rescale to those units.
-        """
-        if units:
-            return self.rescale(units).magnitude
+        # merge channel_index (move to ChannelIndex.merge()?)
+        if self.channel_index and other.channel_index:
+            signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]),
+                channel_ids=np.hstack(
+                    [self.channel_index.channel_ids, other.channel_index.channel_ids]),
+                channel_names=np.hstack(
+                    [self.channel_index.channel_names, other.channel_index.channel_names]))
         else:
-            return self.magnitude
+            signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]))
 
-    def as_quantity(self):
-        """
-        Return the signal as a quantities array.
-        """
-        return self.view(pq.Quantity)
+        return signal
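
The generalised `merge` and `duplicate_with_new_data` shown above can be used as in this sketch for two `AnalogSignal` objects (illustrative only, not part of the commit; names and values are made up):

    import numpy as np
    import quantities as pq
    from neo.core import AnalogSignal

    a = AnalogSignal(np.ones((1000, 1)), units='mV',
                     sampling_rate=1 * pq.kHz, name='chan0')
    b = AnalogSignal(np.zeros((1000, 1)), units='mV',
                     sampling_rate=1 * pq.kHz, name='chan1')

    # merge stacks the signals column-wise; t_start and sampling_rate must match
    merged = a.merge(b)    # shape (1000, 2), name becomes 'merge(chan0, chan1)'

    # duplicate_with_new_data reuses the metadata but swaps in new samples;
    # array annotations are deliberately not carried over
    doubled = a.duplicate_with_new_data(2 * a.magnitude)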

+ 27 - 19
code/python-neo/neo/core/container.py

@@ -31,6 +31,9 @@ def filterdata(data, targdict=None, objects=None, **kwargs):
     be a list of dictionaries, in which case the filters are applied
     sequentially.  If targdict and kwargs are both supplied, the
     targdict filters are applied first, followed by the kwarg filters.
+    A targdict of None or {} together with objects=None corresponds to
+    no filters being applied, so all child objects are returned.
+    Both targdict and objects default to None.
 
 
     objects (optional) should be the name of a Neo object type,
@@ -56,27 +59,27 @@ def filterdata(data, targdict=None, objects=None, **kwargs):
         targdict += [kwargs]
 
     if not targdict:
-        return []
+        results = data
 
     # if multiple dicts are provided, apply each filter sequentially
-    if not hasattr(targdict, 'keys'):
+    elif not hasattr(targdict, 'keys'):
         # for performance reasons, only do the object filtering on the first
         # iteration
         results = filterdata(data, targdict=targdict[0], objects=objects)
         for targ in targdict[1:]:
             results = filterdata(results, targdict=targ)
         return results
-
-    # do the actual filtering
-    results = []
-    for key, value in sorted(targdict.items()):
-        for obj in data:
-            if (hasattr(obj, key) and getattr(obj, key) == value and
-                    all([obj is not res for res in results])):
-                results.append(obj)
-            elif (key in obj.annotations and obj.annotations[key] == value and
-                    all([obj is not res for res in results])):
-                results.append(obj)
+    else:
+        # do the actual filtering
+        results = []
+        for key, value in sorted(targdict.items()):
+            for obj in data:
+                if (hasattr(obj, key) and getattr(obj, key) == value and
+                        all([obj is not res for res in results])):
+                    results.append(obj)
+                elif (key in obj.annotations and obj.annotations[key] == value and
+                          all([obj is not res for res in results])):
+                    results.append(obj)
 
     # keep only objects of the correct classes
     if objects:
@@ -379,6 +382,8 @@ class Container(BaseNeo):
         be a list of dictionaries, in which case the filters are applied
         sequentially.  If targdict and kwargs are both supplied, the
         targdict filters are applied first, followed by the kwarg filters.
+        A targdict of None or {} corresponds to no filters being applied,
+        so all child objects are returned. targdict defaults to None.
 
         If data is True (default), include data objects.
         If container is True (default False), include container objects.
@@ -387,14 +392,17 @@ class Container(BaseNeo):
 
         objects (optional) should be the name of a Neo object type,
         a neo object class, or a list of one or both of these.  If specified,
-        only these objects will be returned.  Note that if recursive is True,
-        containers not in objects will still be descended into.
-        This overrides data and container.
+        only these objects will be returned. If not specified, any type of
+        object is returned. Default is None.
+        Note that if recursive is True, containers not in objects will still
+        be descended into. This overrides data and container.
 
 
         Examples::
 
             >>> obj.filter(name="Vm")
+            >>> obj.filter(objects=neo.SpikeTrain)
+            >>> obj.filter(targdict={'myannotation':3})
         """
         # if objects are specified, get the classes
         if objects:
@@ -452,7 +460,7 @@ class Container(BaseNeo):
         parent_name = _reference_name(self.__class__.__name__)
         for child in self._single_children:
             if (hasattr(child, parent_name) and
-                    getattr(child, parent_name) is None or force):
+                        getattr(child, parent_name) is None or force):
                 setattr(child, parent_name, self)
         if recursive:
             for child in self.container_children:
@@ -474,7 +482,7 @@ class Container(BaseNeo):
                 continue
             if append:
                 target = getattr(child, parent_name)
-                if not self in target:
+                if self not in target:
                     target.append(self)
                 continue
             setattr(child, parent_name, [self])
@@ -520,7 +528,7 @@ class Container(BaseNeo):
         """
         # merge containers with the same name
         for container in (self._container_child_containers +
-                          self._multi_child_containers):
+                              self._multi_child_containers):
             lookup = dict((obj.name, obj) for obj in getattr(self, container))
             ids = [id(obj) for obj in getattr(self, container)]
             for obj in getattr(other, container):
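
With the relaxed `filterdata` above, an empty filter now returns all children instead of an empty list. A hypothetical usage sketch (it assumes an already populated `neo.Block` named `block`):

    everything = block.filter()                # no filters: all data children
    vm_signals = block.filter(name='Vm')       # filter by attribute
    annotated = block.filter(targdict={'myannotation': 3})
    spiketrains = block.filter(objects='SpikeTrain')   # filter by object type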

+ 133 - 57
code/python-neo/neo/core/epoch.py

@@ -10,26 +10,32 @@ This module defines :class:`Epoch`, an array of epochs.
 from __future__ import absolute_import, division, print_function
 
 import sys
+from copy import deepcopy
 
 import numpy as np
 import quantities as pq
 
 from neo.core.baseneo import BaseNeo, merge_annotations
+from neo.core.dataobject import DataObject, ArrayDict
 
 PY_VER = sys.version_info[0]
 
-def _new_epoch(cls, times=None, durations=None, labels=None, units=None,
-                name=None, description=None, file_origin=None, annotations = None, segment=None):
+
+def _new_epoch(cls, times=None, durations=None, labels=None, units=None, name=None,
+               description=None, file_origin=None, array_annotations=None, annotations=None,
+               segment=None):
     '''
     A function to map epoch.__new__ to function that
-    does not do the unit checking. This is needed for pickle to work. 
+    does not do the unit checking. This is needed for pickle to work.
     '''
-    e = Epoch( times=times, durations=durations, labels=labels, units=units, name=name, file_origin=file_origin,
-                 description=description, **annotations)
+    e = Epoch(times=times, durations=durations, labels=labels, units=units, name=name,
+              file_origin=file_origin, description=description,
+              array_annotations=array_annotations, **annotations)
     e.segment = segment
     return e
 
-class Epoch(BaseNeo, pq.Quantity):
+
+class Epoch(DataObject):
     '''
     Array of epochs.
 
@@ -52,16 +58,20 @@ class Epoch(BaseNeo, pq.Quantity):
               dtype='|S4')
 
     *Required attributes/properties*:
-        :times: (quantity array 1D) The starts of the time periods.
-        :durations: (quantity array 1D) The length of the time period.
-        :labels: (numpy.array 1D dtype='S') Names or labels for the
-            time periods.
+        :times: (quantity array 1D) The start times of each time period.
+        :durations: (quantity array 1D or quantity scalar) The length(s) of each time period.
+           If a scalar, the same value is used for all time periods.
+        :labels: (numpy.array 1D dtype='S') Names or labels for the time periods.
 
     *Recommended attributes/properties*:
         :name: (str) A label for the dataset,
         :description: (str) Text description,
         :file_origin: (str) Filesystem path or URL of the original data file.
 
+    *Optional attributes/properties*:
+        :array_annotations: (dict) Dict mapping strings to numpy arrays containing annotations \
+                                   for all data points
+
     Note: Any other additional arguments are assumed to be user-specific
     metadata and stored in :attr:`annotations`,
 
@@ -69,18 +79,24 @@ class Epoch(BaseNeo, pq.Quantity):
 
     _single_parent_objects = ('Segment',)
     _quantity_attr = 'times'
-    _necessary_attrs = (('times', pq.Quantity, 1),
-                        ('durations', pq.Quantity, 1),
+    _necessary_attrs = (('times', pq.Quantity, 1), ('durations', pq.Quantity, 1),
                         ('labels', np.ndarray, 1, np.dtype('S')))
 
-    def __new__(cls, times=None, durations=None, labels=None, units=None,
-                name=None, description=None, file_origin=None, **annotations):
+    def __new__(cls, times=None, durations=None, labels=None, units=None, name=None,
+                description=None, file_origin=None, array_annotations=None, **annotations):
         if times is None:
             times = np.array([]) * pq.s
         if durations is None:
             durations = np.array([]) * pq.s
+        elif durations.size != times.size:
+            if durations.size == 1:
+                durations = durations * np.ones_like(times.magnitude)
+            else:
+                raise ValueError("Durations array has different length to times")
         if labels is None:
             labels = np.array([], dtype='S')
+        elif len(labels) != times.size:
+            raise ValueError("Labels array has different length to times")
         if units is None:
             # No keyword units, so get from `times`
             try:
@@ -96,42 +112,45 @@ class Epoch(BaseNeo, pq.Quantity):
         # check to make sure the units are time
         # this approach is much faster than comparing the
         # reference dimensionality
-        if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                not isinstance(list(dim.keys())[0], pq.UnitTime)):
-            ValueError("Unit %s has dimensions %s, not [time]" %
-                       (units, dim.simplified))
+        if (len(dim) != 1 or list(dim.values())[0] != 1 or not isinstance(list(dim.keys())[0],
+                                                                          pq.UnitTime)):
+            ValueError("Unit %s has dimensions %s, not [time]" % (units, dim.simplified))
 
         obj = pq.Quantity.__new__(cls, times, units=dim)
-        obj.durations = durations
         obj.labels = labels
+        obj.durations = durations
         obj.segment = None
         return obj
 
-    def __init__(self, times=None, durations=None, labels=None, units=None,
-                 name=None, description=None, file_origin=None, **annotations):
+    def __init__(self, times=None, durations=None, labels=None, units=None, name=None,
+                 description=None, file_origin=None, array_annotations=None, **annotations):
         '''
         Initialize a new :class:`Epoch` instance.
         '''
-        BaseNeo.__init__(self, name=name, file_origin=file_origin,
-                         description=description, **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)
+
     def __reduce__(self):
         '''
-        Map the __new__ function onto _new_BaseAnalogSignal, so that pickle
+        Map the __new__ function onto _new_epoch, so that pickle
         works
         '''
         return _new_epoch, (self.__class__, self.times, self.durations, self.labels, self.units,
-                            self.name, self.file_origin, self.description,
-                            self.annotations, self.segment)      
+                            self.name, self.file_origin, self.description, self.array_annotations,
+                            self.annotations, self.segment)
 
     def __array_finalize__(self, obj):
         super(Epoch, self).__array_finalize__(obj)
-        self.durations = getattr(obj, 'durations', None)
-        self.labels = getattr(obj, 'labels', None)
         self.annotations = getattr(obj, 'annotations', None)
         self.name = getattr(obj, 'name', None)
         self.file_origin = getattr(obj, 'file_origin', None)
         self.description = getattr(obj, 'description', None)
         self.segment = getattr(obj, 'segment', None)
+        # Add empty array annotations, because they cannot always be copied,
+        # but do not overwrite existing ones from slicing etc.
+        # This ensures the attribute exists
+        if not hasattr(self, 'array_annotations'):
+            self.array_annotations = ArrayDict(self._get_arr_ann_length())
 
     def __repr__(self):
         '''
@@ -143,10 +162,45 @@ class Epoch(BaseNeo, pq.Quantity):
         else:
             labels = self.labels
 
-        objs = ['%s@%s for %s' % (label, time, dur) for
-                label, time, dur in zip(labels, self.times, self.durations)]
+        objs = ['%s@%s for %s' % (label, time, dur) for label, time, dur in
+                zip(labels, self.times, self.durations)]
         return '<Epoch: %s>' % ', '.join(objs)
 
+    def _repr_pretty_(self, pp, cycle):
+        super(Epoch, self)._repr_pretty_(pp, cycle)
+
+    def rescale(self, units):
+        '''
+        Return a copy of the :class:`Epoch` converted to the specified
+        units
+        '''
+
+        obj = super(Epoch, self).rescale(units)
+        obj.segment = self.segment
+
+        return obj
+
+    def __getitem__(self, i):
+        '''
+        Get the item or slice :attr:`i`.
+        '''
+        obj = Epoch(times=super(Epoch, self).__getitem__(i))
+        obj._copy_data_complement(self)
+        try:
+            # Array annotations need to be sliced accordingly
+            obj.array_annotate(**deepcopy(self.array_annotations_at_index(i)))
+        except AttributeError:  # If Quantity was returned, not Epoch
+            pass
+        return obj
+
+    def __getslice__(self, i, j):
+        '''
+        Get a slice from :attr:`i` to :attr:`j`.
+
+        Doesn't get called in Python 3, :meth:`__getitem__` is called instead
+        '''
+        return self.__getitem__(slice(i, j))
+
     @property
     def times(self):
         return pq.Quantity(self)
@@ -162,11 +216,7 @@ class Epoch(BaseNeo, pq.Quantity):
         compatible, an Exception is raised.
         '''
         othertimes = other.times.rescale(self.times.units)
-        otherdurations = other.durations.rescale(self.durations.units)
         times = np.hstack([self.times, othertimes]) * self.times.units
-        durations = np.hstack([self.durations,
-                               otherdurations]) * self.durations.units
-        labels = np.hstack([self.labels, other.labels])
         kwargs = {}
         for name in ("name", "description", "file_origin"):
             attr_self = getattr(self, name)
@@ -176,26 +226,55 @@ class Epoch(BaseNeo, pq.Quantity):
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
 
-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
         kwargs.update(merged_annotations)
+
+        kwargs['array_annotations'] = self._merge_array_annotations(other)
+        labels = kwargs['array_annotations']['labels']
+        durations = kwargs['array_annotations']['durations']
+
         return Epoch(times=times, durations=durations, labels=labels, **kwargs)
 
     def _copy_data_complement(self, other):
         '''
         Copy the metadata from another :class:`Epoch`.
+        Note: Array annotations can not be copied here because length of data can change
         '''
-        for attr in ("labels", "durations", "name", "file_origin",
-                     "description", "annotations"):
+        # Note: Array annotations cannot be copied because length of data could be changed
+        # here which would cause inconsistencies. This is instead done locally.
+        for attr in ("name", "file_origin", "description", "annotations"):
             setattr(self, attr, getattr(other, attr, None))
 
-    def duplicate_with_new_data(self, signal):
+    def __deepcopy__(self, memo):
+        cls = self.__class__
+        new_ep = cls(times=self.times, durations=self.durations, labels=self.labels,
+                     units=self.units, name=self.name, description=self.description,
+                     file_origin=self.file_origin)
+        new_ep.__dict__.update(self.__dict__)
+        memo[id(self)] = new_ep
+        for k, v in self.__dict__.items():
+            try:
+                setattr(new_ep, k, deepcopy(v, memo))
+            except TypeError:
+                setattr(new_ep, k, v)
+        return new_ep
+
+    def duplicate_with_new_data(self, signal, units=None):
         '''
         Create a new :class:`Epoch` with the same metadata
         but different data (times, durations)
+
+        Note: Array annotations can not be copied here because length of data can change
         '''
-        new = self.__class__(times=signal)
+
+        if units is None:
+            units = self.units
+        else:
+            units = pq.quantity.validate_dimensionality(units)
+
+        new = self.__class__(times=signal, units=units)
         new._copy_data_complement(self)
+        # Note: Array annotations can not be copied here because length of data can change
         return new
 
     def time_slice(self, t_start, t_stop):
@@ -213,25 +292,22 @@ class Epoch(BaseNeo, pq.Quantity):
             _t_stop = np.inf
 
         indices = (self >= _t_start) & (self <= _t_stop)
-
         new_epc = self[indices]
-        new_epc.durations = self.durations[indices]
-        new_epc.labels = self.labels[indices]
+
         return new_epc
 
-    def as_array(self, units=None):
-        """
-        Return the epoch start times as a plain NumPy array.
+    def set_labels(self, labels):
+        self.array_annotate(labels=labels)
 
-        If `units` is specified, first rescale to those units.
-        """
-        if units:
-            return self.rescale(units).magnitude
-        else:
-            return self.magnitude
+    def get_labels(self):
+        return self.array_annotations['labels']
+
+    labels = property(get_labels, set_labels)
+
+    def set_durations(self, durations):
+        self.array_annotate(durations=durations)
+
+    def get_durations(self):
+        return self.array_annotations['durations']
 
-    def as_quantity(self):
-        """
-        Return the epoch start times as a quantities array.
-        """
-        return self.view(pq.Quantity)
+    durations = property(get_durations, set_durations)
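
A short sketch of the reworked `Epoch` (illustrative values, not part of the commit): a scalar duration is broadcast to every start time, and `labels` and `durations` are now stored as array annotations:

    import numpy as np
    import quantities as pq
    from neo.core import Epoch

    ep = Epoch(times=np.array([1.0, 5.0, 9.0]) * pq.s,
               durations=2.0 * pq.s,    # broadcast to all three time periods
               labels=np.array(['a', 'b', 'c'], dtype='S'))

    ep.durations                         # array([2., 2., 2.]) * s
    'labels' in ep.array_annotations     # True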

+ 147 - 45
code/python-neo/neo/core/event.py

@@ -10,27 +10,31 @@ This module defines :class:`Event`, an array of events.
 from __future__ import absolute_import, division, print_function
 
 import sys
+from copy import deepcopy
 
 import numpy as np
 import quantities as pq
 
-from neo.core.baseneo import BaseNeo, merge_annotations
+from neo.core.baseneo import merge_annotations
+from neo.core.dataobject import DataObject, ArrayDict
+from neo.core.epoch import Epoch
 
 PY_VER = sys.version_info[0]
 
-def _new_event(cls, signal, times = None, labels=None, units=None, name=None, 
-               file_origin=None, description=None,
-               annotations=None, segment=None):
+
+def _new_event(cls, times=None, labels=None, units=None, name=None, file_origin=None,
+               description=None, array_annotations=None, annotations=None, segment=None):
     '''
     A function to map Event.__new__ to function that
-    does not do the unit checking. This is needed for pickle to work. 
+    does not do the unit checking. This is needed for pickle to work.
     '''
-    e = Event(signal=signal, times=times, labels=labels, units=units, name=name, file_origin=file_origin,
-                 description=description, **annotations)
+    e = Event(times=times, labels=labels, units=units, name=name, file_origin=file_origin,
+              description=description, array_annotations=array_annotations, **annotations)
     e.segment = segment
     return e
 
-class Event(BaseNeo, pq.Quantity):
+
+class Event(DataObject):
     '''
     Array of events.
 
@@ -59,6 +63,10 @@ class Event(BaseNeo, pq.Quantity):
         :description: (str) Text description.
         :file_origin: (str) Filesystem path or URL of the original data file.
 
+    *Optional attributes/properties*:
+        :array_annotations: (dict) Dict mapping strings to numpy arrays containing annotations \
+                                   for all data points
+
     Note: Any other additional arguments are assumed to be user-specific
     metadata and stored in :attr:`annotations`.
 
@@ -66,11 +74,10 @@ class Event(BaseNeo, pq.Quantity):
 
     _single_parent_objects = ('Segment',)
     _quantity_attr = 'times'
-    _necessary_attrs = (('times', pq.Quantity, 1),
-                        ('labels', np.ndarray, 1, np.dtype('S')))
+    _necessary_attrs = (('times', pq.Quantity, 1), ('labels', np.ndarray, 1, np.dtype('S')))
 
     def __new__(cls, times=None, labels=None, units=None, name=None, description=None,
-                file_origin=None, **annotations):
+                file_origin=None, array_annotations=None, **annotations):
         if times is None:
             times = np.array([]) * pq.s
         if labels is None:
@@ -90,10 +97,9 @@ class Event(BaseNeo, pq.Quantity):
         # check to make sure the units are time
         # this approach is much faster than comparing the
         # reference dimensionality
-        if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                not isinstance(list(dim.keys())[0], pq.UnitTime)):
-            ValueError("Unit %s has dimensions %s, not [time]" %
-                       (units, dim.simplified))
+        if (len(dim) != 1 or list(dim.values())[0] != 1 or not isinstance(list(dim.keys())[0],
+                                                                          pq.UnitTime)):
+            ValueError("Unit %s has dimensions %s, not [time]" % (units, dim.simplified))
 
         obj = pq.Quantity(times, units=dim).view(cls)
         obj.labels = labels
@@ -101,29 +107,34 @@ class Event(BaseNeo, pq.Quantity):
         return obj
 
     def __init__(self, times=None, labels=None, units=None, name=None, description=None,
-                 file_origin=None, **annotations):
+                 file_origin=None, array_annotations=None, **annotations):
         '''
         Initialize a new :class:`Event` instance.
         '''
-        BaseNeo.__init__(self, name=name, file_origin=file_origin,
-                         description=description, **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)
+
     def __reduce__(self):
         '''
-        Map the __new__ function onto _new_BaseAnalogSignal, so that pickle
+        Map the __new__ function onto _new_event, so that pickle
         works
         '''
-        return _new_event, (self.__class__, self.times, np.array(self), self.labels, self.units,
-                            self.name, self.file_origin, self.description,
+        return _new_event, (self.__class__, np.array(self), self.labels, self.units, self.name,
+                            self.file_origin, self.description, self.array_annotations,
                             self.annotations, self.segment)
 
     def __array_finalize__(self, obj):
         super(Event, self).__array_finalize__(obj)
-        self.labels = getattr(obj, 'labels', None)
         self.annotations = getattr(obj, 'annotations', None)
         self.name = getattr(obj, 'name', None)
         self.file_origin = getattr(obj, 'file_origin', None)
         self.description = getattr(obj, 'description', None)
         self.segment = getattr(obj, 'segment', None)
+        # Add empty array annotations, because they cannot always be copied,
+        # but do not overwrite existing ones from slicing etc.
+        # This ensures the attribute exists
+        if not hasattr(self, 'array_annotations'):
+            self.array_annotations = ArrayDict(self._get_arr_ann_length())
 
     def __repr__(self):
         '''
@@ -134,10 +145,21 @@ class Event(BaseNeo, pq.Quantity):
             labels = self.labels.astype('U')
         else:
             labels = self.labels
-        objs = ['%s@%s' % (label, time) for label, time in zip(labels,
-                                                               self.times)]
+        objs = ['%s@%s' % (label, time) for label, time in zip(labels, self.times)]
         return '<Event: %s>' % ', '.join(objs)
 
+    def _repr_pretty_(self, pp, cycle):
+        super(Event, self)._repr_pretty_(pp, cycle)
+
+    def rescale(self, units):
+        '''
+        Return a copy of the :class:`Event` converted to the specified
+        units
+        '''
+        obj = super(Event, self).rescale(units)
+        obj.segment = self.segment
+        return obj
+
     @property
     def times(self):
         return pq.Quantity(self)
@@ -154,7 +176,6 @@ class Event(BaseNeo, pq.Quantity):
         '''
         othertimes = other.times.rescale(self.times.units)
         times = np.hstack([self.times, othertimes]) * self.times.units
-        labels = np.hstack([self.labels, other.labels])
         kwargs = {}
         for name in ("name", "description", "file_origin"):
             attr_self = getattr(self, name)
@@ -164,26 +185,65 @@ class Event(BaseNeo, pq.Quantity):
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
 
-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        print('Event: merge annotations')
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
+
         kwargs.update(merged_annotations)
-        return Event(times=times, labels=labels, **kwargs)
+
+        kwargs['array_annotations'] = self._merge_array_annotations(other)
+
+        evt = Event(times=times, labels=kwargs['array_annotations']['labels'], **kwargs)
+
+        return evt
 
     def _copy_data_complement(self, other):
         '''
         Copy the metadata from another :class:`Event`.
+        Note: Array annotations can not be copied here because length of data can change
         '''
-        for attr in ("labels", "name", "file_origin", "description",
-                     "annotations"):
-            setattr(self, attr, getattr(other, attr, None))
+        # Note: Array annotations cannot be copied
+        # because they are linked to their respective timestamps
+        for attr in ("name", "file_origin", "description", "annotations"):
+            setattr(self, attr, getattr(other, attr, None))
+        # Array annotations cannot be copied because the length of the data can
+        # be changed here, which would cause inconsistencies.
+        # This includes labels and durations!
+
+    def __deepcopy__(self, memo):
+        cls = self.__class__
+        new_ev = cls(times=self.times, labels=self.labels, units=self.units, name=self.name,
+                     description=self.description, file_origin=self.file_origin)
+        new_ev.__dict__.update(self.__dict__)
+        memo[id(self)] = new_ev
+        for k, v in self.__dict__.items():
+            try:
+                setattr(new_ev, k, deepcopy(v, memo))
+            except TypeError:
+                setattr(new_ev, k, v)
+        return new_ev
+
+    def __getitem__(self, i):
+        obj = super(Event, self).__getitem__(i)
+        try:
+            obj.array_annotate(**deepcopy(self.array_annotations_at_index(i)))
+        except AttributeError:  # If Quantity was returned, not Event
+            pass
+        return obj
 
-    def duplicate_with_new_data(self, signal):
+    def duplicate_with_new_data(self, signal, units=None):
         '''
         Create a new :class:`Event` with the same metadata
         but different data
+        Note: Array annotations can not be copied here because length of data can change
         '''
-        new = self.__class__(times=signal)
+        if units is None:
+            units = self.units
+        else:
+            units = pq.quantity.validate_dimensionality(units)
+
+        new = self.__class__(times=signal, units=units)
         new._copy_data_complement(self)
+        # Note: Array annotations cannot be copied here, because length of data can be changed
         return new
 
     def time_slice(self, t_start, t_stop):
@@ -205,19 +265,61 @@ class Event(BaseNeo, pq.Quantity):
 
         return new_evt
 
-    def as_array(self, units=None):
-        """
-        Return the event times as a plain NumPy array.
+    def set_labels(self, labels):
+        self.array_annotate(labels=labels)
 
-        If `units` is specified, first rescale to those units.
-        """
-        if units:
-            return self.rescale(units).magnitude
-        else:
-            return self.magnitude
+    def get_labels(self):
+        return self.array_annotations['labels']
+
+    labels = property(get_labels, set_labels)
 
-    def as_quantity(self):
+    def to_epoch(self, pairwise=False, durations=None):
         """
-        Return the event times as a quantities array.
+        Returns a new Epoch object based on the times and labels in the Event object.
+
+        This method has three modes of action.
+
+        1. By default, an array of `n` event times will be transformed into
+           `n-1` epochs, where the end of one epoch is the beginning of the next.
+           This assumes that the events are ordered in time; it is the
+           responsibility of the caller to check this is the case.
+        2. If `pairwise` is True, then the event times will be taken as pairs
+           representing the start and end time of an epoch. The number of
+           events must be even, otherwise a ValueError is raised.
+        3. If `durations` is given, it should be a scalar Quantity or a
+           Quantity array of the same size as the Event.
+           Each event time is then taken as the start of an epoch of duration
+           given by `durations`.
+
+        `pairwise=True` and `durations` are mutually exclusive. A ValueError
+        will be raised if both are given.
+
+        If `durations` is given, the epoch labels are set to the labels of the
+        events that indicate the epoch starts.
+        If `durations` is not given, then the event labels A and B bounding
+        the epoch are used to set the labels of the epochs in the form 'A-B'.
         """
-        return self.view(pq.Quantity)
+
+        if pairwise:
+            # Mode 2
+            if durations is not None:
+                raise ValueError("Inconsistent arguments. "
+                                 "Cannot give both `pairwise` and `durations`")
+            if self.size % 2 != 0:
+                raise ValueError("Pairwise conversion of events to epochs"
+                                 " requires an even number of events")
+            times = self.times[::2]
+            durations = self.times[1::2] - times
+            labels = np.array(
+                ["{}-{}".format(a, b) for a, b in zip(self.labels[::2], self.labels[1::2])])
+        elif durations is None:
+            # Mode 1
+            times = self.times[:-1]
+            durations = np.diff(self.times)
+            labels = np.array(
+                ["{}-{}".format(a, b) for a, b in zip(self.labels[:-1], self.labels[1:])])
+        else:
+            # Mode 3
+            times = self.times
+            labels = self.labels
+        return Epoch(times=times, durations=durations, labels=labels)
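A minimal sketch of the three modes described above, using made-up event times and labels; the scalar-duration call assumes Epoch broadcasts a scalar duration, as the docstring implies:

import numpy as np
import quantities as pq
from neo.core import Event

evt = Event(times=np.array([0.5, 1.0, 2.0, 3.5]) * pq.s,
            labels=np.array(['A', 'B', 'C', 'D']))

ep1 = evt.to_epoch()                       # mode 1: 3 epochs labelled 'A-B', 'B-C', 'C-D'
ep2 = evt.to_epoch(pairwise=True)          # mode 2: 2 epochs, 0.5-1.0 s and 2.0-3.5 s
ep3 = evt.to_epoch(durations=100 * pq.ms)  # mode 3: 4 epochs of 100 ms starting at each event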

+ 150 - 173
code/python-neo/neo/core/irregularlysampledsignal.py

@@ -4,8 +4,9 @@ This module implements :class:`IrregularlySampledSignal`, an array of analog
 signals with samples taken at arbitrary time points.
 
 :class:`IrregularlySampledSignal` inherits from :class:`basesignal.BaseSignal`
-and  derives from :class:`BaseNeo`, from :module:`neo.core.baseneo`, 
-and from :class:`quantities.Quantity`, which inherits from :class:`numpy.array`.
+which derives from :class:`BaseNeo`, from :module:`neo.core.baseneo`,
+and from :class:`quantities.Quantity`, which in turn inherits from
+:class:`numpy.ndarray`.
 
 Inheritance from :class:`numpy.array` is explained here:
 http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
@@ -22,29 +23,33 @@ the old object.
 # needed for Python 3 compatibility
 from __future__ import absolute_import, division, print_function
 
+from copy import deepcopy
 import numpy as np
 import quantities as pq
 
 from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
+from neo.core.basesignal import BaseSignal
+from neo.core.channelindex import ChannelIndex
+from neo.core.dataobject import DataObject
 
-from neo.core import basesignal
 
 def _new_IrregularlySampledSignal(cls, times, signal, units=None, time_units=None, dtype=None,
                                   copy=True, name=None, file_origin=None, description=None,
-                                  annotations=None, segment=None, channel_index=None):
+                                  array_annotations=None, annotations=None, segment=None,
+                                  channel_index=None):
     '''
-    A function to map IrregularlySampledSignal.__new__ to function that
+    A function to map IrregularlySampledSignal.__new__ to a function that
     does not do the unit checking. This is needed for pickle to work.
     '''
-    iss = cls(times=times, signal=signal, units=units, time_units=time_units, 
-               dtype=dtype, copy=copy, name=name, file_origin=file_origin,
-               description=description, **annotations)
+    iss = cls(times=times, signal=signal, units=units, time_units=time_units, dtype=dtype,
+              copy=copy, name=name, file_origin=file_origin, description=description,
+              array_annotations=array_annotations, **annotations)
     iss.segment = segment
     iss.channel_index = channel_index
     return iss
 
 
-class IrregularlySampledSignal(basesignal.BaseSignal):
+class IrregularlySampledSignal(BaseSignal):
     '''
     An array of one or more analog signals with samples taken at arbitrary time points.
 
@@ -52,6 +57,9 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
     :attr:`t_start` with a varying sampling interval. Each channel is sampled
     at the same time points.
 
+    Inherits from :class:`quantities.Quantity`, which in turn inherits from
+    :class:`numpy.ndarray`.
+
     *Usage*::
 
         >>> from neo.core import IrregularlySampledSignal
@@ -82,6 +90,8 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
         :dtype: (numpy dtype or str) Override the dtype of the signal array.
             (times are always floats).
         :copy: (bool) True by default.
+        :array_annotations: (dict) Dict mapping strings to numpy arrays containing annotations \
+                                   for all data points
 
     Note: Any other additional arguments are assumed to be user-specific
     metadata and stored in :attr:`annotations`.
@@ -110,28 +120,17 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
 
     _single_parent_objects = ('Segment', 'ChannelIndex')
     _quantity_attr = 'signal'
-    _necessary_attrs = (('times', pq.Quantity, 1),
-                        ('signal', pq.Quantity, 2))
+    _necessary_attrs = (('times', pq.Quantity, 1), ('signal', pq.Quantity, 2))
 
-    def __new__(cls, times, signal, units=None, time_units=None, dtype=None,
-                copy=True, name=None, file_origin=None,
-                description=None,
-                **annotations):
+    def __new__(cls, times, signal, units=None, time_units=None, dtype=None, copy=True, name=None,
+                file_origin=None, description=None, array_annotations=None, **annotations):
         '''
         Construct a new :class:`IrregularlySampledSignal` instance.
 
         This is called whenever a new :class:`IrregularlySampledSignal` is
         created from the constructor, but not when slicing.
         '''
-        if units is None:
-            if hasattr(signal, "units"):
-                units = signal.units
-            else:
-                raise ValueError("Units must be specified")
-        elif isinstance(signal, pq.Quantity):
-             # could improve this test, what if units is a string?
-            if units != signal.units:
-                signal = signal.rescale(units)
+        signal = cls._rescale(signal, units=units)
         if time_units is None:
             if hasattr(times, "units"):
                 time_units = times.units
@@ -142,102 +141,82 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
             if time_units != times.units:
                 times = times.rescale(time_units)
         # should check time units have correct dimensions
-        obj = pq.Quantity.__new__(cls, signal, units=units,
-                                  dtype=dtype, copy=copy)
+        obj = pq.Quantity.__new__(cls, signal, units=units, dtype=dtype, copy=copy)
         if obj.ndim == 1:
             obj = obj.reshape(-1, 1)
         if len(times) != obj.shape[0]:
             raise ValueError("times array and signal array must "
                              "have same length")
-        obj.times = pq.Quantity(times, units=time_units,
-                                dtype=float, copy=copy)
+        obj.times = pq.Quantity(times, units=time_units, dtype=float, copy=copy)
         obj.segment = None
         obj.channel_index = None
 
         return obj
 
-    def __init__(self, times, signal, units=None, time_units=None, dtype=None,
-                 copy=True, name=None, file_origin=None, description=None,
+    def __init__(self, times, signal, units=None, time_units=None, dtype=None, copy=True,
+                 name=None, file_origin=None, description=None, array_annotations=None,
                  **annotations):
         '''
         Initializes a newly constructed :class:`IrregularlySampledSignal`
         instance.
         '''
-        BaseNeo.__init__(self, name=name, file_origin=file_origin,
-                         description=description, **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)
 
     def __reduce__(self):
         '''
         Map the __new__ function onto _new_IrregularlySampledSignal, so that pickle
         works
         '''
-        return _new_IrregularlySampledSignal, (self.__class__,
-                                               self.times, 
-                                               np.array(self),
-                                               self.units, 
-                                               self.times.units, 
-                                               self.dtype,
-                                               True, 
-                                               self.name, 
-                                               self.file_origin,
-                                               self.description,
-                                               self.annotations,
-                                               self.segment,
-                                               self.channel_index)
-
-    def __array_finalize__(self, obj):
-        '''
-        This is called every time a new :class:`IrregularlySampledSignal` is
-        created.
-
-        It is the appropriate place to set default values for attributes
-        for :class:`IrregularlySampledSignal` constructed by slicing or
-        viewing.
-
-        User-specified values are only relevant for construction from
-        constructor, and these are set in __new__. Then they are just
-        copied over here.
-        '''
-        super(IrregularlySampledSignal, self).__array_finalize__(obj)
-        self.times = getattr(obj, 'times', None)
+        return _new_IrregularlySampledSignal, (self.__class__, self.times, np.array(self),
+                                               self.units, self.times.units, self.dtype, True,
+                                               self.name, self.file_origin, self.description,
+                                               self.array_annotations, self.annotations,
+                                               self.segment, self.channel_index)
 
-        # The additional arguments
-        self.annotations = getattr(obj, 'annotations', None)
+    def _array_finalize_spec(self, obj):
+        '''
+        Set default values for attributes specific to :class:`IrregularlySampledSignal`.
 
-        # Globally recommended attributes
-        self.name = getattr(obj, 'name', None)
-        self.file_origin = getattr(obj, 'file_origin', None)
-        self.description = getattr(obj, 'description', None)
+        Common attributes are defined in
+        :meth:`__array_finalize__` of :class:`basesignal.BaseSignal`,
+        which is called every time a new signal is created
+        and calls this method.
+        '''
+        self.times = getattr(obj, 'times', None)
+        return obj
 
-        # Parent objects
-        self.segment = getattr(obj, 'segment', None)
-        self.channel_index = getattr(obj, 'channel_index', None)
+    def __deepcopy__(self, memo):
+        cls = self.__class__
+        new_signal = cls(self.times, np.array(self), units=self.units,
+                         time_units=self.times.units, dtype=self.dtype,
+                         t_start=self.t_start, name=self.name,
+                         file_origin=self.file_origin, description=self.description)
+        new_signal.__dict__.update(self.__dict__)
+        memo[id(self)] = new_signal
+        for k, v in self.__dict__.items():
+            try:
+                setattr(new_signal, k, deepcopy(v, memo))
+            except TypeError:
+                setattr(new_signal, k, v)
+        return new_signal
 
     def __repr__(self):
         '''
         Returns a string representing the :class:`IrregularlySampledSignal`.
         '''
-        return '<%s(%s at times %s)>' % (self.__class__.__name__,
-                                         super(IrregularlySampledSignal,
-                                               self).__repr__(), self.times)
-
-    def __getslice__(self, i, j):
-        '''
-        Get a slice from :attr:`i` to :attr:`j`.
-
-        Doesn't get called in Python 3, :meth:`__getitem__` is called instead
-        '''
-        return self.__getitem__(slice(i, j))
-
+        return '<%s(%s at times %s)>' % (
+            self.__class__.__name__, super(IrregularlySampledSignal, self).__repr__(), self.times)
 
     def __getitem__(self, i):
         '''
         Get the item or slice :attr:`i`.
         '''
-        obj = super(IrregularlySampledSignal, self).__getitem__(i)
         if isinstance(i, (int, np.integer)):  # a single point in time across all channels
+            obj = super(IrregularlySampledSignal, self).__getitem__(i)
             obj = pq.Quantity(obj.magnitude, units=obj.units)
         elif isinstance(i, tuple):
+            obj = super(IrregularlySampledSignal, self).__getitem__(i)
             j, k = i
             if isinstance(j, (int, np.integer)):  # a single point in time across some channels
                 obj = pq.Quantity(obj.magnitude, units=obj.units)
@@ -249,14 +228,29 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
                 else:
                     raise TypeError("%s not supported" % type(j))
                 if isinstance(k, (int, np.integer)):
-                    obj = obj.reshape(-1, 1)
+                    obj = obj.reshape(-1, 1)  # add if channel_index
+                obj.array_annotations = deepcopy(self.array_annotations_at_index(k))
         elif isinstance(i, slice):
+            obj = super(IrregularlySampledSignal, self).__getitem__(i)
             obj.times = self.times.__getitem__(i)
+            obj.array_annotations = deepcopy(self.array_annotations)
+        elif isinstance(i, np.ndarray):
+            # Indexing of an IrregularlySampledSignal is only consistent if the resulting
+            # number of samples is the same for each trace. The time axis for these samples is
+            # not guaranteed to be continuous, so a Quantity is returned here instead of an
+            # IrregularlySampledSignal.
+            new_time_dims = np.sum(i, axis=0)
+            if len(new_time_dims) and all(new_time_dims == new_time_dims[0]):
+                obj = np.asarray(self).T.__getitem__(i.T)
+                obj = obj.T.reshape(self.shape[1], -1).T
+                obj = pq.Quantity(obj, units=self.units)
+            else:
+                raise IndexError("indexing of an IrregularlySampledSignal needs to keep the same "
+                                 "number of samples for each trace contained")
         else:
-            raise IndexError("index should be an integer, tuple or slice")
+            raise IndexError("index should be an integer, tuple, slice or boolean numpy array")
         return obj
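A minimal sketch of the new boolean-mask behaviour, with made-up data; the mask must select the same number of samples in every channel, otherwise an IndexError is raised:

import numpy as np
import quantities as pq
from neo.core import IrregularlySampledSignal

sig = IrregularlySampledSignal(np.array([0.0, 0.5, 1.2, 3.3]) * pq.s,
                               np.array([[1, 10], [2, 20], [3, 30], [4, 40]]) * pq.mV)

mask = np.array([[False, False],
                 [False, True],
                 [True,  True],
                 [True,  False]])
sig[mask]                            # two samples per channel -> plain Quantity of shape (2, 2)
sig[np.array([[True, False]] * 4)]   # unequal counts per channel -> IndexError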
 
-
     @property
     def duration(self):
         '''
@@ -288,8 +282,9 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
         '''
         Equality test (==)
         '''
-        return (super(IrregularlySampledSignal, self).__eq__(other).all() and
-                (self.times == other.times).all())
+        if (isinstance(other, IrregularlySampledSignal) and not (self.times == other.times).all()):
+            return False
+        return super(IrregularlySampledSignal, self).__eq__(other)
 
     def _check_consistency(self, other):
         '''
@@ -304,8 +299,7 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
             return
         # dimensionality should match
         if self.ndim != other.ndim:
-            raise ValueError('Dimensionality does not match: %s vs %s' %
-                             (self.ndim, other.ndim))
+            raise ValueError('Dimensionality does not match: %s vs %s' % (self.ndim, other.ndim))
         # if if the other array does not have a times property,
         # then it should be okay to add it directly
         if not hasattr(other, 'times'):
@@ -313,16 +307,7 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
 
         # if there is a times property, the times need to be the same
         if not (self.times == other.times).all():
-            raise ValueError('Times do not match: %s vs %s' %
-                             (self.times, other.times))
-
-    def _copy_data_complement(self, other):
-        '''
-        Copy the metadata from another :class:`IrregularlySampledSignal`.
-        '''
-        for attr in ("times", "name", "file_origin",
-                     "description", "annotations"):
-            setattr(self, attr, getattr(other, attr, None))
+            raise ValueError('Times do not match: %s vs %s' % (self.times, other.times))
 
     def __rsub__(self, other, *args):
         '''
@@ -335,12 +320,11 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
         Handle pretty-printing the :class:`IrregularlySampledSignal`.
         '''
         pp.text("{cls} with {channels} channels of length {length}; "
-                "units {units}; datatype {dtype} ".format(
-                    cls=self.__class__.__name__,
-                    channels=self.shape[1],
-                    length=self.shape[0],
-                    units=self.units.dimensionality.string,
-                    dtype=self.dtype))
+                "units {units}; datatype {dtype} ".format(cls=self.__class__.__name__,
+                                                          channels=self.shape[1],
+                                                          length=self.shape[0],
+                                                          units=self.units.dimensionality.string,
+                                                          dtype=self.dtype))
         if self._has_repr_pretty_attrs_():
             pp.breakable()
             self._repr_pretty_attrs_(pp, cycle)
@@ -349,6 +333,7 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
             pp.breakable()
             with pp.group(indent=1):
                 pp.text(line)
+
         for line in ["sample times: {0}".format(self.times)]:
             _pp(line)
 
@@ -370,7 +355,7 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
         stepwise at sampling times.
         '''
         if interpolation is None:
-            return (self[:-1]*self.sampling_intervals.reshape(-1, 1)).sum()/self.duration
+            return (self[:-1] * self.sampling_intervals.reshape(-1, 1)).sum() / self.duration
         else:
             raise NotImplementedError
 
@@ -390,47 +375,58 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
         # further interpolation methods could be added
         raise NotImplementedError
 
-    def rescale(self, units):
+    def time_slice(self, t_start, t_stop):
         '''
-        Return a copy of the :class:`IrregularlySampledSignal` converted to the
-        specified units
+        Creates a new :class:`IrregularlySampledSignal` corresponding to the time slice of
+        the original :class:`IrregularlySampledSignal` between times
+        `t_start` and `t_stop`. Either parameter can also be None
+        to use infinite endpoints for the time interval.
         '''
-        to_dims = pq.quantity.validate_dimensionality(units)
-        if self.dimensionality == to_dims:
-            to_u = self.units
-            signal = np.array(self)
-        else:
-            to_u = pq.Quantity(1.0, to_dims)
-            from_u = pq.Quantity(1.0, self.dimensionality)
-            try:
-                cf = pq.quantity.get_conversion_factor(from_u, to_u)
-            except AssertionError:
-                raise ValueError('Unable to convert between units of "%s" \
-                                 and "%s"' % (from_u._dimensionality,
-                                              to_u._dimensionality))
-            signal = cf * self.magnitude
-        new = self.__class__(times=self.times, signal=signal, units=to_u)
-        new._copy_data_complement(self)
-        new.channel_index = self.channel_index
-        new.segment = self.segment
-        new.annotations.update(self.annotations)
-        return new
+        _t_start = t_start
+        _t_stop = t_stop
+
+        if t_start is None:
+            _t_start = -np.inf
+        if t_stop is None:
+            _t_stop = np.inf
+        indices = (self.times >= _t_start) & (self.times <= _t_stop)
+
+        count = 0
+        id_start = None
+        id_stop = None
+        for i in indices:
+            if id_start is None:
+                if i:
+                    id_start = count
+            else:
+                if not i:
+                    id_stop = count
+                    break
+            count += 1
+
+        new_st = self[id_start:id_stop]
+
+        return new_st
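A minimal sketch of time_slice with made-up samples; passing None for either bound uses an open (infinite) endpoint:

import numpy as np
import quantities as pq
from neo.core import IrregularlySampledSignal

sig = IrregularlySampledSignal(np.array([0.1, 0.7, 1.5, 4.0]) * pq.s,
                               np.array([[1.0], [2.0], [3.0], [4.0]]) * pq.mV)

sig.time_slice(0.5 * pq.s, 2.0 * pq.s)   # keeps the samples at 0.7 s and 1.5 s
sig.time_slice(None, 1.0 * pq.s)         # open lower bound: samples at 0.1 s and 0.7 s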
 
     def merge(self, other):
         '''
-        Merge another :class:`IrregularlySampledSignal` with this one, and return the
-        merged signal.
+        Merge another signal into this one.
 
-        The :class:`IrregularlySampledSignal` objects are concatenated horizontally
+        The signal objects are concatenated horizontally
         (column-wise, :func:`np.hstack`).
 
-        If the attributes of the two :class:`IrregularlySampledSignal` are not
-        compatible, a :class:`MergeError` is raised.
+        If the attributes of the two signals are not
+        compatible, an Exception is raised.
+
+        Required attributes of the signal are used.
         '''
+
         if not np.array_equal(self.times, other.times):
             raise MergeError("Cannot merge these two signals as the sample times differ.")
+
         if self.segment != other.segment:
-            raise MergeError("Cannot merge these two signals as they belong to different segments.")
+            raise MergeError(
+                "Cannot merge these two signals as they belong to different segments.")
         if hasattr(self, "lazy_shape"):
             if hasattr(other, "lazy_shape"):
                 if self.lazy_shape[0] != other.lazy_shape[0]:
@@ -449,46 +445,27 @@ class IrregularlySampledSignal(basesignal.BaseSignal):
                 kwargs[name] = attr_self
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
         kwargs.update(merged_annotations)
-        signal = IrregularlySampledSignal(self.times, stack, units=self.units,
-                                         dtype=self.dtype, copy=False,
-                                         **kwargs)
+
+        signal = self.__class__(self.times, stack, units=self.units, dtype=self.dtype,
+                                copy=False, **kwargs)
         signal.segment = self.segment
+        signal.array_annotate(**self._merge_array_annotations(other))
+
         if hasattr(self, "lazy_shape"):
             signal.lazy_shape = merged_lazy_shape
-        return signal
-
-    def time_slice (self, t_start, t_stop):
-        '''
-        Creates a new :class:`IrregularlySampledSignal` corresponding to the time slice of
-        the original :class:`IrregularlySampledSignal` between times
-        `t_start` and `t_stop`. Either parameter can also be None
-        to use infinite endpoints for the time interval.
-        '''
-        _t_start = t_start
-        _t_stop = t_stop
-
-        if t_start is None:
-            _t_start = -np.inf
-        if t_stop is None:
-            _t_stop = np.inf
-        indices = (self.times >= _t_start) & (self.times <= _t_stop)
 
-        count = 0
-        id_start = None
-        id_stop = None
-        for i in indices :
-            if id_start == None :
-                if i == True :
-                    id_start = count
-            else :
-                if i == False : 
-                    id_stop = count
-                    break
-            count += 1
-        
-        new_st = self[id_start:id_stop]
+        # merge channel_index (move to ChannelIndex.merge()?)
+        if self.channel_index and other.channel_index:
+            signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]),
+                                                channel_ids=np.hstack(
+                                                    [self.channel_index.channel_ids,
+                                                     other.channel_index.channel_ids]),
+                                                channel_names=np.hstack(
+                                                    [self.channel_index.channel_names,
+                                                     other.channel_index.channel_names]))
+        else:
+            signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]))
 
-        return new_st
+        return signal
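A minimal sketch of merging two single-channel signals that share the same sample times, with made-up values; the result stacks the channels column-wise and gets a fresh ChannelIndex:

import numpy as np
import quantities as pq
from neo.core import IrregularlySampledSignal

times = np.array([0.0, 0.5, 1.2]) * pq.s
a = IrregularlySampledSignal(times, np.array([[1.0], [2.0], [3.0]]) * pq.mV)
b = IrregularlySampledSignal(times, np.array([[10.0], [20.0], [30.0]]) * pq.mV)

merged = a.merge(b)      # shape (3, 2); raises MergeError if the sample times differ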

+ 7 - 5
code/python-neo/neo/core/segment.py

@@ -103,11 +103,12 @@ class Segment(Container):
         '''
         Time when first signal begins.
         '''
-        t_starts = [sig.t_start for sig in self.analogsignals + self.spiketrains + self.irregularlysampledsignals]
+        t_starts = [sig.t_start for sig in self.analogsignals +
+                    self.spiketrains + self.irregularlysampledsignals]
         t_starts += [e.times[0] for e in self.epochs + self.events if len(e.times) > 0]
 
         # t_start is not defined if no children are present
-        if len(t_starts)==0:
+        if len(t_starts) == 0:
             return None
 
         t_start = min(t_starts)
@@ -119,11 +120,12 @@ class Segment(Container):
         '''
         Time when last signal ends.
         '''
-        t_stops = [sig.t_stop for sig in self.analogsignals +  self.spiketrains + self.irregularlysampledsignals]
+        t_stops = [sig.t_stop for sig in self.analogsignals +
+                   self.spiketrains + self.irregularlysampledsignals]
         t_stops += [e.times[-1] for e in self.epochs + self.events if len(e.times) > 0]
 
         # t_stop is not defined if no children are present
-        if len(t_stops)==0:
+        if len(t_stops) == 0:
             return None
 
         t_stop = max(t_stops)
@@ -247,5 +249,5 @@ class Segment(Container):
         seg.spiketrains = self.take_spiketrains_by_unit(unit_list)
         seg.analogsignals = \
             self.take_slice_of_analogsignalarray_by_unit(unit_list)
-        #TODO copy others attributes
+        # TODO copy others attributes
         return seg

+ 209 - 118
code/python-neo/neo/core/spiketrain.py

@@ -23,9 +23,12 @@ from __future__ import absolute_import, division, print_function
 import sys
 
 import copy
+import warnings
+
 import numpy as np
 import quantities as pq
 from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
+from neo.core.dataobject import DataObject, ArrayDict
 
 
 def check_has_dimensions_time(*values):
@@ -36,10 +39,9 @@ def check_has_dimensions_time(*values):
     errmsgs = []
     for value in values:
         dim = value.dimensionality
-        if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                not isinstance(list(dim.keys())[0], pq.UnitTime)):
-            errmsgs.append("value %s has dimensions %s, not [time]" %
-                           (value, dim.simplified))
+        if (len(dim) != 1 or list(dim.values())[0] != 1 or not isinstance(list(dim.keys())[0],
+                                                                          pq.UnitTime)):
+            errmsgs.append("value %s has dimensions %s, not [time]" % (value, dim.simplified))
     if errmsgs:
         raise ValueError("\n".join(errmsgs))
 
@@ -54,6 +56,9 @@ def _check_time_in_range(value, t_start, t_stop, view=False):
     certain that the dtype and units are the same
     '''
 
+    if t_start > t_stop:
+        raise ValueError("t_stop (%s) is before t_start (%s)" % (t_stop, t_start))
+
     if not value.size:
         return
 
@@ -63,11 +68,9 @@ def _check_time_in_range(value, t_start, t_stop, view=False):
         t_stop = t_stop.view(np.ndarray)
 
     if value.min() < t_start:
-        raise ValueError("The first spike (%s) is before t_start (%s)" %
-                         (value, t_start))
+        raise ValueError("The first spike (%s) is before t_start (%s)" % (value, t_start))
     if value.max() > t_stop:
-        raise ValueError("The last spike (%s) is after t_stop (%s)" %
-                         (value, t_stop))
+        raise ValueError("The last spike (%s) is after t_stop (%s)" % (value, t_stop))
 
 
 def _check_waveform_dimensions(spiketrain):
@@ -86,14 +89,12 @@ def _check_waveform_dimensions(spiketrain):
 
     if waveforms.shape[0] != len(spiketrain):
         raise ValueError("Spiketrain length (%s) does not match to number of "
-                         "waveforms present (%s)" % (len(spiketrain),
-                                                     waveforms.shape[0]))
+                         "waveforms present (%s)" % (len(spiketrain), waveforms.shape[0]))
 
 
-def _new_spiketrain(cls, signal, t_stop, units=None, dtype=None,
-                    copy=True, sampling_rate=1.0 * pq.Hz,
-                    t_start=0.0 * pq.s, waveforms=None, left_sweep=None,
-                    name=None, file_origin=None, description=None,
+def _new_spiketrain(cls, signal, t_stop, units=None, dtype=None, copy=True,
+                    sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s, waveforms=None, left_sweep=None,
+                    name=None, file_origin=None, description=None, array_annotations=None,
                     annotations=None, segment=None, unit=None):
     '''
     A function to map :meth:`BaseAnalogSignal.__new__` to function that
@@ -101,15 +102,14 @@ def _new_spiketrain(cls, signal, t_stop, units=None, dtype=None,
     '''
     if annotations is None:
         annotations = {}
-    obj = SpikeTrain(signal, t_stop, units, dtype, copy, sampling_rate,
-                     t_start, waveforms, left_sweep, name, file_origin,
-                     description, **annotations)
+    obj = SpikeTrain(signal, t_stop, units, dtype, copy, sampling_rate, t_start, waveforms,
+                     left_sweep, name, file_origin, description, array_annotations, **annotations)
     obj.segment = segment
     obj.unit = unit
     return obj
 
 
-class SpikeTrain(BaseNeo, pq.Quantity):
+class SpikeTrain(DataObject):
     '''
     :class:`SpikeTrain` is a :class:`Quantity` array of spike times.
 
@@ -170,6 +170,8 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         :dtype: (numpy dtype or str) Override the dtype of the signal array.
         :copy: (bool) Whether to copy the times array.  True by default.
             Must be True when you request a change of units or dtype.
+        :array_annotations: (dict) Dict mapping strings to numpy arrays containing annotations \
+                                   for all data points
 
     Note: Any other additional arguments are assumed to be user-specific
     metadata and stored in :attr:`annotations`.
@@ -185,8 +187,7 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         :right_sweep: (quantity scalar) Time from the trigger times of the
             spikes to the end of the waveforms, read-only.
             (:attr:`left_sweep` + :attr:`spike_duration`)
-        :times: (:class:`SpikeTrain`) Returns the :class:`SpikeTrain` without
-            modification or copying.
+        :times: (quantity array 1D) Returns the :class:`SpikeTrain` as a quantity array.
 
     *Slicing*:
         :class:`SpikeTrain` objects can be sliced. When this occurs, a new
@@ -199,29 +200,23 @@ class SpikeTrain(BaseNeo, pq.Quantity):
 
     _single_parent_objects = ('Segment', 'Unit')
     _quantity_attr = 'times'
-    _necessary_attrs = (('times', pq.Quantity, 1),
-                        ('t_start', pq.Quantity, 0),
+    _necessary_attrs = (('times', pq.Quantity, 1), ('t_start', pq.Quantity, 0),
                         ('t_stop', pq.Quantity, 0))
-    _recommended_attrs = ((('waveforms', pq.Quantity, 3),
-                           ('left_sweep', pq.Quantity, 0),
-                           ('sampling_rate', pq.Quantity, 0)) +
-                          BaseNeo._recommended_attrs)
+    _recommended_attrs = ((('waveforms', pq.Quantity, 3), ('left_sweep', pq.Quantity, 0),
+                           ('sampling_rate', pq.Quantity, 0)) + BaseNeo._recommended_attrs)
 
-    def __new__(cls, times, t_stop, units=None, dtype=None, copy=True,
-                sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s, waveforms=None,
-                left_sweep=None, name=None, file_origin=None, description=None,
-                **annotations):
+    def __new__(cls, times, t_stop, units=None, dtype=None, copy=True, sampling_rate=1.0 * pq.Hz,
+                t_start=0.0 * pq.s, waveforms=None, left_sweep=None, name=None, file_origin=None,
+                description=None, array_annotations=None, **annotations):
         '''
         Constructs a new :clas:`Spiketrain` instance from data.
 
         This is called whenever a new :class:`SpikeTrain` is created from the
         constructor, but not when slicing.
         '''
-        if len(times) != 0 and waveforms is not None and len(times) != \
-                waveforms.shape[
-                    0]:  # len(times)!=0 has been used to workaround a bug occuring during neo import)
-            raise ValueError(
-                "the number of waveforms should be equal to the number of spikes")
+        if len(times) != 0 and waveforms is not None and len(times) != waveforms.shape[0]:
+            # len(times) != 0 has been used to work around a bug occurring during neo import
+            raise ValueError("the number of waveforms should be equal to the number of spikes")
 
         # Make sure units are consistent
         # also get the dimensionality now since it is much faster to feed
@@ -270,8 +265,8 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         # check to make sure the units are time
         # this approach is orders of magnitude faster than comparing the
         # reference dimensionality
-        if (len(dim) != 1 or list(dim.values())[0] != 1 or
-                not isinstance(list(dim.keys())[0], pq.UnitTime)):
+        if (len(dim) != 1 or list(dim.values())[0] != 1 or not isinstance(list(dim.keys())[0],
+                                                                          pq.UnitTime)):
             ValueError("Unit has dimensions %s, not [time]" % dim.simplified)
 
         # Construct Quantity from data
@@ -280,16 +275,17 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         # if the dtype and units match, just copy the values here instead
         # of doing the much more expensive creation of a new Quantity
         # using items() is orders of magnitude faster
-        if (hasattr(t_start, 'dtype') and t_start.dtype == obj.dtype and
-                hasattr(t_start, 'dimensionality') and
-                    t_start.dimensionality.items() == dim.items()):
+        if (hasattr(t_start, 'dtype')
+                and t_start.dtype == obj.dtype
+                and hasattr(t_start, 'dimensionality')
+                and t_start.dimensionality.items() == dim.items()):
             obj.t_start = t_start.copy()
         else:
             obj.t_start = pq.Quantity(t_start, units=dim, dtype=obj.dtype)
 
-        if (hasattr(t_stop, 'dtype') and t_stop.dtype == obj.dtype and
-                hasattr(t_stop, 'dimensionality') and
-                    t_stop.dimensionality.items() == dim.items()):
+        if (hasattr(t_stop, 'dtype') and t_stop.dtype == obj.dtype
+                and hasattr(t_stop, 'dimensionality')
+                and t_stop.dimensionality.items() == dim.items()):
             obj.t_stop = t_stop.copy()
         else:
             obj.t_stop = pq.Quantity(t_stop, units=dim, dtype=obj.dtype)
@@ -308,10 +304,10 @@ class SpikeTrain(BaseNeo, pq.Quantity):
 
         return obj
 
-    def __init__(self, times, t_stop, units=None, dtype=np.float,
-                 copy=True, sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s,
-                 waveforms=None, left_sweep=None, name=None, file_origin=None,
-                 description=None, **annotations):
+    def __init__(self, times, t_stop, units=None, dtype=np.float, copy=True,
+                 sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s, waveforms=None, left_sweep=None,
+                 name=None, file_origin=None, description=None, array_annotations=None,
+                 **annotations):
         '''
         Initializes a newly constructed :class:`SpikeTrain` instance.
         '''
@@ -322,24 +318,20 @@ class SpikeTrain(BaseNeo, pq.Quantity):
 
         # Calls parent __init__, which grabs universally recommended
         # attributes and sets up self.annotations
-        BaseNeo.__init__(self, name=name, file_origin=file_origin,
-                         description=description, **annotations)
+        DataObject.__init__(self, name=name, file_origin=file_origin, description=description,
+                            array_annotations=array_annotations, **annotations)
+
+    def _repr_pretty_(self, pp, cycle):
+        super(SpikeTrain, self)._repr_pretty_(pp, cycle)
 
     def rescale(self, units):
         '''
         Return a copy of the :class:`SpikeTrain` converted to the specified
         units
         '''
-        if self.dimensionality == pq.quantity.validate_dimensionality(units):
-            return self.copy()
-        spikes = self.view(pq.Quantity)
-        obj = SpikeTrain(times=spikes, t_stop=self.t_stop, units=units,
-                         sampling_rate=self.sampling_rate,
-                         t_start=self.t_start, waveforms=self.waveforms,
-                         left_sweep=self.left_sweep, name=self.name,
-                         file_origin=self.file_origin,
-                         description=self.description, **self.annotations)
-        obj.segment = self.segment
+        obj = super(SpikeTrain, self).rescale(units)
+        obj.t_start = self.t_start.rescale(units)
+        obj.t_stop = self.t_stop.rescale(units)
         obj.unit = self.unit
         return obj
 
@@ -349,12 +341,11 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         works
         '''
         import numpy
-        return _new_spiketrain, (self.__class__, numpy.array(self),
-                                 self.t_stop, self.units, self.dtype, True,
-                                 self.sampling_rate, self.t_start,
-                                 self.waveforms, self.left_sweep,
-                                 self.name, self.file_origin, self.description,
-                                 self.annotations, self.segment, self.unit)
+        return _new_spiketrain, (self.__class__, numpy.array(self), self.t_stop, self.units,
+                                 self.dtype, True, self.sampling_rate, self.t_start,
+                                 self.waveforms, self.left_sweep, self.name, self.file_origin,
+                                 self.description, self.array_annotations, self.annotations,
+                                 self.segment, self.unit)
 
     def __array_finalize__(self, obj):
         '''
@@ -393,6 +384,14 @@ class SpikeTrain(BaseNeo, pq.Quantity):
 
         # The additional arguments
         self.annotations = getattr(obj, 'annotations', {})
+        # Add empty array annotations, because they cannot always be copied,
+        # but do not overwrite existing ones from slicing etc.
+        # This ensures the attribute exists
+        if not hasattr(self, 'array_annotations'):
+            self.array_annotations = ArrayDict(self._get_arr_ann_length())
+
+        # Note: Array annotations have to be changed when slicing or initializing an object,
+        # copying them over in spite of changed data would result in unexpected behaviour
 
         # Globally recommended attributes
         self.name = getattr(obj, 'name', None)
@@ -402,6 +401,21 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         if hasattr(obj, 'lazy_shape'):
             self.lazy_shape = obj.lazy_shape
 
+    def __deepcopy__(self, memo):
+        cls = self.__class__
+        new_st = cls(np.array(self), self.t_stop, units=self.units, dtype=self.dtype, copy=True,
+                     sampling_rate=self.sampling_rate, t_start=self.t_start,
+                     waveforms=self.waveforms, left_sweep=self.left_sweep, name=self.name,
+                     file_origin=self.file_origin, description=self.description)
+        new_st.__dict__.update(self.__dict__)
+        memo[id(self)] = new_st
+        for k, v in self.__dict__.items():
+            try:
+                setattr(new_st, k, copy.deepcopy(v, memo))
+            except TypeError:
+                setattr(new_st, k, v)
+        return new_st
+
     def __repr__(self):
         '''
         Returns a string representing the :class:`SpikeTrain`.
@@ -418,6 +432,7 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         sort_indices = np.argsort(self)
         if self.waveforms is not None and self.waveforms.any():
             self.waveforms = self.waveforms[sort_indices]
+        self.array_annotate(**copy.deepcopy(self.array_annotations_at_index(sort_indices)))
 
         # now sort the times
         # We have sorted twice, but `self = self[sort_indices]` introduces
@@ -437,36 +452,69 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         Shifts the time point of all spikes by adding the amount in
         :attr:`time` (:class:`Quantity`)
 
-        Raises an exception if new time points fall outside :attr:`t_start` or
-        :attr:`t_stop`
+        If `time` is a scalar, this also shifts :attr:`t_start` and :attr:`t_stop`.
+        If `time` is an array, :attr:`t_start` and :attr:`t_stop` are not changed unless
+        some of the new spikes would be outside this range.
+        In this case :attr:`t_start` and :attr:`t_stop` are modified if necessary to
+        ensure they encompass all spikes.
+
+        It is not possible to add two SpikeTrains (this raises a TypeError).
         '''
         spikes = self.view(pq.Quantity)
         check_has_dimensions_time(time)
-        _check_time_in_range(spikes + time, self.t_start, self.t_stop)
-        return SpikeTrain(times=spikes + time, t_stop=self.t_stop,
-                          units=self.units, sampling_rate=self.sampling_rate,
-                          t_start=self.t_start, waveforms=self.waveforms,
-                          left_sweep=self.left_sweep, name=self.name,
-                          file_origin=self.file_origin,
-                          description=self.description, **self.annotations)
+        if isinstance(time, SpikeTrain):
+            raise TypeError("Can't add two spike trains")
+        new_times = spikes + time
+        if time.size > 1:
+            t_start = min(self.t_start, np.min(new_times))
+            t_stop = max(self.t_stop, np.max(new_times))
+        else:
+            t_start = self.t_start + time
+            t_stop = self.t_stop + time
+        return SpikeTrain(times=new_times, t_stop=t_stop, units=self.units,
+                          sampling_rate=self.sampling_rate, t_start=t_start,
+                          waveforms=self.waveforms, left_sweep=self.left_sweep, name=self.name,
+                          file_origin=self.file_origin, description=self.description,
+                          array_annotations=copy.deepcopy(self.array_annotations),
+                          **self.annotations)
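A minimal sketch of the shifting behaviour with made-up spike times; a scalar shifts the whole train including t_start and t_stop, while an array shifts each spike and only widens the range if needed:

import quantities as pq
from neo.core import SpikeTrain

st = SpikeTrain([0.5, 1.2, 3.3] * pq.s, t_start=0 * pq.s, t_stop=4 * pq.s)

st + 10 * pq.s                   # spikes at 10.5, 11.2, 13.3 s; t_start=10 s, t_stop=14 s
st + [0.0, 0.1, 5.0] * pq.s      # per-spike shift; t_stop grows to 8.3 s to cover all spikes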
 
     def __sub__(self, time):
         '''
         Shifts the time point of all spikes by subtracting the amount in
         :attr:`time` (:class:`Quantity`)
 
-        Raises an exception if new time points fall outside :attr:`t_start` or
-        :attr:`t_stop`
+        If `time` is a scalar, this also shifts :attr:`t_start` and :attr:`t_stop`.
+        If `time` is an array, :attr:`t_start` and :attr:`t_stop` are not changed unless
+        some of the new spikes would be outside this range.
+        In this case :attr:`t_start` and :attr:`t_stop` are modified if necessary to
+        ensure they encompass all spikes.
+
+        In general, it is not possible to subtract two SpikeTrain objects (this raises a
+        TypeError). However, if `time` is itself a SpikeTrain of the same size as this
+        SpikeTrain, a quantity array is returned instead (since this is often used to check
+        whether two spike trains are the same or to calculate inter-spike intervals).
         '''
         spikes = self.view(pq.Quantity)
         check_has_dimensions_time(time)
-        _check_time_in_range(spikes - time, self.t_start, self.t_stop)
-        return SpikeTrain(times=spikes - time, t_stop=self.t_stop,
-                          units=self.units, sampling_rate=self.sampling_rate,
-                          t_start=self.t_start, waveforms=self.waveforms,
-                          left_sweep=self.left_sweep, name=self.name,
-                          file_origin=self.file_origin,
-                          description=self.description, **self.annotations)
+        if isinstance(time, SpikeTrain):
+            if self.size == time.size:
+                return spikes - time
+            else:
+                raise TypeError("Can't subtract spike trains with different sizes")
+        else:
+            new_times = spikes - time
+            if time.size > 1:
+                t_start = min(self.t_start, np.min(new_times))
+                t_stop = max(self.t_stop, np.max(new_times))
+            else:
+                t_start = self.t_start - time
+                t_stop = self.t_stop - time
+            return SpikeTrain(times=spikes - time, t_stop=t_stop, units=self.units,
+                              sampling_rate=self.sampling_rate, t_start=t_start,
+                              waveforms=self.waveforms, left_sweep=self.left_sweep, name=self.name,
+                              file_origin=self.file_origin, description=self.description,
+                              array_annotations=copy.deepcopy(self.array_annotations),
+                              **self.annotations)
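A minimal sketch with made-up times; subtracting two equally sized trains yields a plain quantity array (handy for comparing trains or computing intervals), while subtracting a scalar shifts the train:

import quantities as pq
from neo.core import SpikeTrain

a = SpikeTrain([1.0, 2.0, 4.0] * pq.s, t_stop=5 * pq.s)
b = SpikeTrain([1.1, 2.3, 3.9] * pq.s, t_stop=5 * pq.s)

a - b             # quantity array [-0.1, -0.3, 0.1] s
a - 0.5 * pq.s    # SpikeTrain shifted to 0.5, 1.5, 3.5 s; t_start/t_stop shifted as well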
 
     def __getitem__(self, i):
         '''
@@ -475,6 +523,10 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         obj = super(SpikeTrain, self).__getitem__(i)
         if hasattr(obj, 'waveforms') and obj.waveforms is not None:
             obj.waveforms = obj.waveforms.__getitem__(i)
+        try:
+            obj.array_annotate(**copy.deepcopy(self.array_annotations_at_index(i)))
+        except AttributeError:  # If Quantity was returned, not SpikeTrain
+            pass
         return obj
 
     def __setitem__(self, i, value):
@@ -482,9 +534,9 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         Set the value the item or slice :attr:`i`.
         '''
         if not hasattr(value, "units"):
-            value = pq.Quantity(value, units=self.units)
-            # or should we be strict: raise ValueError("Setting a value
-            # requires a quantity")?
+            value = pq.Quantity(value, units=self.units)
+            # or should we be strict: raise ValueError("Setting a value requires a quantity")?
         # check for values outside t_start, t_stop
         _check_time_in_range(value, self.t_start, self.t_stop)
         super(SpikeTrain, self).__setitem__(i, value)
@@ -498,19 +550,23 @@ class SpikeTrain(BaseNeo, pq.Quantity):
     def _copy_data_complement(self, other, deep_copy=False):
         '''
         Copy the metadata from another :class:`SpikeTrain`.
+        Note: Array annotations cannot be copied here because the length of the data can change
         '''
-        for attr in ("left_sweep", "sampling_rate", "name", "file_origin",
-                     "description", "annotations"):
+        # Note: Array annotations cannot be copied because the length of the data can be
+        # changed here, which would cause inconsistencies
+        for attr in ("left_sweep", "sampling_rate", "name", "file_origin", "description",
+                     "annotations"):
             attr_value = getattr(other, attr, None)
             if deep_copy:
                 attr_value = copy.deepcopy(attr_value)
             setattr(self, attr, attr_value)
 
-    def duplicate_with_new_data(self, signal, t_start=None, t_stop=None,
-                                waveforms=None, deep_copy=True):
+    def duplicate_with_new_data(self, signal, t_start=None, t_stop=None, waveforms=None,
+                                deep_copy=True, units=None):
         '''
         Create a new :class:`SpikeTrain` with the same metadata
         but different data (times, t_start, t_stop)
+        Note: Array annotations cannot be copied here because the length of the data can change
         '''
         # using previous t_start and t_stop if no values are provided
         if t_start is None:
@@ -519,11 +575,17 @@ class SpikeTrain(BaseNeo, pq.Quantity):
             t_stop = self.t_stop
         if waveforms is None:
             waveforms = self.waveforms
+        if units is None:
+            units = self.units
+        else:
+            units = pq.quantity.validate_dimensionality(units)
 
-        new_st = self.__class__(signal, t_start=t_start, t_stop=t_stop,
-                                waveforms=waveforms, units=self.units)
+        new_st = self.__class__(signal, t_start=t_start, t_stop=t_stop, waveforms=waveforms,
+                                units=units)
         new_st._copy_data_complement(self, deep_copy=deep_copy)
 
+        # Note: Array annotations are not copied here, because the length of the data could change
+
         # overwriting t_start and t_stop with new values
         new_st.t_start = t_start
         new_st.t_stop = t_stop
@@ -593,6 +655,9 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         sorting = np.argsort(stack)
         stack = stack[sorting]
         kwargs = {}
+
+        kwargs['array_annotations'] = self._merge_array_annotations(other, sorting=sorting)
+
         for name in ("name", "description", "file_origin"):
             attr_self = getattr(self, name)
             attr_other = getattr(other, name)
@@ -600,14 +665,12 @@ class SpikeTrain(BaseNeo, pq.Quantity):
                 kwargs[name] = attr_self
             else:
                 kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
-        merged_annotations = merge_annotations(self.annotations,
-                                               other.annotations)
+        merged_annotations = merge_annotations(self.annotations, other.annotations)
         kwargs.update(merged_annotations)
-        train = SpikeTrain(stack, units=self.units, dtype=self.dtype,
-                           copy=False, t_start=self.t_start,
-                           t_stop=self.t_stop,
-                           sampling_rate=self.sampling_rate,
-                           left_sweep=self.left_sweep, **kwargs)
+
+        train = SpikeTrain(stack, units=self.units, dtype=self.dtype, copy=False,
+                           t_start=self.t_start, t_stop=self.t_stop,
+                           sampling_rate=self.sampling_rate, left_sweep=self.left_sweep, **kwargs)
         if all(wfs):
             wfs_stack = np.vstack((self.waveforms, other.waveforms))
             wfs_stack = wfs_stack[sorting]
@@ -620,12 +683,57 @@ class SpikeTrain(BaseNeo, pq.Quantity):
             train.lazy_shape = merged_lazy_shape
         return train
 
+    def _merge_array_annotations(self, other, sorting=None):
+        '''
+        Merges the array annotations of two different objects.
+        The merge happens in such a way that the result fits the merged data;
+        in general this means concatenating the arrays from the two objects.
+        If an annotation is only present in one of the objects, it is omitted.
+        Apart from that, the array_annotations need to be sorted according to the
+        sorting of the spikes.
+        :return: Merged array_annotations
+        '''
+
+        assert sorting is not None, "The order of the merged spikes must be known"
+
+        merged_array_annotations = {}
+
+        omitted_keys_self = []
+
+        keys = self.array_annotations.keys()
+        for key in keys:
+            try:
+                self_ann = copy.deepcopy(self.array_annotations[key])
+                other_ann = copy.deepcopy(other.array_annotations[key])
+                if isinstance(self_ann, pq.Quantity):
+                    other_ann.rescale(self_ann.units)
+                    arr_ann = np.concatenate([self_ann, other_ann]) * self_ann.units
+                else:
+                    arr_ann = np.concatenate([self_ann, other_ann])
+                merged_array_annotations[key] = arr_ann[sorting]
+            # Annotation only available in 'self', must be skipped
+            # Ignore annotations present only in one of the SpikeTrains
+            except KeyError:
+                omitted_keys_self.append(key)
+                continue
+
+        omitted_keys_other = [key for key in other.array_annotations if
+                              key not in self.array_annotations]
+
+        if omitted_keys_self or omitted_keys_other:
+            warnings.warn("The following array annotations were omitted, because they were only "
+                          "present in one of the merged objects: {} from the one that was merged "
+                          "into and {} from the one that was merged into the other"
+                          "".format(omitted_keys_self, omitted_keys_other), UserWarning)
+
+        return merged_array_annotations
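A minimal sketch of how array annotations behave under merge, with made-up values; the shared key is concatenated and re-sorted with the spikes, while a key present in only one train is dropped with a warning:

import numpy as np
import quantities as pq
from neo.core import SpikeTrain

a = SpikeTrain([1.0, 3.0] * pq.s, t_stop=5 * pq.s,
               array_annotations={'quality': np.array([0.9, 0.4])})
b = SpikeTrain([2.0, 4.0] * pq.s, t_stop=5 * pq.s,
               array_annotations={'quality': np.array([0.7, 0.8]),
                                  'extra': np.array([1, 2])})

merged = a.merge(b)
merged.array_annotations['quality']    # [0.9, 0.7, 0.4, 0.8], sorted with the merged spikes
# 'extra' is omitted and a UserWarning is emitted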
+
     @property
     def times(self):
         '''
-        Returns the :class:`SpikeTrain` without modification or copying.
+        Returns the :class:`SpikeTrain` as a quantity array.
         '''
-        return self
+        return pq.Quantity(self)
 
     @property
     def duration(self):
@@ -681,20 +789,3 @@ class SpikeTrain(BaseNeo, pq.Quantity):
         if self.left_sweep is None or dur is None:
             return None
         return self.left_sweep + dur
-
-    def as_array(self, units=None):
-        """
-        Return the spike times as a plain NumPy array.
-
-        If `units` is specified, first rescale to those units.
-        """
-        if units:
-            return self.rescale(units).magnitude
-        else:
-            return self.magnitude
-
-    def as_quantity(self):
-        """
-        Return the spike times as a quantities array.
-        """
-        return self.view(pq.Quantity)

+ 72 - 56
code/python-neo/neo/io/__init__.py

@@ -6,7 +6,12 @@ electrophysiological data files.
 Note that if the package dependency is not satisfied for one io, it does not
 raise an error but a warning.
 
-neo.io.iolist provides a list of succesfully imported io classes.
+:attr:`neo.io.iolist` provides a list of successfully imported io classes.
+
+Functions:
+
+.. autofunction:: neo.io.get_io
+
 
 Classes:
 
@@ -16,8 +21,12 @@ Classes:
 
 .. autoclass:: neo.io.AsciiSpikeTrainIO
 
+.. autoclass:: neo.io.AxographIO
+
 .. autoclass:: neo.io.AxonIO
 
+.. autoclass:: neo.io.BCI2000IO
+
 .. autoclass:: neo.io.BlackrockIO
 
 .. autoclass:: neo.io.BrainVisionIO
@@ -30,11 +39,12 @@ Classes:
 
 .. autoclass:: neo.io.ElanIO
 
-..
-  .. autoclass:: neo.io.ElphyIO
+.. autoclass:: neo.io.ElphyIO
 
 .. autoclass:: neo.io.IgorIO
 
+.. autoclass:: neo.io.IntanIO
+
 .. autoclass:: neo.io.KlustaKwikIO
 
 .. autoclass:: neo.io.KwikIO
@@ -59,16 +69,16 @@ Classes:
 
 .. autoclass:: neo.io.NSDFIO
 
+.. autoclass:: neo.io.OpenEphysIO
+
 .. autoclass:: neo.io.PickleIO
 
 .. autoclass:: neo.io.PlexonIO
 
-.. autoclass:: neo.io.PyNNNumpyIO
-
-.. autoclass:: neo.io.PyNNTextIO
-
 .. autoclass:: neo.io.RawBinarySignalIO
 
+.. autoclass:: neo.io.RawMCSIO
+
 .. autoclass:: neo.io.StimfitIO
 
 .. autoclass:: neo.io.TdtIO
@@ -81,37 +91,39 @@ Classes:
 
 import os.path
 
-#try to import the neuroshare library.
-#if it is present, use the neuroshareapiio to load neuroshare files
-#if it is not present, use the neurosharectypesio to load files
+# try to import the neuroshare library.
+# if it is present, use the neuroshareapiio to load neuroshare files
+# if it is not present, use the neurosharectypesio to load files
 try:
     import neuroshare as ns
 except ImportError as err:
     from neo.io.neurosharectypesio import NeurosharectypesIO as NeuroshareIO
-    #print("\n neuroshare library not found, loading data with ctypes" )
-    #print("\n to use the API be sure to install the library found at:")
-    #print("\n www.http://pythonhosted.org/neuroshare/")
+    # print("\n neuroshare library not found, loading data with ctypes" )
+    # print("\n to use the API be sure to install the library found at:")
+    # print("\n www.http://pythonhosted.org/neuroshare/")
 
 else:
     from neo.io.neuroshareapiio import NeuroshareapiIO as NeuroshareIO
-    #print("neuroshare library successfully imported")
-    #print("\n loading with API...")
-
-
+    # print("neuroshare library successfully imported")
+    # print("\n loading with API...")
 
 from neo.io.alphaomegaio import AlphaOmegaIO
 from neo.io.asciisignalio import AsciiSignalIO
 from neo.io.asciispiketrainio import AsciiSpikeTrainIO
 from neo.io.axonio import AxonIO
+from neo.io.axographio import AxographIO
 from neo.io.blackrockio import BlackrockIO
+from neo.io.blackrockio_v4 import BlackrockIO as OldBlackrockIO
+from neo.io.bci2000io import BCI2000IO
 from neo.io.brainvisionio import BrainVisionIO
 from neo.io.brainwaredamio import BrainwareDamIO
 from neo.io.brainwaref32io import BrainwareF32IO
 from neo.io.brainwaresrcio import BrainwareSrcIO
 from neo.io.elanio import ElanIO
-#from neo.io.elphyio import ElphyIO
+# from neo.io.elphyio import ElphyIO
 from neo.io.exampleio import ExampleIO
 from neo.io.igorproio import IgorIO
+from neo.io.intanio import IntanIO
 from neo.io.klustakwikio import KlustaKwikIO
 from neo.io.kwikio import KwikIO
 from neo.io.micromedio import MicromedIO
@@ -119,58 +131,62 @@ from neo.io.hdf5io import NeoHdf5IO
 from neo.io.neomatlabio import NeoMatlabIO
 from neo.io.nestio import NestIO
 from neo.io.neuralynxio import NeuralynxIO
+from neo.io.neuralynxio_v1 import NeuralynxIO as OldNeuralynxIO
 from neo.io.neuroexplorerio import NeuroExplorerIO
 from neo.io.neuroscopeio import NeuroScopeIO
 from neo.io.nixio import NixIO
+from neo.io.nixio_fr import NixIO as NixIOFr
 from neo.io.nsdfio import NSDFIO
+from neo.io.openephysio import OpenEphysIO
 from neo.io.pickleio import PickleIO
 from neo.io.plexonio import PlexonIO
-from neo.io.pynnio import PyNNNumpyIO
-from neo.io.pynnio import PyNNTextIO
 from neo.io.rawbinarysignalio import RawBinarySignalIO
+from neo.io.rawmcsio import RawMCSIO
 from neo.io.spike2io import Spike2IO
 from neo.io.stimfitio import StimfitIO
 from neo.io.tdtio import TdtIO
 from neo.io.winedrio import WinEdrIO
 from neo.io.winwcpio import WinWcpIO
 
-
 iolist = [
-          AlphaOmegaIO,
-          AsciiSignalIO,
-          AsciiSpikeTrainIO,
-          AxonIO,
-          BlackrockIO,
-          BrainVisionIO,
-          BrainwareDamIO,
-          BrainwareF32IO,
-          BrainwareSrcIO,
-          ElanIO,
-          #ElphyIO,
-          ExampleIO,
-          IgorIO,
-          KlustaKwikIO,
-          KwikIO,
-          MicromedIO,
-          NixIO,  # place NixIO before NeoHdf5IO to make it the default for .h5 files
-          NeoHdf5IO,
-          NeoMatlabIO,
-          NestIO,
-          NeuralynxIO,
-          NeuroExplorerIO,
-          NeuroScopeIO,
-          NeuroshareIO,
-          NSDFIO,
-          PickleIO,
-          PlexonIO,
-          PyNNNumpyIO,
-          PyNNTextIO,
-          RawBinarySignalIO,
-          Spike2IO,
-          StimfitIO,
-          TdtIO,
-          WinEdrIO,
-          WinWcpIO
+    AlphaOmegaIO,
+    AsciiSignalIO,
+    AsciiSpikeTrainIO,
+    AxonIO,
+    AxographIO,
+    BCI2000IO,
+    BlackrockIO,
+    BrainVisionIO,
+    BrainwareDamIO,
+    BrainwareF32IO,
+    BrainwareSrcIO,
+    ElanIO,
+    # ElphyIO,
+    ExampleIO,
+    IgorIO,
+    IntanIO,
+    KlustaKwikIO,
+    KwikIO,
+    MicromedIO,
+    NixIO,  # place NixIO before NeoHdf5IO to make it the default for .h5 files
+    NeoHdf5IO,
+    NeoMatlabIO,
+    NestIO,
+    NeuralynxIO,
+    NeuroExplorerIO,
+    NeuroScopeIO,
+    NeuroshareIO,
+    NSDFIO,
+    OpenEphysIO,
+    PickleIO,
+    PlexonIO,
+    RawBinarySignalIO,
+    RawMCSIO,
+    Spike2IO,
+    StimfitIO,
+    TdtIO,
+    WinEdrIO,
+    WinWcpIO
 ]
 
 

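A hedged illustration of the two entry points documented in the updated neo.io docstring above, get_io and iolist; the file name 'recording.abf' is hypothetical and only demonstrates the extension-based lookup:

    >>> import neo
    >>> reader = neo.io.get_io('recording.abf')     # picks an IO class by file extension
    >>> block = reader.read_block()
    >>> [cls.__name__ for cls in neo.io.iolist]     # every successfully imported IO class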
+ 367 - 399
code/python-neo/neo/io/alphaomegaio.py

@@ -76,6 +76,7 @@ try:
     file
 except NameError:
     import io
+
     file = io.BufferedReader
 
 # note neo.core need only numpy and quantities
@@ -98,41 +99,40 @@ class AlphaOmegaIO(BaseIO):
     Usage:
         >>> from neo import io
         >>> r = io.AlphaOmegaIO( filename = 'File_AlphaOmega_1.map')
-        >>> blck = r.read_block(lazy = False, cascade = True)
+        >>> blck = r.read_block()
         >>> print blck.segments[0].analogsignals
 
     """
 
-    is_readable        = True  # This is a reading only class
-    is_writable        = False # writing is not supported
+    is_readable = True  # This is a reading only class
+    is_writable = False  # writing is not supported
 
     # This class is able to directly or indirectly read the following kind of
     # objects
-    supported_objects  = [ Block, Segment , AnalogSignal]
+    supported_objects = [Block, Segment, AnalogSignal]
     # TODO: Add support for other objects that should be extractable from .map
     # files (Event, Epoch?, Epoch Array?, SpikeTrain?)
 
     # This class can only return a Block
-    readable_objects   = [ Block ]
+    readable_objects = [Block]
     # TODO : create readers for different type of objects (Segment,
     # AnalogSignal,...)
 
     # This class is not able to write objects
-    writeable_objects  = [ ]
+    writeable_objects = []
 
     # This is for GUI stuff : a definition for parameters when reading.
-    read_params        = { Block : [ ] }
+    read_params = {Block: []}
 
     # Writing is not supported, so no GUI stuff
-    write_params       = None
+    write_params = None
 
-    name               = 'AlphaOmega'
-    extensions         = [ 'map' ]
+    name = 'AlphaOmega'
+    extensions = ['map']
 
     mode = 'file'
 
-
-    def __init__(self , filename = None) :
+    def __init__(self, filename=None):
         """
 
         Arguments:
@@ -144,14 +144,12 @@ class AlphaOmegaIO(BaseIO):
 
     # write is not supported so I do not overload write method from BaseIO
 
-    def read_block(self,
-                   # the 2 first keyword arguments are imposed by neo.io API
-                   lazy = False,
-                   cascade = True):
+    def read_block(self, lazy=False):
         """
         Return a Block.
 
         """
+        assert not lazy, 'Do not support lazy'
 
         def count_samples(m_length):
             """
@@ -161,7 +159,7 @@ class AlphaOmegaIO(BaseIO):
             """
 
             # for information about type 5 data block, see [1]
-            count = int((m_length-6)/2-2)
+            count = int((m_length - 6) / 2 - 2)
             # -6 corresponds to the header of block 5, and the -2 takes into
             # account the fact that the last 2 values are not available as the 4
             # corresponding bytes are coding the time stamp of the beginning
@@ -169,7 +167,7 @@ class AlphaOmegaIO(BaseIO):
             return count
 
         # create the neo Block that will be returned at the end
-        blck = Block(file_origin = os.path.basename(self.filename))
+        blck = Block(file_origin=os.path.basename(self.filename))
         blck.file_origin = os.path.basename(self.filename)
 
         fid = open(self.filename, 'rb')
@@ -181,192 +179,163 @@ class AlphaOmegaIO(BaseIO):
         # step 1: read the headers of all the data blocks to load the file
         # structure
 
-        pos_block = 0 # position of the current block in the file
-        file_blocks = [] # list of data blocks available in the file
+        pos_block = 0  # position of the current block in the file
+        file_blocks = []  # list of data blocks available in the file
 
-        if not cascade:
-            # we read only the main header
+        seg = Segment(file_origin=os.path.basename(self.filename))
+        seg.file_origin = os.path.basename(self.filename)
+        blck.segments.append(seg)
+
+        while True:
+            first_4_bytes = fid.read(4)
+            if len(first_4_bytes) < 4:
+                # we have reached the end of the file
+                break
+            else:
+                m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)
 
-            m_length, m_TypeBlock = struct.unpack('Hcx' , fid.read(4))
-            # m_TypeBlock should be 'h', as we read the first block
             block = HeaderReader(fid,
                                  dict_header_type.get(m_TypeBlock,
                                                       Type_Unknown)).read_f()
             block.update({'m_length': m_length,
                           'm_TypeBlock': m_TypeBlock,
                           'pos': pos_block})
-            file_blocks.append(block)
 
-        else: # cascade == True
-
-            seg = Segment(file_origin = os.path.basename(self.filename))
-            seg.file_origin = os.path.basename(self.filename)
-            blck.segments.append(seg)
-
-            while True:
-                first_4_bytes = fid.read(4)
-                if len(first_4_bytes) < 4:
-                    # we have reached the end of the file
-                    break
-                else:
-                    m_length, m_TypeBlock = struct.unpack('Hcx', first_4_bytes)
-
-                block = HeaderReader(fid,
-                                dict_header_type.get(m_TypeBlock,
-                                                     Type_Unknown)).read_f()
-                block.update({'m_length': m_length,
-                              'm_TypeBlock': m_TypeBlock,
-                              'pos': pos_block})
-
-                if m_TypeBlock == '2':
-                    # The beginning of the block of type '2' is identical for
-                    # all types of channels, but the following part depends on
-                    # the type of channel. So we need a special case here.
-
-                    # WARNING: How to check the type of channel is not
-                    # described in the documentation. So here I use what is
-                    # proposed in the C code [2].
-                    # According to this C code, it seems that the 'm_isAnalog'
-                    # is used to distinguished analog and digital channels, and
-                    # 'm_Mode' encodes the type of analog channel:
-                    # 0 for continuous, 1 for level, 2 for external trigger.
-                    # But in some files, I found channels that seemed to be
-                    # continuous channels with 'm_Modes' = 128 or 192. So I
-                    # decided to consider every channel with 'm_Modes'
-                    # different from 1 or 2 as continuous. I also couldn't
-                    # check that values of 1 and 2 are really for level and
-                    # external trigger as I had no test files containing data
-                    # of this types.
-
-                    type_subblock = 'unknown_channel_type(m_Mode=' \
-                                    + str(block['m_Mode'])+ ')'
-                    description = Type2_SubBlockUnknownChannels
-                    block.update({'m_Name': 'unknown_name'})
-                    if block['m_isAnalog'] == 0:
-                        # digital channel
-                        type_subblock = 'digital'
-                        description = Type2_SubBlockDigitalChannels
-                    elif block['m_isAnalog'] == 1:
-                        # analog channel
-                        if block['m_Mode'] == 1:
-                            # level channel
-                            type_subblock = 'level'
-                            description = Type2_SubBlockLevelChannels
-                        elif block['m_Mode'] == 2:
-                            # external trigger channel
-                            type_subblock = 'external_trigger'
-                            description = Type2_SubBlockExtTriggerChannels
-                        else:
-                            # continuous channel
-                            type_subblock = 'continuous(Mode' \
-                                            + str(block['m_Mode']) +')'
-                            description = Type2_SubBlockContinuousChannels
-
-                    subblock = HeaderReader(fid, description).read_f()
-
-                    block.update(subblock)
-                    block.update({'type_subblock': type_subblock})
-
-                file_blocks.append(block)
-                pos_block += m_length
-                fid.seek(pos_block)
-
-            # step 2: find the available channels
-            list_chan = [] # list containing indexes of channel blocks
+            if m_TypeBlock == '2':
+                # The beginning of the block of type '2' is identical for
+                # all types of channels, but the following part depends on
+                # the type of channel. So we need a special case here.
+
+                # WARNING: How to check the type of channel is not
+                # described in the documentation. So here I use what is
+                # proposed in the C code [2].
+                # According to this C code, it seems that the 'm_isAnalog'
+                # is used to distinguish analog and digital channels, and
+                # 'm_Mode' encodes the type of analog channel:
+                # 0 for continuous, 1 for level, 2 for external trigger.
+                # But in some files, I found channels that seemed to be
+                # continuous channels with 'm_Modes' = 128 or 192. So I
+                # decided to consider every channel with 'm_Modes'
+                # different from 1 or 2 as continuous. I also couldn't
+                # check that values of 1 and 2 are really for level and
+                # external trigger as I had no test files containing data
+                # of these types.
+
+                type_subblock = 'unknown_channel_type(m_Mode=' \
+                                + str(block['m_Mode']) + ')'
+                description = Type2_SubBlockUnknownChannels
+                block.update({'m_Name': 'unknown_name'})
+                if block['m_isAnalog'] == 0:
+                    # digital channel
+                    type_subblock = 'digital'
+                    description = Type2_SubBlockDigitalChannels
+                elif block['m_isAnalog'] == 1:
+                    # analog channel
+                    if block['m_Mode'] == 1:
+                        # level channel
+                        type_subblock = 'level'
+                        description = Type2_SubBlockLevelChannels
+                    elif block['m_Mode'] == 2:
+                        # external trigger channel
+                        type_subblock = 'external_trigger'
+                        description = Type2_SubBlockExtTriggerChannels
+                    else:
+                        # continuous channel
+                        type_subblock = 'continuous(Mode' \
+                                        + str(block['m_Mode']) + ')'
+                        description = Type2_SubBlockContinuousChannels
+
+                subblock = HeaderReader(fid, description).read_f()
+
+                block.update(subblock)
+                block.update({'type_subblock': type_subblock})
+
+            file_blocks.append(block)
+            pos_block += m_length
+            fid.seek(pos_block)
+
+        # step 2: find the available channels
+        list_chan = []  # list containing indexes of channel blocks
+        for ind_block, block in enumerate(file_blocks):
+            if block['m_TypeBlock'] == '2':
+                list_chan.append(ind_block)
+
+        # step 3: find blocks containing data for the available channels
+        list_data = []  # list of lists of indexes of data blocks
+        # corresponding to each channel
+        for ind_chan, chan in enumerate(list_chan):
+            list_data.append([])
+            num_chan = file_blocks[chan]['m_numChannel']
             for ind_block, block in enumerate(file_blocks):
-                if block['m_TypeBlock'] == '2':
-                    list_chan.append(ind_block)
-
-            # step 3: find blocks containing data for the available channels
-            list_data = [] # list of lists of indexes of data blocks
-                           # corresponding to each channel
-            for ind_chan, chan in enumerate(list_chan):
-                list_data.append([])
-                num_chan = file_blocks[chan]['m_numChannel']
-                for ind_block, block in enumerate(file_blocks):
-                    if block['m_TypeBlock'] == '5':
-                        if block['m_numChannel'] == num_chan:
-                            list_data[ind_chan].append(ind_block)
-
-
-            # step 4: compute the length (number of samples) of the channels
-            chan_len = np.zeros(len(list_data), dtype = np.int)
-            for ind_chan, list_blocks in enumerate(list_data):
-                for ind_block in list_blocks:
-                    chan_len[ind_chan] += count_samples(
-                                          file_blocks[ind_block]['m_length'])
-
-            # step 5: find channels for which data are available
-            ind_valid_chan = np.nonzero(chan_len)[0]
-
-            # step 6: load the data
-            # TODO give the possibility to load data as AnalogSignalArrays
-            for ind_chan in ind_valid_chan:
-                list_blocks = list_data[ind_chan]
-                ind = 0 # index in the data vector
-
-                # read time stamp for the beginning of the signal
-                form = '<l' # reading format
-                ind_block = list_blocks[0]
-                count = count_samples(file_blocks[ind_block]['m_length'])
-                fid.seek(file_blocks[ind_block]['pos']+6+count*2)
-                buf = fid.read(struct.calcsize(form))
-                val = struct.unpack(form , buf)
-                start_index = val[0]
-
-                # WARNING: in the following blocks are read supposing taht they
-                # are all contiguous and sorted in time. I don't know if it's
-                # always the case. Maybe we should use the time stamp of each
-                # data block to choose where to put the read data in the array.
-                if not lazy:
-                    temp_array = np.empty(chan_len[ind_chan], dtype = np.int16)
-                    # NOTE: we could directly create an empty AnalogSignal and
-                    # load the data in it, but it is much faster to load data
-                    # in a temporary numpy array and create the AnalogSignals
-                    # from this temporary array
-                    for ind_block in list_blocks:
-                        count = count_samples(
-                                file_blocks[ind_block]['m_length'])
-                        fid.seek(file_blocks[ind_block]['pos']+6)
-                        temp_array[ind:ind+count] = \
-                            np.fromfile(fid, dtype = np.int16, count = count)
-                        ind += count
-
-                sampling_rate = \
-                    file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
-                t_start = (start_index / sampling_rate).simplified
-                if lazy:
-                    ana_sig = AnalogSignal([],
-                                           sampling_rate = sampling_rate,
-                                           t_start = t_start,
-                                           name = file_blocks\
-                                               [list_chan[ind_chan]]['m_Name'],
-                                           file_origin = \
-                                               os.path.basename(self.filename),
-                                           units = pq.dimensionless)
-                    ana_sig.lazy_shape = chan_len[ind_chan]
-                else:
-                    ana_sig = AnalogSignal(temp_array,
-                                           sampling_rate = sampling_rate,
-                                           t_start = t_start,
-                                           name = file_blocks\
-                                               [list_chan[ind_chan]]['m_Name'],
-                                           file_origin = \
-                                               os.path.basename(self.filename),
-                                           units = pq.dimensionless)
-# todo apibreak: create ChannelIndex for each signals
-#                ana_sig.channel_index = \
-#                            file_blocks[list_chan[ind_chan]]['m_numChannel']
-                ana_sig.annotate(channel_name = \
-                            file_blocks[list_chan[ind_chan]]['m_Name'])
-                ana_sig.annotate(channel_type = \
-                            file_blocks[list_chan[ind_chan]]['type_subblock'])
-                seg.analogsignals.append(ana_sig)
+                if block['m_TypeBlock'] == '5':
+                    if block['m_numChannel'] == num_chan:
+                        list_data[ind_chan].append(ind_block)
+
+        # step 4: compute the length (number of samples) of the channels
+        chan_len = np.zeros(len(list_data), dtype=np.int)
+        for ind_chan, list_blocks in enumerate(list_data):
+            for ind_block in list_blocks:
+                chan_len[ind_chan] += count_samples(
+                    file_blocks[ind_block]['m_length'])
+
+        # step 5: find channels for which data are available
+        ind_valid_chan = np.nonzero(chan_len)[0]
+
+        # step 6: load the data
+        # TODO give the possibility to load data as AnalogSignalArrays
+        for ind_chan in ind_valid_chan:
+            list_blocks = list_data[ind_chan]
+            ind = 0  # index in the data vector
+
+            # read time stamp for the beginning of the signal
+            form = '<l'  # reading format
+            ind_block = list_blocks[0]
+            count = count_samples(file_blocks[ind_block]['m_length'])
+            fid.seek(file_blocks[ind_block]['pos'] + 6 + count * 2)
+            buf = fid.read(struct.calcsize(form))
+            val = struct.unpack(form, buf)
+            start_index = val[0]
+
+            # WARNING: in the following, blocks are read assuming that they
+            # are all contiguous and sorted in time. I don't know if it's
+            # always the case. Maybe we should use the time stamp of each
+            # data block to choose where to put the read data in the array.
+
+            temp_array = np.empty(chan_len[ind_chan], dtype=np.int16)
+            # NOTE: we could directly create an empty AnalogSignal and
+            # load the data in it, but it is much faster to load data
+            # in a temporary numpy array and create the AnalogSignals
+            # from this temporary array
+            for ind_block in list_blocks:
+                count = count_samples(
+                    file_blocks[ind_block]['m_length'])
+                fid.seek(file_blocks[ind_block]['pos'] + 6)
+                temp_array[ind:ind + count] = \
+                    np.fromfile(fid, dtype=np.int16, count=count)
+                ind += count
+
+            sampling_rate = \
+                file_blocks[list_chan[ind_chan]]['m_SampleRate'] * pq.kHz
+            t_start = (start_index / sampling_rate).simplified
+
+            ana_sig = AnalogSignal(temp_array,
+                                   sampling_rate=sampling_rate,
+                                   t_start=t_start,
+                                   name=file_blocks
+                                   [list_chan[ind_chan]]['m_Name'],
+                                   file_origin=os.path.basename(self.filename),
+                                   units=pq.dimensionless)
+            # todo apibreak: create ChannelIndex for each signals
+            #                ana_sig.channel_index = \
+            #                            file_blocks[list_chan[ind_chan]]['m_numChannel']
+            ana_sig.annotate(channel_name=file_blocks[list_chan[ind_chan]]['m_Name'])
+            ana_sig.annotate(channel_type=file_blocks[list_chan[ind_chan]]['type_subblock'])
+            seg.analogsignals.append(ana_sig)
 
         fid.close()
 
-        if file_blocks[0]['m_TypeBlock'] == 'h': # this should always be true
-            blck.rec_datetime = datetime.datetime(\
+        if file_blocks[0]['m_TypeBlock'] == 'h':  # this should always be true
+            blck.rec_datetime = datetime.datetime(
                 file_blocks[0]['m_date_year'],
                 file_blocks[0]['m_date_month'],
                 file_blocks[0]['m_date_day'],
@@ -374,21 +343,20 @@ class AlphaOmegaIO(BaseIO):
                 file_blocks[0]['m_time_minute'],
                 file_blocks[0]['m_time_second'],
                 10000 * file_blocks[0]['m_time_hsecond'])
-                # the 10000 is here to convert m_time_hsecond from centisecond
-                # to microsecond
+            # the 10000 is here to convert m_time_hsecond from centisecond
+            # to microsecond
             version = file_blocks[0]['m_version']
-            blck.annotate(alphamap_version = version)
-            if cascade:
-                seg.rec_datetime = blck.rec_datetime.replace()
-                # I couldn't find a simple copy function for datetime,
-                # using replace without arguments is a twisted way to make a
-                # copy
-                seg.annotate(alphamap_version = version)
-        if cascade:
-            blck.create_many_to_one_relationship()
+            blck.annotate(alphamap_version=version)
 
-        return blck
+            seg.rec_datetime = blck.rec_datetime.replace()
+            # I couldn't find a simple copy function for datetime,
+            # using replace without arguments is a twisted way to make a
+            # copy
+            seg.annotate(alphamap_version=version)
 
+        blck.create_many_to_one_relationship()
+
+        return blck
 
 
 """
@@ -425,7 +393,7 @@ typedef struct
 
 """
 
-max_string_len = '32s' # maximal length of variable length strings in the file
+max_string_len = '32s'  # maximal length of variable length strings in the file
 # WARNING: I don't know what the real value is here. According to [1] p 139
 # it seems that it could be 20. Some tests would be needed to check this.
 
@@ -443,8 +411,8 @@ max_string_len = '32s' # maximal length of variable length strings in the file
 # possible the names in document [1]
 
 TypeH_Header = [
-    ('m_nextBlock','l'),
-    ('m_version','h'),
+    ('m_nextBlock', 'l'),
+    ('m_version', 'h'),
     ('m_time_hour', 'B'),
     ('m_time_minute', 'B'),
     ('m_time_second', 'B'),
@@ -453,95 +421,95 @@ TypeH_Header = [
     ('m_date_month', 'B'),
     ('m_date_year', 'H'),
     ('m_date_dayofweek', 'B'),
-    ('blank', 'x'), # one byte blank because of the 2 bytes alignement
-    ('m_MinimumTime','d'),
-    ('m_MaximumTime','d')]
+    ('blank', 'x'),  # one byte blank because of the 2-byte alignment
+    ('m_MinimumTime', 'd'),
+    ('m_MaximumTime', 'd')]
 
 Type0_SetBoards = [
-    ('m_nextBlock','l'),
-    ('m_BoardCount','h'),
-    ('m_GroupCount','h'),
-    ('m_placeMainWindow','x')] # WARNING: unknown type ('x' is wrong)
-
-Type1_Boards = [ # WARNING: needs to be checked
-    ('m_nextBlock','l'),
-    ('m_Number','h'),
-    ('m_countChannel','h'),
-    ('m_countAnIn','h'),
-    ('m_countAnOut','h'),
-    ('m_countDigIn','h'),
-    ('m_countDigOut','h'),
-    ('m_TrigCount', 'h'), # not defined in 5.3.3 but appears in 5.5.1 and
-                          # seems to really exist in files
+    ('m_nextBlock', 'l'),
+    ('m_BoardCount', 'h'),
+    ('m_GroupCount', 'h'),
+    ('m_placeMainWindow', 'x')]  # WARNING: unknown type ('x' is wrong)
+
+Type1_Boards = [  # WARNING: needs to be checked
+    ('m_nextBlock', 'l'),
+    ('m_Number', 'h'),
+    ('m_countChannel', 'h'),
+    ('m_countAnIn', 'h'),
+    ('m_countAnOut', 'h'),
+    ('m_countDigIn', 'h'),
+    ('m_countDigOut', 'h'),
+    ('m_TrigCount', 'h'),  # not defined in 5.3.3 but appears in 5.5.1 and
+    # seems to really exist in files
     # WARNING: check why 'm_TrigCount is not in the C code [2]
-    ('m_Amplitude','f'),
-    ('m_cSampleRate','f'), # sample rate seems to be given in kHz
-    ('m_Duration','f'),
-    ('m_nPreTrigmSec','f'),
-    ('m_nPostTrigmSec','f'),
-    ('m_TrgMode','h'),
-    ('m_LevelValue','h'), # after this line, 5.3.3 is wrong,
-                          # check example in 5.5.1 for the right fields
+    ('m_Amplitude', 'f'),
+    ('m_cSampleRate', 'f'),  # sample rate seems to be given in kHz
+    ('m_Duration', 'f'),
+    ('m_nPreTrigmSec', 'f'),
+    ('m_nPostTrigmSec', 'f'),
+    ('m_TrgMode', 'h'),
+    ('m_LevelValue', 'h'),  # after this line, 5.3.3 is wrong,
+    # check example in 5.5.1 for the right fields
     # WARNING: check why the following part is not corrected in the C code [2]
-    ('m_nSamples','h'),
-    ('m_fRMS','f'),
-    ('m_ScaleFactor','f'),
-    ('m_DapTime','f'),
+    ('m_nSamples', 'h'),
+    ('m_fRMS', 'f'),
+    ('m_ScaleFactor', 'f'),
+    ('m_DapTime', 'f'),
     ('m_nameBoard', max_string_len)]
-    #('m_DiscMaxValue','h'), # WARNING: should this exist?
-    #('m_DiscMinValue','h') # WARNING: should this exist?
+# ('m_DiscMaxValue','h'), # WARNING: should this exist?
+# ('m_DiscMinValue','h') # WARNING: should this exist?
 
 Type2_DefBlocksChannels = [
     # common parameters for all types of channels
-    ('m_nextBlock','l'),
-    ('m_isAnalog','h'),
-    ('m_isInput','h'),
-    ('m_numChannel','h'),
-    ('m_numColor','h'),
-    ('m_Mode','h')]
+    ('m_nextBlock', 'l'),
+    ('m_isAnalog', 'h'),
+    ('m_isInput', 'h'),
+    ('m_numChannel', 'h'),
+    ('m_numColor', 'h'),
+    ('m_Mode', 'h')]
 
 Type2_SubBlockContinuousChannels = [
     # continuous channels parameters
-    ('blank', '2x'), # WARNING: this is not in the specs but it seems needed
-    ('m_Amplitude','f'),
-    ('m_SampleRate','f'),
-    ('m_ContBlkSize','h'),
-    ('m_ModeSpike','h'), # WARNING: the C code [2] uses usigned short here
-    ('m_Duration','f'),
-    ('m_bAutoScale','h'),
+    ('blank', '2x'),  # WARNING: this is not in the specs but it seems needed
+    ('m_Amplitude', 'f'),
+    ('m_SampleRate', 'f'),
+    ('m_ContBlkSize', 'h'),
+    ('m_ModeSpike', 'h'),  # WARNING: the C code [2] uses unsigned short here
+    ('m_Duration', 'f'),
+    ('m_bAutoScale', 'h'),
     ('m_Name', max_string_len)]
 
-Type2_SubBlockLevelChannels = [ # WARNING: untested
+Type2_SubBlockLevelChannels = [  # WARNING: untested
     # level channels parameters
-    ('m_Amplitude','f'),
-    ('m_SampleRate','f'),
-    ('m_nSpikeCount','h'),
-    ('m_ModeSpike','h'),
-    ('m_nPreTrigmSec','f'),
-    ('m_nPostTrigmSec','f'),
-    ('m_LevelValue','h'),
-    ('m_TrgMode','h'),
-    ('m_YesRms','h'),
-    ('m_bAutoScale','h'),
+    ('m_Amplitude', 'f'),
+    ('m_SampleRate', 'f'),
+    ('m_nSpikeCount', 'h'),
+    ('m_ModeSpike', 'h'),
+    ('m_nPreTrigmSec', 'f'),
+    ('m_nPostTrigmSec', 'f'),
+    ('m_LevelValue', 'h'),
+    ('m_TrgMode', 'h'),
+    ('m_YesRms', 'h'),
+    ('m_bAutoScale', 'h'),
     ('m_Name', max_string_len)]
 
-Type2_SubBlockExtTriggerChannels = [ # WARNING: untested
+Type2_SubBlockExtTriggerChannels = [  # WARNING: untested
     # external trigger channels parameters
-    ('m_Amplitude','f'),
-    ('m_SampleRate','f'),
-    ('m_nSpikeCount','h'),
-    ('m_ModeSpike','h'),
-    ('m_nPreTrigmSec','f'),
-    ('m_nPostTrigmSec','f'),
-    ('m_TriggerNumber','h'),
+    ('m_Amplitude', 'f'),
+    ('m_SampleRate', 'f'),
+    ('m_nSpikeCount', 'h'),
+    ('m_ModeSpike', 'h'),
+    ('m_nPreTrigmSec', 'f'),
+    ('m_nPostTrigmSec', 'f'),
+    ('m_TriggerNumber', 'h'),
     ('m_Name', max_string_len)]
 
 Type2_SubBlockDigitalChannels = [
     # digital channels parameters
-    ('m_SampleRate','f'),
-    ('m_SaveTrigger','h'),
-    ('m_Duration','f'),
-    ('m_PreviousStatus','h'), # WARNING: check difference with C code here
+    ('m_SampleRate', 'f'),
+    ('m_SaveTrigger', 'h'),
+    ('m_Duration', 'f'),
+    ('m_PreviousStatus', 'h'),  # WARNING: check difference with C code here
     ('m_Name', max_string_len)]
 
 Type2_SubBlockUnknownChannels = [
@@ -550,147 +518,149 @@ Type2_SubBlockUnknownChannels = [
     # It seems that for non-digital channels the beginning is
     # similar to continuous channels. Let's hope we're right...
     ('blank', '2x'),
-    ('m_Amplitude','f'),
-    ('m_SampleRate','f')]
-    # there are probably other fields after...
-
-Type6_DefBlockTrigger = [ # WARNING: untested
-    ('m_nextBlock','l'),
-    ('m_Number','h'),
-    ('m_countChannel','h'),
-    ('m_StateChannels','i'),
-    ('m_numChannel1','h'),
-    ('m_numChannel2','h'),
-    ('m_numChannel3','h'),
-    ('m_numChannel4','h'),
-    ('m_numChannel5','h'),
-    ('m_numChannel6','h'),
-    ('m_numChannel7','h'),
-    ('m_numChannel8','h'),
-    ('m_Name','c')]
-
-Type3_DefBlockGroup = [ # WARNING: untested
-    ('m_nextBlock','l'),
-    ('m_Number','h'),
-    ('m_Z_Order','h'),
-    ('m_countSubGroups','h'),
-    ('m_placeGroupWindow','x'), # WARNING: unknown type ('x' is wrong)
-    ('m_NetLoc','h'),
-    ('m_locatMax','x'), # WARNING: unknown type ('x' is wrong)
-    ('m_nameGroup','c')]
-
-Type4_DefBlockSubgroup = [ # WARNING: untested
-    ('m_nextBlock','l'),
-    ('m_Number','h'),
-    ('m_TypeOverlap','h'),
-    ('m_Z_Order','h'),
-    ('m_countChannel','h'),
-    ('m_NetLoc','h'),
-    ('m_location','x'), # WARNING: unknown type ('x' is wrong)
-    ('m_bIsMaximized','h'),
-    ('m_numChannel1','h'),
-    ('m_numChannel2','h'),
-    ('m_numChannel3','h'),
-    ('m_numChannel4','h'),
-    ('m_numChannel5','h'),
-    ('m_numChannel6','h'),
-    ('m_numChannel7','h'),
-    ('m_numChannel8','h'),
-    ('m_Name','c')]
+    ('m_Amplitude', 'f'),
+    ('m_SampleRate', 'f')]
+# there are probably other fields after...
+
+Type6_DefBlockTrigger = [  # WARNING: untested
+    ('m_nextBlock', 'l'),
+    ('m_Number', 'h'),
+    ('m_countChannel', 'h'),
+    ('m_StateChannels', 'i'),
+    ('m_numChannel1', 'h'),
+    ('m_numChannel2', 'h'),
+    ('m_numChannel3', 'h'),
+    ('m_numChannel4', 'h'),
+    ('m_numChannel5', 'h'),
+    ('m_numChannel6', 'h'),
+    ('m_numChannel7', 'h'),
+    ('m_numChannel8', 'h'),
+    ('m_Name', 'c')]
+
+Type3_DefBlockGroup = [  # WARNING: untested
+    ('m_nextBlock', 'l'),
+    ('m_Number', 'h'),
+    ('m_Z_Order', 'h'),
+    ('m_countSubGroups', 'h'),
+    ('m_placeGroupWindow', 'x'),  # WARNING: unknown type ('x' is wrong)
+    ('m_NetLoc', 'h'),
+    ('m_locatMax', 'x'),  # WARNING: unknown type ('x' is wrong)
+    ('m_nameGroup', 'c')]
+
+Type4_DefBlockSubgroup = [  # WARNING: untested
+    ('m_nextBlock', 'l'),
+    ('m_Number', 'h'),
+    ('m_TypeOverlap', 'h'),
+    ('m_Z_Order', 'h'),
+    ('m_countChannel', 'h'),
+    ('m_NetLoc', 'h'),
+    ('m_location', 'x'),  # WARNING: unknown type ('x' is wrong)
+    ('m_bIsMaximized', 'h'),
+    ('m_numChannel1', 'h'),
+    ('m_numChannel2', 'h'),
+    ('m_numChannel3', 'h'),
+    ('m_numChannel4', 'h'),
+    ('m_numChannel5', 'h'),
+    ('m_numChannel6', 'h'),
+    ('m_numChannel7', 'h'),
+    ('m_numChannel8', 'h'),
+    ('m_Name', 'c')]
 
 Type5_DataBlockOneChannel = [
-    ('m_numChannel','h')]
-    # WARNING: 'm_numChannel' (called 'm_Number' in 5.4.1 of [1]) is supposed
-    # to be uint according to 5.4.1 but it seems to be a short in the files
-    # (or should it be ushort ?)
+    ('m_numChannel', 'h')]
+# WARNING: 'm_numChannel' (called 'm_Number' in 5.4.1 of [1]) is supposed
+# to be uint according to 5.4.1 but it seems to be a short in the files
+# (or should it be ushort ?)
 
 # WARNING: In 5.1.1 page 121 of [1], they say "Note: 5 is used for demo
 # purposes, 7 is used for real data", but looking at some real datafiles,
 # it seems that blocks of type 5 are also used for real data...
 
-Type7_DataBlockMultipleChannels = [ # WARNING: unfinished
-    ('m_lenHead', 'h'), # WARNING: unknown true type
-    ('FINT','h')]
-    # WARNING: there should be data after...
-
-TypeP_DefBlockPeriStimHist = [ # WARNING: untested
-    ('m_Number_Chan','h'),
-    ('m_Position','x'), # WARNING: unknown type ('x' is wrong)
-    ('m_isStatVisible','h'),
-    ('m_DurationSec','f'),
-    ('m_Rows','i'),
-    ('m_DurationSecPre','f'),
-    ('m_Bins','i'),
-    ('m_NoTrigger','h')]
-
-TypeF_DefBlockFRTachogram = [ # WARNING: untested
-    ('m_Number_Chan','h'),
-    ('m_Position','x'), # WARNING: unknown type ('x' is wrong)
-    ('m_isStatVisible','h'),
-    ('m_DurationSec','f'),
-    ('m_AutoManualScale','i'),
-    ('m_Max','i')]
-
-TypeR_DefBlockRaster = [ # WARNING: untested
-    ('m_Number_Chan','h'),
-    ('m_Position','x'), # WARNING: unknown type ('x' is wrong)
-    ('m_isStatVisible','h'),
-    ('m_DurationSec','f'),
-    ('m_Rows','i'),
-    ('m_NoTrigger','h')]
-
-TypeI_DefBlockISIHist = [ # WARNING: untested
-    ('m_Number_Chan','h'),
-    ('m_Position','x'), # WARNING: unknown type ('x' is wrong)
-    ('m_isStatVisible','h'),
-    ('m_DurationSec','f'),
-    ('m_Bins','i'),
-    ('m_TypeScale','i')]
-
-Type8_MarkerBlock = [ # WARNING: untested
-    ('m_Number_Channel','h'),
-    ('m_Time','l')] # WARNING: check what's the right type here.
-    # It seems that the size of time_t type depends on the system typedef,
-    # I put long here but I couldn't check if it is the right type
-
-Type9_ScaleBlock = [ # WARNING: untested
-    ('m_Number_Channel','h'),
-    ('m_Scale','f')]
+Type7_DataBlockMultipleChannels = [  # WARNING: unfinished
+    ('m_lenHead', 'h'),  # WARNING: unknown true type
+    ('FINT', 'h')]
+# WARNING: there should be data after...
+
+TypeP_DefBlockPeriStimHist = [  # WARNING: untested
+    ('m_Number_Chan', 'h'),
+    ('m_Position', 'x'),  # WARNING: unknown type ('x' is wrong)
+    ('m_isStatVisible', 'h'),
+    ('m_DurationSec', 'f'),
+    ('m_Rows', 'i'),
+    ('m_DurationSecPre', 'f'),
+    ('m_Bins', 'i'),
+    ('m_NoTrigger', 'h')]
+
+TypeF_DefBlockFRTachogram = [  # WARNING: untested
+    ('m_Number_Chan', 'h'),
+    ('m_Position', 'x'),  # WARNING: unknown type ('x' is wrong)
+    ('m_isStatVisible', 'h'),
+    ('m_DurationSec', 'f'),
+    ('m_AutoManualScale', 'i'),
+    ('m_Max', 'i')]
+
+TypeR_DefBlockRaster = [  # WARNING: untested
+    ('m_Number_Chan', 'h'),
+    ('m_Position', 'x'),  # WARNING: unknown type ('x' is wrong)
+    ('m_isStatVisible', 'h'),
+    ('m_DurationSec', 'f'),
+    ('m_Rows', 'i'),
+    ('m_NoTrigger', 'h')]
+
+TypeI_DefBlockISIHist = [  # WARNING: untested
+    ('m_Number_Chan', 'h'),
+    ('m_Position', 'x'),  # WARNING: unknown type ('x' is wrong)
+    ('m_isStatVisible', 'h'),
+    ('m_DurationSec', 'f'),
+    ('m_Bins', 'i'),
+    ('m_TypeScale', 'i')]
+
+Type8_MarkerBlock = [  # WARNING: untested
+    ('m_Number_Channel', 'h'),
+    ('m_Time', 'l')]  # WARNING: check what's the right type here.
+# It seems that the size of time_t type depends on the system typedef,
+# I put long here but I couldn't check if it is the right type
+
+Type9_ScaleBlock = [  # WARNING: untested
+    ('m_Number_Channel', 'h'),
+    ('m_Scale', 'f')]
 
 Type_Unknown = []
 
 dict_header_type = {
-                    'h' : TypeH_Header,
-                    '0' : Type0_SetBoards,
-                    '1' : Type1_Boards,
-                    '2' : Type2_DefBlocksChannels,
-                    '6' : Type6_DefBlockTrigger,
-                    '3' : Type3_DefBlockGroup,
-                    '4' : Type4_DefBlockSubgroup,
-                    '5' : Type5_DataBlockOneChannel,
-                    '7' : Type7_DataBlockMultipleChannels,
-                    'P' : TypeP_DefBlockPeriStimHist,
-                    'F' : TypeF_DefBlockFRTachogram,
-                    'R' : TypeR_DefBlockRaster,
-                    'I' : TypeI_DefBlockISIHist,
-                    '8' : Type8_MarkerBlock,
-                    '9' : Type9_ScaleBlock
-                    }
+    'h': TypeH_Header,
+    '0': Type0_SetBoards,
+    '1': Type1_Boards,
+    '2': Type2_DefBlocksChannels,
+    '6': Type6_DefBlockTrigger,
+    '3': Type3_DefBlockGroup,
+    '4': Type4_DefBlockSubgroup,
+    '5': Type5_DataBlockOneChannel,
+    '7': Type7_DataBlockMultipleChannels,
+    'P': TypeP_DefBlockPeriStimHist,
+    'F': TypeF_DefBlockFRTachogram,
+    'R': TypeR_DefBlockRaster,
+    'I': TypeI_DefBlockISIHist,
+    '8': Type8_MarkerBlock,
+    '9': Type9_ScaleBlock
+}
 
 
 class HeaderReader():
-    def __init__(self,fid ,description ):
+    def __init__(self, fid, description):
         self.fid = fid
         self.description = description
-    def read_f(self, offset =None):
-        if offset is not None :
+
+    def read_f(self, offset=None):
+        if offset is not None:
             self.fid.seek(offset)
-        d = { }
-        for key, fmt in self.description :
-            fmt = '<' + fmt # insures use of standard sizes
+        d = {}
+        for key, fmt in self.description:
+            fmt = '<' + fmt  # ensures use of standard sizes
             buf = self.fid.read(struct.calcsize(fmt))
-            if len(buf) != struct.calcsize(fmt) : return None
-            val = list(struct.unpack(fmt , buf))
+            if len(buf) != struct.calcsize(fmt):
+                return None
+            val = list(struct.unpack(fmt, buf))
             for i, ival in enumerate(val):
                 if hasattr(ival, 'split'):
                     val[i] = ival.split('\x00', 1)[0]
@@ -698,5 +668,3 @@ class HeaderReader():
                 val = val[0]
             d[key] = val
         return d
-
-

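The header descriptions above are lists of (field name, struct format code) pairs that HeaderReader.read_f unpacks one field at a time. A self-contained sketch of that convention, using made-up bytes rather than a real .map block:

    >>> import struct
    >>> from io import BytesIO
    >>> fid = BytesIO(b'\x10\x00\x00\x00\x05\x00')    # fake header bytes
    >>> header = {}
    >>> for key, fmt in [('m_nextBlock', 'l'), ('m_version', 'h')]:
    ...     fmt = '<' + fmt                           # little-endian, standard sizes, as in read_f
    ...     header[key] = struct.unpack(fmt, fid.read(struct.calcsize(fmt)))[0]
    >>> header
    {'m_nextBlock': 16, 'm_version': 5}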
+ 85 - 94
code/python-neo/neo/io/asciisignalio.py

@@ -31,47 +31,47 @@ class AsciiSignalIO(BaseIO):
     Usage:
         >>> from neo import io
         >>> r = io.AsciiSignalIO(filename='File_asciisignal_2.txt')
-        >>> seg = r.read_segment(lazy=False, cascade=True)
+        >>> seg = r.read_segment()
         >>> print seg.analogsignals
         [<AnalogSignal(array([ 39.0625    ,   0.        ,   0.        , ..., -26.85546875 ...
 
     """
 
-    is_readable        = True
-    is_writable        = True
-
-    supported_objects  = [ Segment , AnalogSignal]
-    readable_objects   = [ Segment]
-    writeable_objects  = [Segment]
-
-    has_header         = False
-    is_streameable     = False
-
-    read_params        = {
-                            Segment : [
-                                        ('delimiter' , {'value' :  '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
-                                        ('usecols' , { 'value' : None , 'type' : int } ),
-                                        ('skiprows' , { 'value' :0 } ),
-                                        ('timecolumn' , { 'value' : None, 'type' : int } ) ,
-                                        ('unit' , { 'value' : 'V', } ),
-                                        ('sampling_rate' , { 'value' : 1000., } ),
-                                        ('t_start' , { 'value' : 0., } ),
-                                        ('method' , { 'value' : 'homemade', 'possible' : ['genfromtxt' , 'csv' , 'homemade' ] }) ,
-                                        ]
-                            }
-    write_params       = {
-                            Segment : [
-                                        ('delimiter' , {'value' :  '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
-                                        ('writetimecolumn' , { 'value' : True,  } ) ,
-                                        ]
-                            }
-
-    name               = None
-    extensions          = [ 'txt' , 'asc', ]
+    is_readable = True
+    is_writable = True
+
+    supported_objects = [Segment, AnalogSignal]
+    readable_objects = [Segment]
+    writeable_objects = [Segment]
+
+    has_header = False
+    is_streameable = False
+
+    read_params = {
+        Segment: [
+            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
+            ('usecols', {'value': None, 'type': int}),
+            ('skiprows', {'value': 0}),
+            ('timecolumn', {'value': None, 'type': int}),
+            ('unit', {'value': 'V', }),
+            ('sampling_rate', {'value': 1000., }),
+            ('t_start', {'value': 0., }),
+            ('method', {'value': 'homemade', 'possible': ['genfromtxt', 'csv', 'homemade']}),
+        ]
+    }
+    write_params = {
+        Segment: [
+            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
+            ('writetimecolumn', {'value': True, }),
+        ]
+    }
+
+    name = None
+    extensions = ['txt', 'asc', ]
 
     mode = 'file'
 
-    def __init__(self , filename = None) :
+    def __init__(self, filename=None):
         """
         This class reads/writes AnalogSignal in a text file.
         Each signal is a column.
@@ -84,21 +84,20 @@ class AsciiSignalIO(BaseIO):
         self.filename = filename
 
     def read_segment(self,
-                                        lazy = False,
-                                        cascade = True,
-                                        delimiter = '\t',
-                                        usecols = None,
-                                        skiprows =0,
+                     lazy=False,
+                     delimiter='\t',
+                     usecols=None,
+                     skiprows=0,
 
-                                        timecolumn = None,
-                                        sampling_rate = 1.*pq.Hz,
-                                        t_start = 0.*pq.s,
+                     timecolumn=None,
+                     sampling_rate=1. * pq.Hz,
+                     t_start=0. * pq.s,
 
-                                        unit = pq.V,
+                     unit=pq.V,
 
-                                        method = 'genfromtxt',
+                     method='genfromtxt',
 
-                                        ):
+                     ):
         """
         Arguments:
             delimiter  :  columns delimiter in file  '\t' or one space or two space or ',' or ';'
@@ -117,81 +116,75 @@ class AsciiSignalIO(BaseIO):
                         'homemade' use a intuitive more robust but slow method
 
         """
-        seg = Segment(file_origin = os.path.basename(self.filename))
-        if not cascade:
-            return seg
+        assert not lazy, 'Do not support lazy'
 
-        if type(sampling_rate) == float or type(sampling_rate)==int:
+        seg = Segment(file_origin=os.path.basename(self.filename))
+
+        if type(sampling_rate) == float or type(sampling_rate) == int:
             # if not quantities, Hz by default
-            sampling_rate = sampling_rate*pq.Hz
+            sampling_rate = sampling_rate * pq.Hz
 
-        if type(t_start) == float or type(t_start)==int:
+        if type(t_start) == float or type(t_start) == int:
             # if not quantities, s by default
-            t_start = t_start*pq.s
+            t_start = t_start * pq.s
 
         unit = pq.Quantity(1, unit)
 
-
-
-        #loadtxt
-        if method == 'genfromtxt' :
+        # loadtxt
+        if method == 'genfromtxt':
             sig = np.genfromtxt(self.filename,
-                                        delimiter = delimiter,
-                                        usecols = usecols ,
-                                        skip_header = skiprows,
-                                        dtype = 'f')
-            if len(sig.shape) ==1:
+                                delimiter=delimiter,
+                                usecols=usecols,
+                                skip_header=skiprows,
+                                dtype='f')
+            if len(sig.shape) == 1:
                 sig = sig[:, np.newaxis]
-        elif method == 'csv' :
-            tab = [l for l in  csv.reader( file(self.filename,'rU') , delimiter = delimiter ) ]
+        elif method == 'csv':
+            tab = [l for l in csv.reader(file(self.filename, 'rU'), delimiter=delimiter)]
             tab = tab[skiprows:]
-            sig = np.array( tab , dtype = 'f')
-        elif method == 'homemade' :
-            fid = open(self.filename,'rU')
+            sig = np.array(tab, dtype='f')
+        elif method == 'homemade':
+            fid = open(self.filename, 'rU')
             for l in range(skiprows):
                 fid.readline()
-            tab = [ ]
+            tab = []
             for line in fid.readlines():
-                line = line.replace('\r','')
-                line = line.replace('\n','')
+                line = line.replace('\r', '')
+                line = line.replace('\n', '')
                 l = line.split(delimiter)
-                while '' in l :
+                while '' in l:
                     l.remove('')
                 tab.append(l)
-            sig = np.array( tab , dtype = 'f')
+            sig = np.array(tab, dtype='f')
 
         if timecolumn is not None:
-            sampling_rate = 1./np.mean(np.diff(sig[:,timecolumn])) * pq.Hz
-            t_start = sig[0,timecolumn] * pq.s
-
+            sampling_rate = 1. / np.mean(np.diff(sig[:, timecolumn])) * pq.Hz
+            t_start = sig[0, timecolumn] * pq.s
 
+        for i in range(sig.shape[1]):
+            if timecolumn == i:
+                continue
+            if usecols is not None and i not in usecols:
+                continue
 
-        for i in range(sig.shape[1]) :
-            if timecolumn == i : continue
-            if usecols is not None and i not in usecols: continue
-
-            if lazy:
-                signal = [ ]*unit
-            else:
-                signal = sig[:,i]*unit
+            signal = sig[:, i] * unit
 
             anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                   t_start=t_start, channel_index=i,
-                                  name='Column %d'%i)
-            if lazy:
-                anaSig.lazy_shape = sig.shape
-            seg.analogsignals.append( anaSig )
+                                  name='Column %d' % i)
+
+            seg.analogsignals.append(anaSig)
 
         seg.create_many_to_one_relationship()
         return seg
 
     def write_segment(self, segment,
-                                delimiter = '\t',
+                      delimiter='\t',
 
-                                skiprows =0,
-                                writetimecolumn = True,
+                      skiprows=0,
+                      writetimecolumn=True,
 
-                                ):
+                      ):
         """
         Write a segment and AnalogSignal in a text file.
 
@@ -202,13 +195,11 @@ class AsciiSignalIO(BaseIO):
         if skiprows:
             raise NotImplementedError('skiprows values other than 0 are not ' +
                                       'supported')
-        l = [ ]
+        l = []
         if writetimecolumn is not None:
             l.append(segment.analogsignals[0].times[:, np.newaxis])
         for anaSig in segment.analogsignals:
             l.append(anaSig.magnitude[:, np.newaxis])
         sigs = np.concatenate(l, axis=1)
-        #print sigs.shape
-        np.savetxt(self.filename , sigs , delimiter = delimiter)
-
-
+        # print sigs.shape
+        np.savetxt(self.filename, sigs, delimiter=delimiter)

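For orientation, a sketch of the reworked read_segment signature shown above; the file 'signals.txt' and its layout (first column time in seconds, remaining columns voltages) are assumptions made for the example:

    >>> from neo import io
    >>> r = io.AsciiSignalIO(filename='signals.txt')
    >>> seg = r.read_segment(delimiter='\t', timecolumn=0, unit='V',
    ...                      method='genfromtxt')
    >>> seg.analogsignals[0].sampling_rate

When timecolumn is given, sampling_rate and t_start are derived from that column (overriding the arguments) and the time column itself is not returned as a signal.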
+ 48 - 57
code/python-neo/neo/io/asciispiketrainio.py

@@ -22,48 +22,47 @@ from neo.core import Segment, SpikeTrain
 class AsciiSpikeTrainIO(BaseIO):
     """
 
-    Classe for reading/writing SpikeTrains in a text file.
+    Class for reading/writing SpikeTrains in a text file.
     Each Spiketrain is a line.
 
     Usage:
         >>> from neo import io
         >>> r = io.AsciiSpikeTrainIO( filename = 'File_ascii_spiketrain_1.txt')
-        >>> seg = r.read_segment(lazy = False, cascade = True,)
+        >>> seg = r.read_segment()
         >>> print seg.spiketrains     # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
         [<SpikeTrain(array([ 3.89981604,  4.73258781,  0.608428  ,  4.60246277,  1.23805797,
         ...
 
     """
 
-    is_readable        = True
-    is_writable        = True
+    is_readable = True
+    is_writable = True
 
-    supported_objects  = [Segment , SpikeTrain]
-    readable_objects   = [Segment]
-    writeable_objects  = [Segment]
+    supported_objects = [Segment, SpikeTrain]
+    readable_objects = [Segment]
+    writeable_objects = [Segment]
 
-    has_header         = False
-    is_streameable     = False
+    has_header = False
+    is_streameable = False
 
-    read_params        = {
-                            Segment : [
-                                        ('delimiter' , {'value' :  '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
-                                        ('t_start' , { 'value' : 0., } ),
-                                        ]
-                            }
-    write_params       = {
-                            Segment : [
-                                        ('delimiter' , {'value' :  '\t', 'possible' : ['\t' , ' ' , ',' , ';'] }) ,
-                                        ]
-                            }
+    read_params = {
+        Segment: [
+            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
+            ('t_start', {'value': 0., }),
+        ]
+    }
+    write_params = {
+        Segment: [
+            ('delimiter', {'value': '\t', 'possible': ['\t', ' ', ',', ';']}),
+        ]
+    }
 
-    name               = None
-    extensions          = [ 'txt' ]
+    name = None
+    extensions = ['txt']
 
     mode = 'file'
 
-
-    def __init__(self , filename = None) :
+    def __init__(self, filename=None):
         """
         This class reads/writes SpikeTrains in a text file.
         Each row is a spiketrain.
@@ -77,41 +76,37 @@ class AsciiSpikeTrainIO(BaseIO):
         self.filename = filename
 
     def read_segment(self,
-                            lazy = False,
-                            cascade = True,
-                            delimiter = '\t',
-                            t_start = 0.*pq.s,
-                            unit = pq.s,
-                            ):
+                     lazy=False,
+                     delimiter='\t',
+                     t_start=0. * pq.s,
+                     unit=pq.s,
+                     ):
         """
         Arguments:
             delimiter  :  column delimiter in the file: '\t', one space, two spaces, ',' or ';'
             t_start : start time of all spiketrains, 0 by default
             unit : unit of the spike times, can be a str or directly a Quantity
         """
+        assert not lazy, 'Do not support lazy'
+
         unit = pq.Quantity(1, unit)
 
-        seg = Segment(file_origin = os.path.basename(self.filename))
-        if not cascade:
-            return seg
+        seg = Segment(file_origin=os.path.basename(self.filename))
 
         f = open(self.filename, 'Ur')
-        for i,line in enumerate(f) :
+        for i, line in enumerate(f):
             alldata = line[:-1].split(delimiter)
-            if alldata[-1] == '': alldata = alldata[:-1]
-            if alldata[0] == '': alldata = alldata[1:]
-            if lazy:
-                spike_times = [ ]
-                t_stop = t_start
-            else:
-                spike_times = np.array(alldata).astype('f')
-                t_stop = spike_times.max()*unit
-
-            sptr = SpikeTrain(spike_times*unit, t_start=t_start, t_stop=t_stop)
-            if lazy:
-                sptr.lazy_shape = len(alldata)
-
-            sptr.annotate(channel_index = i)
+            if alldata[-1] == '':
+                alldata = alldata[:-1]
+            if alldata[0] == '':
+                alldata = alldata[1:]
+
+            spike_times = np.array(alldata).astype('f')
+            t_stop = spike_times.max() * unit
+
+            sptr = SpikeTrain(spike_times * unit, t_start=t_start, t_stop=t_stop)
+
+            sptr.annotate(channel_index=i)
             seg.spiketrains.append(sptr)
         f.close()
 
@@ -119,8 +114,8 @@ class AsciiSpikeTrainIO(BaseIO):
         return seg
 
     def write_segment(self, segment,
-                                delimiter = '\t',
-                                ):
+                      delimiter='\t',
+                      ):
         """
        Write the SpikeTrains of a Segment to a txt file.
         Each row is a spiketrain.
@@ -134,12 +129,8 @@ class AsciiSpikeTrainIO(BaseIO):
         """
 
         f = open(self.filename, 'w')
-        for s,sptr in enumerate(segment.spiketrains) :
-            for ts in sptr :
-                f.write('%f%s'% (ts , delimiter) )
+        for s, sptr in enumerate(segment.spiketrains):
+            for ts in sptr:
+                f.write('%f%s' % (ts, delimiter))
             f.write('\n')
         f.close()
-
-
-
-

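For reference, a minimal usage sketch of the simplified AsciiSpikeTrainIO API above (the `cascade` argument is gone and `lazy` must stay False); 'spikes.txt' and 'spikes_copy.txt' are hypothetical paths, not files from this repository:

    # Hedged sketch: read spike trains from a tab-delimited text file and write them back.
    import quantities as pq
    from neo.io import AsciiSpikeTrainIO

    reader = AsciiSpikeTrainIO(filename='spikes.txt')          # hypothetical input file
    seg = reader.read_segment(delimiter='\t', t_start=0. * pq.s, unit=pq.s)
    for st in seg.spiketrains:
        # each SpikeTrain carries the row index as 'channel_index', as annotated above
        print(st.annotations['channel_index'], len(st))

    writer = AsciiSpikeTrainIO(filename='spikes_copy.txt')      # hypothetical output file
    writer.write_segment(seg, delimiter='\t')
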
+ 41 - 850
code/python-neo/neo/io/axonio.py

@@ -1,875 +1,66 @@
 # -*- coding: utf-8 -*-
-"""
 
-Class for reading data from pCLAMP and AxoScope
-files (.abf version 1 and 2), developed by Molecular device/Axon technologies.
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.axonrawio import AxonRawIO
 
-- abf = Axon binary file
-- atf is a text file based format from axon that could be
-  read by AsciiIO (but this file is less efficient.)
-
-
-This code is a port of abfload and abf2load
-written in Matlab (BSD-2-Clause licence) by :
- - Copyright (c) 2009, Forrest Collman, fcollman@princeton.edu
- - Copyright (c) 2004, Harald Hentschke
-and available here:
-http://www.mathworks.com/matlabcentral/fileexchange/22114-abf2load
-
-Information on abf 1 and 2 formats is available here:
-http://www.moleculardevices.com/pages/software/developer_info.html
-
-This file supports the old (ABF1) and new (ABF2) format.
-ABF1 (clampfit <=9) and ABF2 (clampfit >10)
-
-All possible mode are possible :
-    - event-driven variable-length mode 1 -> return several Segments per Block
-    - event-driven fixed-length mode 2 or 5 -> return several Segments
-    - gap free mode -> return one (or sevral) Segment in the Block
-
-Supported : Read
-
-Author: sgarcia, jnowacki
-
-Note: j.s.nowacki@gmail.com has a C++ library with SWIG bindings which also
-reads abf files - would be good to cross-check
-
-"""
-
-import struct
-import datetime
-import os
-from io import open, BufferedReader
-
-import numpy as np
-import quantities as pq
-
-from neo.io.baseio import BaseIO
 from neo.core import Block, Segment, AnalogSignal, Event
+import quantities as pq
 
 
-class StructFile(BufferedReader):
-    def read_f(self, fmt, offset=None):
-        if offset is not None:
-            self.seek(offset)
-        return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
-
-    def write_f(self, fmt, offset=None, *args):
-        if offset is not None:
-            self.seek(offset)
-        self.write(struct.pack(fmt, *args))
-
-
-def reformat_integer_v1(data, nbchannel, header):
-    """
-    reformat when dtype is int16 for ABF version 1
-    """
-    chans = [chan_num for chan_num in
-             header['nADCSamplingSeq'] if chan_num >= 0]
-    for n, i in enumerate(chans[:nbchannel]):  # respect SamplingSeq
-        data[:, n] /= header['fInstrumentScaleFactor'][i]
-        data[:, n] /= header['fSignalGain'][i]
-        data[:, n] /= header['fADCProgrammableGain'][i]
-        if header['nTelegraphEnable'][i]:
-            data[:, n] /= header['fTelegraphAdditGain'][i]
-        data[:, n] *= header['fADCRange']
-        data[:, n] /= header['lADCResolution']
-        data[:, n] += header['fInstrumentOffset'][i]
-        data[:, n] -= header['fSignalOffset'][i]
-
-
-def reformat_integer_v2(data, nbchannel, header):
-    """
-    reformat when dtype is int16 for ABF version 2
-    """
-    for i in range(nbchannel):
-        data[:, i] /= header['listADCInfo'][i]['fInstrumentScaleFactor']
-        data[:, i] /= header['listADCInfo'][i]['fSignalGain']
-        data[:, i] /= header['listADCInfo'][i]['fADCProgrammableGain']
-        if header['listADCInfo'][i]['nTelegraphEnable']:
-            data[:, i] /= header['listADCInfo'][i]['fTelegraphAdditGain']
-        data[:, i] *= header['protocol']['fADCRange']
-        data[:, i] /= header['protocol']['lADCResolution']
-        data[:, i] += header['listADCInfo'][i]['fInstrumentOffset']
-        data[:, i] -= header['listADCInfo'][i]['fSignalOffset']
-
-
-def clean_string(s):
-    s = s.rstrip(b'\x00')
-    s = s.rstrip(b' ')
-    return s
-
-
-class AxonIO(BaseIO):
+class AxonIO(AxonRawIO, BaseFromRaw):
     """
     Class for reading data from pCLAMP and AxoScope
-    files (.abf version 1 and 2), developed by Molecular Device/Axon Technologies.
-
-    Usage:
-        >>> from neo import io
-        >>> r = io.AxonIO(filename='File_axon_1.abf')
-        >>> bl = r.read_block(lazy=False, cascade=True)
-        >>> print bl.segments
-        [<neo.core.segment.Segment object at 0x105516fd0>]
-        >>> print bl.segments[0].analogsignals
-        [<AnalogSignal(array([ 2.18811035,  2.19726562,  2.21252441, ...,
-            1.33056641,  1.3458252 ,  1.3671875 ], dtype=float32) * pA,
-            [0.0 s, 191.2832 s], sampling rate: 10000.0 Hz)>]
-        >>> print bl.segments[0].events
-        []
+    files (.abf version 1 and 2), developed by Molecular Devices/Axon Technologies.
+
+    - abf = Axon binary file
+    - atf is a text-based format from Axon that can be
+      read by AsciiIO (but this format is less efficient).
+
+    Here is an important note from erikli@github for users who want to reconstruct the original stimulus waveform.
+    With Axon ABF2 files, the information that you need to recapitulate the original stimulus waveform (both digital and analog) is contained in multiple places.
+
+     - `AxonIO._axon_info['protocol']` -- things like the number of samples per episode
+     - `AxonIO._axon_info['section']['ADCSection']` | `AxonIO._axon_info['section']['DACSection']` -- the number of channels and channel properties
+     - `AxonIO._axon_info['protocol']['nActiveDACChannel']` -- bitmask specifying which DACs are actually active
+     - `AxonIO._axon_info['protocol']['nDigitalEnable']` -- bitmask specifying which set of Epoch timings should be used to specify the duration of digital outputs
+     - `AxonIO._axon_info['dictEpochInfoPerDAC']` -- dict of dicts. The first index is the DAC channel and the second index is the Epoch number (i.e. information about Epoch A in Channel 2 would be in `AxonIO._axon_info['dictEpochInfoPerDAC'][2][0]`)
+     - `AxonIO._axon_info['EpochInfo']` -- list of dicts containing information about each Epoch's digital out pattern. Digital out is a bitmask with the least significant bit corresponding to Digital Out 0
+     - `AxonIO._axon_info['listDACInfo']` -- information about DAC name, scale factor, holding level, etc.
+     - `AxonIO._t_starts` -- start time of each sweep in a unified time basis
+     - `AxonIO._sampling_rate`
+
+    The current AxonIO.read_protocol() method utilizes a subset of these. In particular, I know it doesn't consider
+    `nDigitalEnable`, `EpochInfo`, or `nActiveDACChannel`, and it doesn't account for the different types of Epochs
+    offered by Clampex/pClamp other than discrete steps (such as ramp, pulse train, etc., encoded by `nEpochType` in
+    the EpochInfoPerDAC section). I'm currently parsing a superset of the properties used by read_protocol() in my
+    analysis scripts, but that code still doesn't parse the full information, isn't in a state where it could be
+    committed, and I can't currently prioritize putting together all the code that would parse the full set of data.
+    The `AxonIO._axon_info['EpochInfo']` section doesn't currently exist.
 
     """
+    _prefered_signal_group_mode = 'split-all'
 
-    is_readable = True
-    is_writable = False
-
-    supported_objects = [Block, Segment, AnalogSignal, Event]
-    readable_objects = [Block]
-    writeable_objects = []
-
-    has_header = False
-    is_streameable = False
-
-    read_params = {Block: []}
-    write_params = None
-
-    name = 'Axon'
-    extensions = ['abf']
-
-    mode = 'file'
-
-    def __init__(self, filename=None):
-        """
-        This class read a abf file.
-
-        Arguments:
-            filename : the filename to read
-
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-
-    def read_block(self, lazy=False, cascade=True):
-
-        header = self.read_header()
-        version = header['fFileVersionNumber']
-
-        bl = Block()
-        bl.file_origin = os.path.basename(self.filename)
-        bl.annotate(abf_version=str(version))
-
-        # date and time
-        if version < 2.:
-            YY = 1900
-            MM = 1
-            DD = 1
-            hh = int(header['lFileStartTime'] / 3600.)
-            mm = int((header['lFileStartTime'] - hh * 3600) / 60)
-            ss = header['lFileStartTime'] - hh * 3600 - mm * 60
-            ms = int(np.mod(ss, 1) * 1e6)
-            ss = int(ss)
-        elif version >= 2.:
-            YY = int(header['uFileStartDate'] / 10000)
-            MM = int((header['uFileStartDate'] - YY * 10000) / 100)
-            DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
-            hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
-            mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
-            ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
-            ms = int(np.mod(ss, 1) * 1e6)
-            ss = int(ss)
-        bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)
-
-        if not cascade:
-            return bl
-
-        # file format
-        if header['nDataFormat'] == 0:
-            dt = np.dtype('i2')
-        elif header['nDataFormat'] == 1:
-            dt = np.dtype('f4')
-
-        if version < 2.:
-            nbchannel = header['nADCNumChannels']
-            head_offset = header['lDataSectionPtr'] * BLOCKSIZE + header[
-                'nNumPointsIgnored'] * dt.itemsize
-            totalsize = header['lActualAcqLength']
-        elif version >= 2.:
-            nbchannel = header['sections']['ADCSection']['llNumEntries']
-            head_offset = header['sections']['DataSection'][
-                'uBlockIndex'] * BLOCKSIZE
-            totalsize = header['sections']['DataSection']['llNumEntries']
-
-        data = np.memmap(self.filename, dt, 'r',
-                         shape=(totalsize,), offset=head_offset)
-
-        # 3 possible modes
-        if version < 2.:
-            mode = header['nOperationMode']
-        elif version >= 2.:
-            mode = header['protocol']['nOperationMode']
-
-        if (mode == 1) or (mode == 2) or (mode == 5) or (mode == 3):
-            # event-driven variable-length mode (mode 1)
-            # event-driven fixed-length mode (mode 2 or 5)
-            # gap free mode (mode 3) can be in several episodes
-
-            # read sweep pos
-            if version < 2.:
-                nbepisod = header['lSynchArraySize']
-                offset_episode = header['lSynchArrayPtr'] * BLOCKSIZE
-            elif version >= 2.:
-                nbepisod = header['sections']['SynchArraySection'][
-                    'llNumEntries']
-                offset_episode = header['sections']['SynchArraySection'][
-                    'uBlockIndex'] * BLOCKSIZE
-            if nbepisod > 0:
-                episode_array = np.memmap(
-                    self.filename, [('offset', 'i4'), ('len', 'i4')], 'r',
-                    shape=nbepisod, offset=offset_episode)
-            else:
-                episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
-                episode_array[0]['len'] = data.size
-                episode_array[0]['offset'] = 0
-
-            # sampling_rate
-            if version < 2.:
-                sampling_rate = 1. / (header['fADCSampleInterval'] *
-                                      nbchannel * 1.e-6) * pq.Hz
-            elif version >= 2.:
-                sampling_rate = 1.e6 / \
-                    header['protocol']['fADCSequenceInterval'] * pq.Hz
-
-            # construct block
-            # one sweep = one segment in a block
-            pos = 0
-            for j in range(episode_array.size):
-                seg = Segment(index=j)
-
-                length = episode_array[j]['len']
-
-                if version < 2.:
-                    fSynchTimeUnit = header['fSynchTimeUnit']
-                elif version >= 2.:
-                    fSynchTimeUnit = header['protocol']['fSynchTimeUnit']
-
-                if (fSynchTimeUnit != 0) and (mode == 1):
-                    length /= fSynchTimeUnit
-
-                if not lazy:
-                    subdata = data[pos:pos+length]
-                    subdata = subdata.reshape((int(subdata.size/nbchannel),
-                                               nbchannel)).astype('f')
-                    if dt == np.dtype('i2'):
-                        if version < 2.:
-                            reformat_integer_v1(subdata, nbchannel, header)
-                        elif version >= 2.:
-                            reformat_integer_v2(subdata, nbchannel, header)
-
-                pos += length
-
-                if version < 2.:
-                    chans = [chan_num for chan_num in
-                             header['nADCSamplingSeq'] if chan_num >= 0]
-                else:
-                    chans = range(nbchannel)
-                for n, i in enumerate(chans[:nbchannel]):  # fix SamplingSeq
-                    if version < 2.:
-                        name = header['sADCChannelName'][i].replace(b' ', b'')
-                        unit = header['sADCUnits'][i].replace(b'\xb5', b'u').\
-                            replace(b' ', b'').decode('utf-8')  # \xb5 is µ
-                        num = header['nADCPtoLChannelMap'][i]
-                    elif version >= 2.:
-                        lADCIi = header['listADCInfo'][i]
-                        name = lADCIi['ADCChNames'].replace(b' ', b'')
-                        unit = lADCIi['ADCChUnits'].replace(b'\xb5', b'u').\
-                            replace(b' ', b'').decode('utf-8')
-                        num = header['listADCInfo'][i]['nADCNum']
-                    if (fSynchTimeUnit == 0):
-                        t_start = float(episode_array[j]['offset']) / sampling_rate
-                    else:
-                        t_start = float(episode_array[j]['offset']) * fSynchTimeUnit *1e-6* pq.s
-                    t_start = t_start.rescale('s')
-                    try:
-                        pq.Quantity(1, unit)
-                    except:
-                        unit = ''
-
-                    if lazy:
-                        signal = [] * pq.Quantity(1, unit)
-                    else:
-                        signal = pq.Quantity(subdata[:, n], unit)
-
-                    anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
-                                          t_start=t_start,
-                                          name=str(name.decode("utf-8")),
-                                          channel_index=int(num))
-                    if lazy:
-                        anaSig.lazy_shape = length / nbchannel
-                    seg.analogsignals.append(anaSig)
-                bl.segments.append(seg)
-
-            if mode in [3, 5]:  # TODO check if tags exits in other mode
-                # tag is EventArray that should be attached to Block
-                # It is attched to the first Segment
-                times = []
-                labels = []
-                comments = []
-                for i, tag in enumerate(header['listTag']):
-                    times.append(tag['lTagTime']/sampling_rate)
-                    labels.append(str(tag['nTagType']))
-                    comments.append(clean_string(tag['sComment']))
-                times = np.array(times)
-                labels = np.array(labels, dtype='S')
-                comments = np.array(comments, dtype='S')
-                # attach all tags to the first segment.
-                seg = bl.segments[0]
-                if lazy:
-                    ea = Event(times=[] * pq.s, labels=np.array([], dtype='S'))
-                    ea.lazy_shape = len(times)
-                else:
-                    ea = Event(times=times * pq.s, labels=labels,
-                               comments=comments)
-                seg.events.append(ea)
-
-        bl.create_many_to_one_relationship()
-        return bl
-
-    def read_header(self,):
-        """
-        read the header of the file
-
-        The strategy here differs from the original script under Matlab.
-        In the original script for ABF2, it completes the header with
-        information that is located in other structures.
-
-        In ABF2 this function returns header with sub dict:
-            sections             (ABF2)
-            protocol             (ABF2)
-            listTags             (ABF1&2)
-            listADCInfo          (ABF2)
-            listDACInfo          (ABF2)
-            dictEpochInfoPerDAC  (ABF2)
-        that contains more information.
-        """
-        fid = StructFile(open(self.filename, 'rb'))  # fix for py3
-
-        # version
-        f_file_signature = fid.read(4)
-        if f_file_signature == b'ABF ':  # fix for p3 where read returns bytes
-            header_description = headerDescriptionV1
-        elif f_file_signature == b'ABF2':
-            header_description = headerDescriptionV2
-        else:
-            return None
-
-        # construct dict
-        header = {}
-        for key, offset, fmt in header_description:
-            val = fid.read_f(fmt, offset=offset)
-            if len(val) == 1:
-                header[key] = val[0]
-            else:
-                header[key] = np.array(val)
-
-        # correction of version number and starttime
-        if f_file_signature == b'ABF ':
-            header['lFileStartTime'] += header[
-                'nFileStartMillisecs'] * .001
-        elif f_file_signature == b'ABF2':
-            n = header['fFileVersionNumber']
-            header['fFileVersionNumber'] = n[3] + 0.1 * n[2] +\
-                0.01 * n[1] + 0.001 * n[0]
-            header['lFileStartTime'] = header['uFileStartTimeMS'] * .001
-
-        if header['fFileVersionNumber'] < 2.:
-            # tags
-            listTag = []
-            for i in range(header['lNumTagEntries']):
-                fid.seek(header['lTagSectionPtr'] + i * 64)
-                tag = {}
-                for key, fmt in TagInfoDescription:
-                    val = fid.read_f(fmt)
-                    if len(val) == 1:
-                        tag[key] = val[0]
-                    else:
-                        tag[key] = np.array(val)
-                listTag.append(tag)
-            header['listTag'] = listTag
-            #protocol name formatting #TODO move to read_protocol?
-            header['sProtocolPath'] = clean_string(header['sProtocolPath'])
-            header['sProtocolPath'] = header['sProtocolPath'].\
-                replace(b'\\', b'/')
-
-        elif header['fFileVersionNumber'] >= 2.:
-            # in abf2 some info are in other place
-
-            # sections
-            sections = {}
-            for s, sectionName in enumerate(sectionNames):
-                uBlockIndex, uBytes, llNumEntries =\
-                    fid.read_f('IIl', offset=76 + s * 16)
-                sections[sectionName] = {}
-                sections[sectionName]['uBlockIndex'] = uBlockIndex
-                sections[sectionName]['uBytes'] = uBytes
-                sections[sectionName]['llNumEntries'] = llNumEntries
-            header['sections'] = sections
-
-            # strings sections
-            # hack for reading channels names and units
-            fid.seek(sections['StringsSection']['uBlockIndex'] * BLOCKSIZE)
-            big_string = fid.read(sections['StringsSection']['uBytes'])
-            goodstart=-1
-            for key in [b'AXENGN', b'clampex', b'Clampex', b'CLAMPEX', b'axoscope']:
-                #goodstart = big_string.lower().find(key)
-                goodstart = big_string.find(key)
-                if goodstart!=-1: break
-            assert goodstart!=-1, 'This file does not contain clampex, axoscope or clampfit in the header'
-            big_string = big_string[goodstart:]
-            strings = big_string.split(b'\x00')
-
-            # ADC sections
-            header['listADCInfo'] = []
-            for i in range(sections['ADCSection']['llNumEntries']):
-                # read ADCInfo
-                fid.seek(sections['ADCSection']['uBlockIndex'] *
-                         BLOCKSIZE + sections['ADCSection']['uBytes'] * i)
-                ADCInfo = {}
-                for key, fmt in ADCInfoDescription:
-                    val = fid.read_f(fmt)
-                    if len(val) == 1:
-                        ADCInfo[key] = val[0]
-                    else:
-                        ADCInfo[key] = np.array(val)
-                ADCInfo['ADCChNames'] = strings[ADCInfo['lADCChannelNameIndex'] - 1]
-                ADCInfo['ADCChUnits'] = strings[ADCInfo['lADCUnitsIndex'] - 1]
-                header['listADCInfo'].append(ADCInfo)
-
-            # protocol sections
-            protocol = {}
-            fid.seek(sections['ProtocolSection']['uBlockIndex'] * BLOCKSIZE)
-            for key, fmt in protocolInfoDescription:
-                val = fid.read_f(fmt)
-                if len(val) == 1:
-                    protocol[key] = val[0]
-                else:
-                    protocol[key] = np.array(val)
-            header['protocol'] = protocol
-            header['sProtocolPath'] = strings[header['uProtocolPathIndex']-1]
-
-            # tags
-            listTag = []
-            for i in range(sections['TagSection']['llNumEntries']):
-                fid.seek(sections['TagSection']['uBlockIndex'] *
-                         BLOCKSIZE + sections['TagSection']['uBytes'] * i)
-                tag = {}
-                for key, fmt in TagInfoDescription:
-                    val = fid.read_f(fmt)
-                    if len(val) == 1:
-                        tag[key] = val[0]
-                    else:
-                        tag[key] = np.array(val)
-                listTag.append(tag)
-
-            header['listTag'] = listTag
-
-            # DAC sections
-            header['listDACInfo'] = []
-            for i in range(sections['DACSection']['llNumEntries']):
-                # read DACInfo
-                fid.seek(sections['DACSection']['uBlockIndex'] *
-                         BLOCKSIZE + sections['DACSection']['uBytes'] * i)
-                DACInfo = {}
-                for key, fmt in DACInfoDescription:
-                    val = fid.read_f(fmt)
-                    if len(val) == 1:
-                        DACInfo[key] = val[0]
-                    else:
-                        DACInfo[key] = np.array(val)
-                DACInfo['DACChNames'] = strings[DACInfo['lDACChannelNameIndex']
-                                                - 1]
-                DACInfo['DACChUnits'] = strings[
-                    DACInfo['lDACChannelUnitsIndex'] - 1]
-
-                header['listDACInfo'].append(DACInfo)
-
-            # EpochPerDAC  sections
-            # header['dictEpochInfoPerDAC'] is dict of dicts:
-            #  - the first index is the DAC number
-            #  - the second index is the epoch number
-            # It has to be done like that because data may not exist
-            # and may not be in sorted order
-            header['dictEpochInfoPerDAC'] = {}
-            for i in range(sections['EpochPerDACSection']['llNumEntries']):
-                #  read DACInfo
-                fid.seek(sections['EpochPerDACSection']['uBlockIndex'] *
-                         BLOCKSIZE +
-                         sections['EpochPerDACSection']['uBytes'] * i)
-                EpochInfoPerDAC = {}
-                for key, fmt in EpochInfoPerDACDescription:
-                    val = fid.read_f(fmt)
-                    if len(val) == 1:
-                        EpochInfoPerDAC[key] = val[0]
-                    else:
-                        EpochInfoPerDAC[key] = np.array(val)
-
-                DACNum = EpochInfoPerDAC['nDACNum']
-                EpochNum = EpochInfoPerDAC['nEpochNum']
-                # Checking if the key exists, if not, the value is empty
-                # so we have to create empty dict to populate
-                if DACNum not in header['dictEpochInfoPerDAC']:
-                    header['dictEpochInfoPerDAC'][DACNum] = {}
-
-                header['dictEpochInfoPerDAC'][DACNum][EpochNum] =\
-                    EpochInfoPerDAC
-
-        fid.close()
-
-        return header
+    def __init__(self, filename):
+        AxonRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
 
     def read_protocol(self):
         """
         Read the protocol waveform of the file, if present;
         function works with ABF2 only. Protocols can be reconstructed
         from the ABF1 header.
-
         Returns: list of segments (one for every episode)
                 with a list of analog signals (one for every DAC).
         """
-        header = self.read_header()
-
-        if header['fFileVersionNumber'] < 2.:
-            raise IOError("Protocol section is only present in ABF2 files.")
-
-        nADC = header['sections']['ADCSection'][
-            'llNumEntries']  # Number of ADC channels
-        nDAC = header['sections']['DACSection'][
-            'llNumEntries']  # Number of DAC channels
-        nSam = int(header['protocol'][
-            'lNumSamplesPerEpisode'] / nADC)  # Number of samples per episode
-        nEpi = header['lActualEpisodes']  # Actual number of episodes
-        sampling_rate = 1.e6 / header['protocol'][
-            'fADCSequenceInterval'] * pq.Hz
-
-        # Make a list of segments with analog signals with just holding levels
-        # List of segments relates to number of episodes, as for recorded data
+        sigs_by_segments, sig_names, sig_units = self.read_raw_protocol()
         segments = []
-        for epiNum in range(nEpi):
-            seg = Segment(index=epiNum)
-            # One analog signal for each DAC in segment (episode)
-            for DACNum in range(nDAC):
-                t_start = 0 * pq.s  # TODO: Possibly check with episode array
-                name = header['listDACInfo'][DACNum]['DACChNames']
-                unit = header['listDACInfo'][DACNum]['DACChUnits'].\
-                    replace(b'\xb5', b'u').decode('utf-8')  # \xb5 is µ
-                signal = np.ones(nSam) *\
-                    header['listDACInfo'][DACNum]['fDACHoldingLevel'] *\
-                    pq.Quantity(1, unit)
-                ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
-                                       t_start=t_start, name=name.decode("utf-8"),
-                                       channel_index=DACNum)
-                # If there are epoch infos for this DAC
-                if DACNum in header['dictEpochInfoPerDAC']:
-                    # Save last sample index
-                    i_last = int(nSam * 15625 / 10**6)
-                    # TODO guess for first holding
-                    # Go over EpochInfoPerDAC and change the analog signal
-                    # according to the epochs
-                    epochInfo = header['dictEpochInfoPerDAC'][DACNum]
-                    for epochNum, epoch in epochInfo.items():
-                        i_begin = i_last
-                        i_end = i_last + epoch['lEpochInitDuration'] +\
-                            epoch['lEpochDurationInc'] * epiNum
-                        dif = i_end-i_begin
-                        ana_sig[i_begin:i_end] = np.ones((dif, 1)) *\
-                            pq.Quantity(1, unit) * (epoch['fEpochInitLevel'] +
-                                                    epoch['fEpochLevelInc'] *
-                                                    epiNum)
-                        i_last += epoch['lEpochInitDuration'] +\
-                            epoch['lEpochDurationInc'] * epiNum
+        for seg_index, sigs in enumerate(sigs_by_segments):
+            seg = Segment(index=seg_index)
+            t_start = self._t_starts[seg_index] * pq.s
+            for c, sig in enumerate(sigs):
+                ana_sig = AnalogSignal(sig, sampling_rate=self._sampling_rate * pq.Hz,
+                                       t_start=t_start, name=sig_names[c], units=sig_units[c])
                 seg.analogsignals.append(ana_sig)
             segments.append(seg)
 
         return segments
-
-
-BLOCKSIZE = 512
-
-headerDescriptionV1 = [
-    ('fFileSignature', 0, '4s'),
-    ('fFileVersionNumber', 4, 'f'),
-    ('nOperationMode', 8, 'h'),
-    ('lActualAcqLength', 10, 'i'),
-    ('nNumPointsIgnored', 14, 'h'),
-    ('lActualEpisodes', 16, 'i'),
-    ('lFileStartTime', 24, 'i'),
-    ('lDataSectionPtr', 40, 'i'),
-    ('lTagSectionPtr', 44, 'i'),
-    ('lNumTagEntries', 48, 'i'),
-    ('lSynchArrayPtr', 92, 'i'),
-    ('lSynchArraySize', 96, 'i'),
-    ('nDataFormat', 100, 'h'),
-    ('nADCNumChannels', 120, 'h'),
-    ('fADCSampleInterval', 122, 'f'),
-    ('fSynchTimeUnit', 130, 'f'),
-    ('lNumSamplesPerEpisode', 138, 'i'),
-    ('lPreTriggerSamples', 142, 'i'),
-    ('lEpisodesPerRun', 146, 'i'),
-    ('fADCRange', 244, 'f'),
-    ('lADCResolution', 252, 'i'),
-    ('nFileStartMillisecs', 366, 'h'),
-    ('nADCPtoLChannelMap', 378, '16h'),
-    ('nADCSamplingSeq', 410, '16h'),
-    ('sADCChannelName', 442, '10s'*16),
-    ('sADCUnits', 602, '8s'*16),
-    ('fADCProgrammableGain', 730, '16f'),
-    ('fInstrumentScaleFactor', 922, '16f'),
-    ('fInstrumentOffset', 986, '16f'),
-    ('fSignalGain', 1050, '16f'),
-    ('fSignalOffset', 1114, '16f'),
-
-    ('nDigitalEnable', 1436, 'h'),
-    ('nActiveDACChannel', 1440, 'h'),
-    ('nDigitalHolding', 1584, 'h'),
-    ('nDigitalInterEpisode', 1586, 'h'),
-    ('nDigitalValue', 2588, '10h'),
-    ('lDACFilePtr', 2048, '2i'),
-    ('lDACFileNumEpisodes', 2056, '2i'),
-    ('fDACCalibrationFactor', 2074, '4f'),
-    ('fDACCalibrationOffset', 2090, '4f'),
-    ('nWaveformEnable', 2296, '2h'),
-    ('nWaveformSource', 2300, '2h'),
-    ('nInterEpisodeLevel', 2304, '2h'),
-    ('nEpochType', 2308, '20h'),
-    ('fEpochInitLevel', 2348, '20f'),
-    ('fEpochLevelInc', 2428, '20f'),
-    ('lEpochInitDuration', 2508, '20i'),
-    ('lEpochDurationInc', 2588, '20i'),
-
-    ('nTelegraphEnable', 4512, '16h'),
-    ('fTelegraphAdditGain', 4576, '16f'),
-    ('sProtocolPath', 4898, '384s'),
-    ]
-
-
-headerDescriptionV2 = [
-    ('fFileSignature', 0, '4s'),
-    ('fFileVersionNumber', 4, '4b'),
-    ('uFileInfoSize', 8, 'I'),
-    ('lActualEpisodes', 12, 'I'),
-    ('uFileStartDate', 16, 'I'),
-    ('uFileStartTimeMS', 20, 'I'),
-    ('uStopwatchTime', 24, 'I'),
-    ('nFileType', 28, 'H'),
-    ('nDataFormat', 30, 'H'),
-    ('nSimultaneousScan', 32, 'H'),
-    ('nCRCEnable', 34, 'H'),
-    ('uFileCRC', 36, 'I'),
-    ('FileGUID', 40, 'I'),
-    ('uCreatorVersion', 56, 'I'),
-    ('uCreatorNameIndex', 60, 'I'),
-    ('uModifierVersion', 64, 'I'),
-    ('uModifierNameIndex', 68, 'I'),
-    ('uProtocolPathIndex', 72, 'I'),
-    ]
-
-
-sectionNames = [
-    'ProtocolSection',
-    'ADCSection',
-    'DACSection',
-    'EpochSection',
-    'ADCPerDACSection',
-    'EpochPerDACSection',
-    'UserListSection',
-    'StatsRegionSection',
-    'MathSection',
-    'StringsSection',
-    'DataSection',
-    'TagSection',
-    'ScopeSection',
-    'DeltaSection',
-    'VoiceTagSection',
-    'SynchArraySection',
-    'AnnotationSection',
-    'StatsSection',
-    ]
-
-
-protocolInfoDescription = [
-    ('nOperationMode', 'h'),
-    ('fADCSequenceInterval', 'f'),
-    ('bEnableFileCompression', 'b'),
-    ('sUnused1', '3s'),
-    ('uFileCompressionRatio', 'I'),
-    ('fSynchTimeUnit', 'f'),
-    ('fSecondsPerRun', 'f'),
-    ('lNumSamplesPerEpisode', 'i'),
-    ('lPreTriggerSamples', 'i'),
-    ('lEpisodesPerRun', 'i'),
-    ('lRunsPerTrial', 'i'),
-    ('lNumberOfTrials', 'i'),
-    ('nAveragingMode', 'h'),
-    ('nUndoRunCount', 'h'),
-    ('nFirstEpisodeInRun', 'h'),
-    ('fTriggerThreshold', 'f'),
-    ('nTriggerSource', 'h'),
-    ('nTriggerAction', 'h'),
-    ('nTriggerPolarity', 'h'),
-    ('fScopeOutputInterval', 'f'),
-    ('fEpisodeStartToStart', 'f'),
-    ('fRunStartToStart', 'f'),
-    ('lAverageCount', 'i'),
-    ('fTrialStartToStart', 'f'),
-    ('nAutoTriggerStrategy', 'h'),
-    ('fFirstRunDelayS', 'f'),
-    ('nChannelStatsStrategy', 'h'),
-    ('lSamplesPerTrace', 'i'),
-    ('lStartDisplayNum', 'i'),
-    ('lFinishDisplayNum', 'i'),
-    ('nShowPNRawData', 'h'),
-    ('fStatisticsPeriod', 'f'),
-    ('lStatisticsMeasurements', 'i'),
-    ('nStatisticsSaveStrategy', 'h'),
-    ('fADCRange', 'f'),
-    ('fDACRange', 'f'),
-    ('lADCResolution', 'i'),
-    ('lDACResolution', 'i'),
-    ('nExperimentType', 'h'),
-    ('nManualInfoStrategy', 'h'),
-    ('nCommentsEnable', 'h'),
-    ('lFileCommentIndex', 'i'),
-    ('nAutoAnalyseEnable', 'h'),
-    ('nSignalType', 'h'),
-    ('nDigitalEnable', 'h'),
-    ('nActiveDACChannel', 'h'),
-    ('nDigitalHolding', 'h'),
-    ('nDigitalInterEpisode', 'h'),
-    ('nDigitalDACChannel', 'h'),
-    ('nDigitalTrainActiveLogic', 'h'),
-    ('nStatsEnable', 'h'),
-    ('nStatisticsClearStrategy', 'h'),
-    ('nLevelHysteresis', 'h'),
-    ('lTimeHysteresis', 'i'),
-    ('nAllowExternalTags', 'h'),
-    ('nAverageAlgorithm', 'h'),
-    ('fAverageWeighting', 'f'),
-    ('nUndoPromptStrategy', 'h'),
-    ('nTrialTriggerSource', 'h'),
-    ('nStatisticsDisplayStrategy', 'h'),
-    ('nExternalTagType', 'h'),
-    ('nScopeTriggerOut', 'h'),
-    ('nLTPType', 'h'),
-    ('nAlternateDACOutputState', 'h'),
-    ('nAlternateDigitalOutputState', 'h'),
-    ('fCellID', '3f'),
-    ('nDigitizerADCs', 'h'),
-    ('nDigitizerDACs', 'h'),
-    ('nDigitizerTotalDigitalOuts', 'h'),
-    ('nDigitizerSynchDigitalOuts', 'h'),
-    ('nDigitizerType', 'h'),
-    ]
-
-ADCInfoDescription = [
-    ('nADCNum', 'h'),
-    ('nTelegraphEnable', 'h'),
-    ('nTelegraphInstrument', 'h'),
-    ('fTelegraphAdditGain', 'f'),
-    ('fTelegraphFilter', 'f'),
-    ('fTelegraphMembraneCap', 'f'),
-    ('nTelegraphMode', 'h'),
-    ('fTelegraphAccessResistance', 'f'),
-    ('nADCPtoLChannelMap', 'h'),
-    ('nADCSamplingSeq', 'h'),
-    ('fADCProgrammableGain', 'f'),
-    ('fADCDisplayAmplification', 'f'),
-    ('fADCDisplayOffset', 'f'),
-    ('fInstrumentScaleFactor', 'f'),
-    ('fInstrumentOffset', 'f'),
-    ('fSignalGain', 'f'),
-    ('fSignalOffset', 'f'),
-    ('fSignalLowpassFilter', 'f'),
-    ('fSignalHighpassFilter', 'f'),
-    ('nLowpassFilterType', 'b'),
-    ('nHighpassFilterType', 'b'),
-    ('fPostProcessLowpassFilter', 'f'),
-    ('nPostProcessLowpassFilterType', 'c'),
-    ('bEnabledDuringPN', 'b'),
-    ('nStatsChannelPolarity', 'h'),
-    ('lADCChannelNameIndex', 'i'),
-    ('lADCUnitsIndex', 'i'),
-    ]
-
-TagInfoDescription = [
-    ('lTagTime', 'i'),
-    ('sComment', '56s'),
-    ('nTagType', 'h'),
-    ('nVoiceTagNumber_or_AnnotationIndex', 'h'),
-    ]
-
-DACInfoDescription = [
-    ('nDACNum', 'h'),
-    ('nTelegraphDACScaleFactorEnable', 'h'),
-    ('fInstrumentHoldingLevel', 'f'),
-    ('fDACScaleFactor', 'f'),
-    ('fDACHoldingLevel', 'f'),
-    ('fDACCalibrationFactor', 'f'),
-    ('fDACCalibrationOffset', 'f'),
-    ('lDACChannelNameIndex', 'i'),
-    ('lDACChannelUnitsIndex', 'i'),
-    ('lDACFilePtr', 'i'),
-    ('lDACFileNumEpisodes', 'i'),
-    ('nWaveformEnable', 'h'),
-    ('nWaveformSource', 'h'),
-    ('nInterEpisodeLevel', 'h'),
-    ('fDACFileScale', 'f'),
-    ('fDACFileOffset', 'f'),
-    ('lDACFileEpisodeNum', 'i'),
-    ('nDACFileADCNum', 'h'),
-    ('nConditEnable', 'h'),
-    ('lConditNumPulses', 'i'),
-    ('fBaselineDuration', 'f'),
-    ('fBaselineLevel', 'f'),
-    ('fStepDuration', 'f'),
-    ('fStepLevel', 'f'),
-    ('fPostTrainPeriod', 'f'),
-    ('fPostTrainLevel', 'f'),
-    ('nMembTestEnable', 'h'),
-    ('nLeakSubtractType', 'h'),
-    ('nPNPolarity', 'h'),
-    ('fPNHoldingLevel', 'f'),
-    ('nPNNumADCChannels', 'h'),
-    ('nPNPosition', 'h'),
-    ('nPNNumPulses', 'h'),
-    ('fPNSettlingTime', 'f'),
-    ('fPNInterpulse', 'f'),
-    ('nLTPUsageOfDAC', 'h'),
-    ('nLTPPresynapticPulses', 'h'),
-    ('lDACFilePathIndex', 'i'),
-    ('fMembTestPreSettlingTimeMS', 'f'),
-    ('fMembTestPostSettlingTimeMS', 'f'),
-    ('nLeakSubtractADCIndex', 'h'),
-    ('sUnused', '124s'),
-    ]
-
-EpochInfoPerDACDescription = [
-    ('nEpochNum', 'h'),
-    ('nDACNum', 'h'),
-    ('nEpochType', 'h'),
-    ('fEpochInitLevel', 'f'),
-    ('fEpochLevelInc', 'f'),
-    ('lEpochInitDuration', 'i'),
-    ('lEpochDurationInc', 'i'),
-    ('lEpochPulsePeriod', 'i'),
-    ('lEpochPulseWidth', 'i'),
-    ('sUnused', '18s'),
-    ]
-
-EpochInfoDescription = [
-    ('nEpochNum', 'h'),
-    ('nDigitalValue', 'h'),
-    ('nDigitalTrainValue', 'h'),
-    ('nAlternateDigitalValue', 'h'),
-    ('nAlternateDigitalTrainValue', 'h'),
-    ('bEpochCompression', 'b'),
-    ('sUnused', '21s'),
-    ]

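For reference, a minimal sketch of the new raw-IO based AxonIO shown above: `read_block()` no longer takes `cascade`, and `read_protocol()` now rebuilds the command waveform per episode from `read_raw_protocol()`. 'File_axon_1.abf' is a hypothetical ABF2 file path, not part of this repository:

    # Hedged sketch of using the AxonRawIO/BaseFromRaw based AxonIO.
    from neo.io import AxonIO

    r = AxonIO(filename='File_axon_1.abf')     # hypothetical ABF2 file
    bl = r.read_block(lazy=False)              # no cascade argument anymore
    print(bl.segments[0].analogsignals)

    # For ABF2 files, the protocol waveform can be rebuilt (one Segment per episode,
    # one AnalogSignal per DAC), as in the rewritten read_protocol() above.
    for seg in r.read_protocol():
        for sig in seg.analogsignals:
            print(sig.name, sig.units, sig.t_start)
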
+ 31 - 32
code/python-neo/neo/io/baseio.py

@@ -37,11 +37,11 @@ class BaseIO(object):
     The key methods of the class are:
         - ``read()`` - Read the whole object structure, return a list of Block
                 objects
-        - ``read_block(lazy=True, cascade=True, **params)`` - Read Block object
+        - ``read_block(lazy=True, **params)`` - Read Block object
                 from file with some parameters
-        - ``read_segment(lazy=True, cascade=True, **params)`` - Read Segment
+        - ``read_segment(lazy=True, **params)`` - Read Segment
                 object from file with some parameters
-        - ``read_spiketrainlist(lazy=True, cascade=True, **params)`` - Read
+        - ``read_spiketrainlist(lazy=True, **params)`` - Read
                 SpikeTrainList object from file with some parameters
         - ``write()`` - Write the whole object structure
         - ``write_block(**params)``    - Write Block object to file with some
@@ -52,7 +52,7 @@ class BaseIO(object):
                 file with some parameters
 
     The class can also implement these methods:
-        - ``read_XXX(lazy=True, cascade=True, **params)``
+        - ``read_XXX(lazy=True, **params)``
         - ``write_XXX(**params)``
         where XXX could be one of the objects supported by the IO
 
@@ -81,8 +81,8 @@ class BaseIO(object):
     readable_objects = []
     writeable_objects = []
 
-    has_header = False
-    is_streameable = False
+    support_lazy = False
+
     read_params = {}
     write_params = {}
 
@@ -107,18 +107,17 @@ class BaseIO(object):
             corelogger.addHandler(logging_handler)
 
     ######## General read/write methods #######################
-    def read(self, lazy=False, cascade=True,  **kargs):
+    def read(self, lazy=False, **kargs):
+        if lazy:
+            assert self.support_lazy, 'This IO do not support lazy loading'
         if Block in self.readable_objects:
             if (hasattr(self, 'read_all_blocks') and
                     callable(getattr(self, 'read_all_blocks'))):
-                return self.read_all_blocks(lazy=lazy, cascade=cascade,
-                                            **kargs)
-            return [self.read_block(lazy=lazy, cascade=cascade, **kargs)]
+                return self.read_all_blocks(lazy=lazy, **kargs)
+            return [self.read_block(lazy=lazy, **kargs)]
         elif Segment in self.readable_objects:
             bl = Block(name='One segment only')
-            if not cascade:
-                return bl
-            seg = self.read_segment(lazy=lazy, cascade=cascade,  **kargs)
+            seg = self.read_segment(lazy=lazy, **kargs)
             bl.segments.append(seg)
             bl.create_many_to_one_relationship()
             return [bl]
@@ -144,56 +143,56 @@ class BaseIO(object):
 
     ######## All individual read methods #######################
     def read_block(self, **kargs):
-        assert(Block in self.readable_objects), read_error
+        assert (Block in self.readable_objects), read_error
 
     def read_segment(self, **kargs):
-        assert(Segment in self.readable_objects), read_error
+        assert (Segment in self.readable_objects), read_error
 
     def read_unit(self, **kargs):
-        assert(Unit in self.readable_objects), read_error
+        assert (Unit in self.readable_objects), read_error
 
     def read_spiketrain(self, **kargs):
-        assert(SpikeTrain in self.readable_objects), read_error
+        assert (SpikeTrain in self.readable_objects), read_error
 
     def read_analogsignal(self, **kargs):
-        assert(AnalogSignal in self.readable_objects), read_error
+        assert (AnalogSignal in self.readable_objects), read_error
 
     def read_irregularlysampledsignal(self, **kargs):
-        assert(IrregularlySampledSignal in self.readable_objects), read_error
+        assert (IrregularlySampledSignal in self.readable_objects), read_error
 
     def read_channelindex(self, **kargs):
-        assert(ChannelIndex in self.readable_objects), read_error
+        assert (ChannelIndex in self.readable_objects), read_error
 
     def read_event(self, **kargs):
-        assert(Event in self.readable_objects), read_error
+        assert (Event in self.readable_objects), read_error
 
     def read_epoch(self, **kargs):
-        assert(Epoch in self.readable_objects), read_error
+        assert (Epoch in self.readable_objects), read_error
 
     ######## All individual write methods #######################
     def write_block(self, bl, **kargs):
-        assert(Block in self.writeable_objects), write_error
+        assert (Block in self.writeable_objects), write_error
 
     def write_segment(self, seg, **kargs):
-        assert(Segment in self.writeable_objects), write_error
+        assert (Segment in self.writeable_objects), write_error
 
     def write_unit(self, ut, **kargs):
-        assert(Unit in self.writeable_objects), write_error
+        assert (Unit in self.writeable_objects), write_error
 
     def write_spiketrain(self, sptr, **kargs):
-        assert(SpikeTrain in self.writeable_objects), write_error
+        assert (SpikeTrain in self.writeable_objects), write_error
 
-    def write_analogsignal(self, anasig,  **kargs):
-        assert(AnalogSignal in self.writeable_objects), write_error
+    def write_analogsignal(self, anasig, **kargs):
+        assert (AnalogSignal in self.writeable_objects), write_error
 
     def write_irregularlysampledsignal(self, irsig, **kargs):
-        assert(IrregularlySampledSignal in self.writeable_objects), write_error
+        assert (IrregularlySampledSignal in self.writeable_objects), write_error
 
     def write_channelindex(self, chx, **kargs):
-        assert(ChannelIndex in self.writeable_objects), write_error
+        assert (ChannelIndex in self.writeable_objects), write_error
 
     def write_event(self, ev, **kargs):
-        assert(Event in self.writeable_objects), write_error
+        assert (Event in self.writeable_objects), write_error
 
     def write_epoch(self, ep, **kargs):
-        assert(Epoch in self.writeable_objects), write_error
+        assert (Epoch in self.writeable_objects), write_error

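For reference, a minimal sketch of the new `support_lazy` guard in `BaseIO.read()` introduced above (it replaces the old `has_header`/`is_streameable` flags and the `cascade` keyword). `MyTextIO` is a hypothetical subclass used only for illustration:

    # Hedged sketch: lazy loading now fails fast unless the IO declares support_lazy = True.
    from neo.io.baseio import BaseIO

    class MyTextIO(BaseIO):
        support_lazy = False       # default after this change
        readable_objects = []      # a real IO would list Block, Segment, ... here

    io = MyTextIO()
    try:
        io.read(lazy=True)         # assertion fires before any file access
    except AssertionError as exc:
        print(exc)                 # prints the lazy-loading assertion message
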
File diff suppressed because it is too large
+ 57 - 2549
code/python-neo/neo/io/blackrockio.py


+ 0 - 483
code/python-neo/neo/io/blackrockio_deprecated.py

@@ -1,483 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Module for reading binary file from Blackrock format.
-"""
-
-import logging
-import struct
-
-import numpy as np
-import quantities as pq
-
-from neo.io.baseio import BaseIO
-from neo.core import (Block, Segment,
-                      RecordingChannel, ChannelIndex, AnalogSignal)
-from neo.io import tools
-
-
-class BlackrockIO(BaseIO):
-    """
-    Class for reading/writing data in a BlackRock Neuroshare ns5 files.
-    """
-    # Class variables demonstrating capabilities of this IO
-    is_readable        = True # This a only reading class
-    is_writable        = True # write is not supported
-
-    # This IO can only manipulate continuous data, not spikes or events
-    supported_objects  = [Block, Segment, AnalogSignal, ChannelIndex, RecordingChannel]
-
-    # Keep things simple by always returning a block
-    readable_objects    = [Block]
-
-    # And write a block
-    writeable_objects   = [Block]
-
-    # Not sure what these do, if anything
-    has_header         = False
-    is_streameable     = False
-
-    # The IO name and the file extensions it uses
-    name               = 'Blackrock'
-    extensions          = ['ns5']
-
-    # Operates on *.ns5 files
-    mode = 'file'
-
-    # GUI defaults for reading
-    # Most information is acquired from the file header.
-    read_params = {
-        Block: [
-            #('rangemin' , { 'value' : -10 } ),
-            #('rangemax' , { 'value' : 10 } ),
-            ]
-        }
-
-    # GUI defaults for writing (not supported)
-    write_params       = None
-
-
-    def __init__(self, filename, full_range=8192.*pq.mV) :
-        """Initialize Blackrock reader.
-
-        **Arguments**
-            filename: string, the filename to read
-            full_range: Quantity, the full-scale analog range of the data.
-                This is set by your digitizing hardware. It should be in
-                volts or millivolts.
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-        self.full_range = full_range
-
-    # The reading methods. The `lazy` and `cascade` parameters are imposed
-    # by neo.io API
-    def read_block(self, lazy=False, cascade=True,
-        n_starts=None, n_stops=None, channel_list=None):
-        """Reads the file and returns contents as a Block.
-
-        The Block contains one Segment for each entry in zip(n_starts,
-        n_stops). If these parameters are not specified, the default is
-        to store all data in one Segment.
-
-        The Block also contains one ChannelIndex for all channels.
-
-        n_starts: list or array of starting times of each Segment in
-            samples from the beginning of the file.
-        n_stops: similar, stopping times of each Segment
-        channel_list: list of channel numbers to get. The neural data channels
-            are 1 - 128. The analog inputs are 129 - 144. The default
-            is to acquire all channels.
-
-        Returns: Block object containing the data.
-        """
-
-
-        # Create block
-        block = Block(file_origin=self.filename)
-
-        if not cascade:
-            return block
-
-        self.loader = Loader(self.filename)
-        self.loader.load_file()
-        self.header = self.loader.header
-
-        # If channels not specified, get all
-        if channel_list is None:
-            channel_list = self.loader.get_neural_channel_numbers()
-
-        # If not specified, load all as one Segment
-        if n_starts is None:
-            n_starts = [0]
-            n_stops = [self.loader.header.n_samples]
-
-        #~ # Add channel hierarchy
-        #~ chx = ChannelIndex(name='allchannels',
-            #~ description='group of all channels', file_origin=self.filename)
-        #~ block.channel_indexes.append(chx)
-        #~ self.channel_number_to_recording_channel = {}
-
-        #~ # Add each channel at a time to hierarchy
-        #~ for ch in channel_list:
-            #~ ch_object = RecordingChannel(name='channel%d' % ch,
-                #~ file_origin=self.filename, index=ch)
-            #~ chx.index.append(ch_object.index)
-            #~ chx.channel_names.append(ch_object.name)
-            #~ chx.recordingchannels.append(ch_object)
-            #~ self.channel_number_to_recording_channel[ch] = ch_object
-
-        # Iterate through n_starts and n_stops and add one Segment
-        # per each.
-        for n, (t1, t2) in enumerate(zip(n_starts, n_stops)):
-            # Create segment and add metadata
-            seg = self.read_segment(n_start=t1, n_stop=t2, chlist=channel_list,
-                lazy=lazy, cascade=cascade)
-            seg.name = 'Segment %d' % n
-            seg.index = n
-            t1sec = t1 / self.loader.header.f_samp
-            t2sec = t2 / self.loader.header.f_samp
-            seg.description = 'Segment %d from %f to %f' % (n, t1sec, t2sec)
-
-            # Link to block
-            block.segments.append(seg)
-
-        # Create hardware view, and bijectivity
-        tools.populate_RecordingChannel(block)
-        block.create_many_to_one_relationship()
-
-        return block
-
-    def read_segment(self, n_start, n_stop, chlist=None, lazy=False, cascade=True):
-        """Reads a Segment from the file and stores in database.
-
-        The Segment will contain one AnalogSignal for each channel
-        and will go from n_start to n_stop (in samples).
-
-        Arguments:
-            n_start : time in samples that the Segment begins
-            n_stop : time in samples that the Segment ends
-
-        Python indexing is used, so n_stop is not inclusive.
-
-        Returns a Segment object containing the data.
-        """
-        # If no channel numbers provided, get all of them
-        if chlist is None:
-            chlist = self.loader.get_neural_channel_numbers()
-
-        # Conversion from bits to full_range units
-        conversion = self.full_range / 2**(8*self.header.sample_width)
-
-        # Create the Segment
-        seg = Segment(file_origin=self.filename)
-        t_start = float(n_start) / self.header.f_samp
-        t_stop = float(n_stop) / self.header.f_samp
-        seg.annotate(t_start=t_start)
-        seg.annotate(t_stop=t_stop)
-
-        # Load data from each channel and store
-        for ch in chlist:
-            if lazy:
-                sig = np.array([]) * conversion
-            else:
-                # Get the data from the loader
-                sig = np.array(\
-                    self.loader._get_channel(ch)[n_start:n_stop]) * conversion
-
-            # Create an AnalogSignal with the data in it
-            anasig = AnalogSignal(signal=sig,
-                sampling_rate=self.header.f_samp*pq.Hz,
-                t_start=t_start*pq.s, file_origin=self.filename,
-                description='Channel %d from %f to %f' % (ch, t_start, t_stop),
-                channel_index=int(ch))
-
-            if lazy:
-                anasig.lazy_shape = n_stop-n_start
-
-
-            # Link the signal to the segment
-            seg.analogsignals.append(anasig)
-
-            # Link the signal to the recording channel from which it came
-            #rc = self.channel_number_to_recording_channel[ch]
-            #rc.analogsignals.append(anasig)
-
-        return seg
-
-
-    def write_block(self, block):
-        """Writes block to `self.filename`.
-
-        *.ns5 BINARY FILE FORMAT
-        The following information is contained in the first part of the header
-        file.
-        The size in bytes, the variable name, the data type, and the meaning are
-        given below. Everything is little-endian.
-
-        8B. File_Type_ID. char. Always "NEURALSG"
-        16B. File_Spec. char. Always "30 kS/s\0"
-        4B. Period. uint32. Always 1.
-        4B. Channel_Count. uint32. Generally 32 or 34.
-        Channel_Count*4B. uint32. Channel_ID. One uint32 for each channel.
-
-        Thus the total length of the header is 8+16+4+4+Channel_Count*4.
-        Immediately after this header, the raw data begins.
-        Each sample is a 2B signed int16.
-        For our hardware, the conversion factor is 4096.0 / 2**16 mV/bit.
-        The samples for each channel are interleaved, so the first Channel_Count
-        samples correspond to the first sample from each channel, in the same
-        order as the channel id's in the header.
-
-        Variable names are consistent with the Neuroshare specification.
-        """
-        fi = open(self.filename, 'wb')
-        self._write_header(block, fi)
-
-        # Write each segment in order
-        for seg in block.segments:
-            # Create a 2d numpy array of analogsignals converted to bytes
-            all_signals = np.array([
-                np.rint(sig * 2**16 / self.full_range)
-                for sig in seg.analogsignals],
-                dtype=np.int)
-
-            # Write to file. We transpose because channel changes faster
-            # than time in this format.
-            for vals in all_signals.transpose():
-                fi.write(struct.pack('<%dh' % len(vals), *vals))
-
-        fi.close()
-
-
-    def _write_header(self, block, fi):
-        """Write header info about block to fi"""
-        if len(block.segments) > 0:
-            channel_indexes = channel_indexes_in_segment(block.segments[0])
-        else:
-            channel_indexes = []
-
-        # type of file
-        fi.write('NEURALSG')
-
-        # sampling rate, in text and integer
-        fi.write('30 kS/s\0')
-        for _ in range(8): fi.write('\0')
-        fi.write(struct.pack('<I', 1))
-
-        # channel count: one for each analogsignal, and then also for
-        # each column in each analogsignalarray
-        fi.write(struct.pack('<I', len(channel_indexes)))
-        for chidx in channel_indexes:
-            fi.write(struct.pack('<I', chidx))
-
-def channel_indexes_in_segment(seg):
-    """List channel indexes of analogsignals and analogsignalarrays"""
-    channel_indices = []
-    for sig in seg.analogsignals:
-        channel_indices.append(sig.recordingchannel.index)
-
-    for asa in seg.analogsignals:
-        channel_indices.append(asa.channel_index.index)
-
-    return channel_indices
-
-class HeaderInfo:
-    """Holds information from the ns5 file header about the file."""
-    pass
-
-class Loader(object):
-    """Object to load data from binary ns5 files.
-
-    Methods
-    -------
-    load_file : actually create links to file on disk
-    load_header : load header info and store in self.header
-    get_channel_as_array : Returns 1d numpy array of the entire recording
-        from requested channel.
-    get_analog_channel_as_array : Same as get_channel_as_array, but works
-        on analog channels rather than neural channels.
-    get_analog_channel_ids : Returns an array of analog channel numbers
-        existing in the file.
-    get_neural_channel_ids : Returns an array of neural channel numbers
-        existing in the file.
-    regenerate_memmap : Deletes and restores the underlying memmap, which
-        may free up memory.
-
-    Issues
-    ------
-    Memory leaks may exist
-    Not sure that regenerate_memmap actually frees up any memory.
-    """
-    def __init__(self, filename=None):
-        """Creates a new object to load data from the ns5 file you specify.
-
-        filename : path to ns5 file
-        Call load_file() to actually get data from the file.
-        """
-        self.filename = filename
-
-
-
-        self._mm = None
-        self.file_handle = None
-
-    def load_file(self, filename=None):
-        """Loads an ns5 file, if not already done.
-
-        *.ns5 BINARY FILE FORMAT
-        The following information is contained in the first part of the header
-        file.
-        The size in bytes, the variable name, the data type, and the meaning are
-        given below. Everything is little-endian.
-
-        8B. File_Type_ID. char. Always "NEURALSG"
-        16B. File_Spec. char. Always "30 kS/s\0"
-        4B. Period. uint32. Always 1.
-        4B. Channel_Count. uint32. Generally 32 or 34.
-        Channel_Count*4B. uint32. Channel_ID. One uint32 for each channel.
-
-        Thus the total length of the header is 8+16+4+4+Channel_Count*4.
-        Immediately after this header, the raw data begins.
-        Each sample is a 2B signed int16.
-        For our hardware, the conversion factor is 4096.0 / 2**16 mV/bit.
-        The samples for each channel are interleaved, so the first Channel_Count
-        samples correspond to the first sample from each channel, in the same
-        order as the channel id's in the header.
-
-        Variable names are consistent with the Neuroshare specification.
-        """
-        # If filename specified, use it, else use previously specified
-        if filename is not None: self.filename = filename
-
-        # Load header info into self.header
-        self.load_header()
-
-        # build an internal memmap linking to the data on disk
-        self.regenerate_memmap()
-
-    def load_header(self, filename=None):
-        """Reads ns5 file header and writes info to self.header"""
-        # (Re-)initialize header
-        self.header = HeaderInfo()
-
-        # the width of each sample is always 2 bytes
-        self.header.sample_width = 2
-
-        # If filename specified, use it, else use previously specified
-        if filename is not None: self.filename = filename
-        self.header.filename = self.filename
-
-        # first load the binary in directly
-        self.file_handle = open(self.filename, 'rb') # buffering=?
-
-        # Read File_Type_ID and check compatibility
-        # If v2.2 is used, this value will be 'NEURALCD', which uses a slightly
-        # more complex header. Currently unsupported.
-        self.header.File_Type_ID = [chr(ord(c)) \
-            for c in self.file_handle.read(8)]
-        if "".join(self.header.File_Type_ID) != 'NEURALSG':
-            logging.info( "Incompatible ns5 file format. Only v2.1 is supported.\nThis will probably not work.")
-
-
-        # Read File_Spec and check compatibility.
-        self.header.File_Spec = [chr(ord(c)) \
-            for c in self.file_handle.read(16)]
-        if "".join(self.header.File_Spec[:8]) != '30 kS/s\0':
-            logging.info( "File_Spec seems to indicate you did not sample at 30KHz.")
-
-
-        #R ead Period and verify that 30KHz was used. If not, the code will
-        # still run but it's unlikely the data will be useful.
-        self.header.period, = struct.unpack('<I', self.file_handle.read(4))
-        if self.header.period != 1:
-            logging.info( "Period seems to indicate you did not sample at 30KHz.")
-        self.header.f_samp = self.header.period * 30000.0
-
-
-        # Read Channel_Count and Channel_ID
-        self.header.Channel_Count, = struct.unpack('<I',
-            self.file_handle.read(4))
-        self.header.Channel_ID = [struct.unpack('<I',
-            self.file_handle.read(4))[0]
-            for _ in xrange(self.header.Channel_Count)]
-
-        # Compute total header length
-        self.header.Header = 8 + 16 + 4 + 4 + \
-            4*self.header.Channel_Count # in bytes
-
-        # determine length of file
-        self.file_handle.seek(0, 2) # last byte
-        self.header.file_total_size = self.file_handle.tell()
-        self.header.n_samples = \
-            (self.header.file_total_size - self.header.Header) / \
-            self.header.Channel_Count / self.header.sample_width
-        self.header.Length = np.float64(self.header.n_samples) / \
-            self.header.Channel_Count
-        if self.header.sample_width * self.header.Channel_Count * \
-            self.header.n_samples + \
-            self.header.Header != self.header.file_total_size:
-            logging.info( "I got header of %dB, %d channels, %d samples, \
-                but total file size of %dB" % (self.header.Header,
-                self.header.Channel_Count, self.header.n_samples,
-                self.header.file_total_size))
-
-        # close file
-        self.file_handle.close()
-
-
-    def regenerate_memmap(self):
-        """Delete internal memmap and create a new one, to save memory."""
-        try:
-            del self._mm
-        except AttributeError:
-            pass
-
-        self._mm = np.memmap(\
-            self.filename, dtype='h', mode='r',
-            offset=self.header.Header,
-            shape=(self.header.n_samples, self.header.Channel_Count))
-
-    def __del__(self):
-        # this deletion doesn't free memory, even though del l._mm does!
-        if '_mm' in self.__dict__: del self._mm
-        #else: logging.info( "gracefully skipping")
-
-    def _get_channel(self, channel_number):
-        """Returns slice into internal memmap for requested channel"""
-        try:
-            mm_index = self.header.Channel_ID.index(channel_number)
-        except ValueError:
-            logging.info( "Channel number %d does not exist" % channel_number)
-            return np.array([])
-
-        self.regenerate_memmap()
-        return self._mm[:, mm_index]
-
-    def get_channel_as_array(self, channel_number):
-        """Returns data from requested channel as a 1d numpy array."""
-        data = np.array(self._get_channel(channel_number))
-        self.regenerate_memmap()
-        return data
-
-    def get_analog_channel_as_array(self, analog_chn):
-        """Returns data from requested analog channel as a numpy array.
-
-        Simply adds 128 to the channel number to convert to ns5 number.
-        This is just the way Cyberkinetics numbers its channels.
-        """
-        return self.get_channel_as_array(analog_chn + 128)
-
-    def get_audio_channel_numbers(self):
-        """Deprecated, use get_analog_channel_ids"""
-        return self.get_analog_channel_ids()
-
-    def get_analog_channel_ids(self):
-        """Returns array of analog channel ids existing in the file.
-
-        These can then be loaded by calling get_analog_channel_as_array(chn).
-        """
-        return np.array(filter(lambda x: (x > 128) and (x <= 144),
-            self.header.Channel_ID)) - 128
-
-    def get_neural_channel_numbers(self):
-        return np.array(filter(lambda x: x <= 128, self.header.Channel_ID))
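A minimal sketch of reading the v2.1 ("NEURALSG") layout documented in the removed Loader.load_file docstring above; the field sizes, little-endian types, and interleaved int16 data layout are taken from that description, and the function name read_ns5_v21 is hypothetical, not part of the removed module:

import struct
import numpy as np

def read_ns5_v21(filename):
    # Fixed-layout header: 8B File_Type_ID, 16B File_Spec,
    # 4B Period (uint32), 4B Channel_Count (uint32), Channel_Count*4B Channel_IDs.
    with open(filename, 'rb') as f:
        file_type_id = f.read(8)                 # b'NEURALSG' for v2.1 files
        file_spec = f.read(16)                   # b'30 kS/s\x00' plus padding
        period, = struct.unpack('<I', f.read(4))
        channel_count, = struct.unpack('<I', f.read(4))
        channel_ids = list(struct.unpack('<%dI' % channel_count,
                                         f.read(4 * channel_count)))
    header_size = 8 + 16 + 4 + 4 + 4 * channel_count
    # Raw data: interleaved int16 samples, one column per channel id.
    raw = np.memmap(filename, dtype='<i2', mode='r', offset=header_size)
    n_samples = raw.size // channel_count
    data = raw[:n_samples * channel_count].reshape(n_samples, channel_count)
    # Physical units: multiply by 4096.0 / 2**16 mV per bit, per the hardware
    # note in the docstring above.
    return channel_ids, period * 30000.0, data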

+ 8 - 154
code/python-neo/neo/io/brainvisionio.py

@@ -1,159 +1,13 @@
 # -*- coding: utf-8 -*-
-"""
-Class for reading data from BrainVision product.
 
-This code was originally made by L. Pezard (2010), modified B. Burle and
-S. More.
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.brainvisionrawio import BrainVisionRawIO
 
-Supported : Read
 
-Author: sgarcia
-"""
+class BrainVisionIO(BrainVisionRawIO, BaseFromRaw):
+    """Class for reading data from the BrainVision product."""
+    _prefered_signal_group_mode = 'split-all'
 
-import os
-import re
-
-import numpy as np
-import quantities as pq
-
-from neo.io.baseio import BaseIO
-from neo.core import Segment, AnalogSignal, Event
-
-
-class BrainVisionIO(BaseIO):
-    """
-    Class for reading/writing data from BrainVision products (brainAmp,
-    brain analyser...)
-
-    Usage:
-        >>> from neo import io
-        >>> r = io.BrainVisionIO( filename = 'File_brainvision_1.eeg')
-        >>> seg = r.read_segment(lazy = False, cascade = True,)
-
-
-
-    """
-
-    is_readable = True
-    is_writable = False
-
-    supported_objects = [Segment, AnalogSignal, Event]
-    readable_objects = [Segment]
-    writeable_objects = []
-
-    has_header = False
-    is_streameable = False
-
-    read_params = {Segment: []}
-    write_params = {Segment: []}
-
-    name = None
-    extensions = ['vhdr']
-
-    mode = 'file'
-
-    def __init__(self, filename=None):
-        """
-        This class read/write a elan based file.
-
-        **Arguments**
-            filename : the filename to read or write
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-
-    def read_segment(self, lazy=False, cascade=True):
-
-        # # Read header file (vhdr)
-        header = read_brain_soup(self.filename)
-
-        assert header['Common Infos'][
-            'DataFormat'] == 'BINARY', NotImplementedError
-        assert header['Common Infos'][
-            'DataOrientation'] == 'MULTIPLEXED', NotImplementedError
-        nb_channel = int(header['Common Infos']['NumberOfChannels'])
-        sampling_rate = 1.e6 / float(
-            header['Common Infos']['SamplingInterval']) * pq.Hz
-
-        fmt = header['Binary Infos']['BinaryFormat']
-        fmts = { 'INT_16':np.int16,  'INT_32':np.int32, 'IEEE_FLOAT_32':np.float32,}
-
-        assert fmt in fmts, NotImplementedError
-        dt = fmts[fmt]
-
-        seg = Segment(file_origin=os.path.basename(self.filename))
-        if not cascade:
-            return seg
-
-        # read binary
-        if not lazy:
-            binary_file = os.path.splitext(self.filename)[0] + '.eeg'
-            sigs = np.memmap(binary_file, dt, 'r', ).astype('f')
-
-            n = int(sigs.size / nb_channel)
-            sigs = sigs[:n * nb_channel]
-            sigs = sigs.reshape(n, nb_channel)
-
-        for c in range(nb_channel):
-            name, ref, res, units = header['Channel Infos'][
-                'Ch%d' % (c + 1,)].split(',')
-            units = pq.Quantity(1, units.replace('µ', 'u'))
-            if lazy:
-                signal = [] * units
-            else:
-                signal = sigs[:,c]*units
-                if dt == np.int16 or dt == np.int32:
-                    signal *= np.float(res) 
-            anasig = AnalogSignal(signal = signal,
-                                                channel_index = c,
-                                                name = name,
-                                                sampling_rate = sampling_rate,
-                                                )
-            if lazy:
-                anasig.lazy_shape = -1
-            seg.analogsignals.append(anasig)
-
-        # read marker
-        marker_file = os.path.splitext(self.filename)[0] + '.vmrk'
-        all_info = read_brain_soup(marker_file)['Marker Infos']
-        all_types = []
-        times = []
-        labels = []
-        for i in range(len(all_info)):
-            type_, label, pos, size, channel = all_info[
-                'Mk%d' % (i + 1,)].split(',')[:5]
-            all_types.append(type_)
-            times.append(float(pos) / sampling_rate.magnitude)
-            labels.append(label)
-        all_types = np.array(all_types)
-        times = np.array(times) * pq.s
-        labels = np.array(labels, dtype='S')
-        for type_ in np.unique(all_types):
-            ind = type_ == all_types
-            if lazy:
-                ea = Event(name=str(type_))
-                ea.lazy_shape = -1
-            else:
-                ea = Event(
-                    times=times[ind], labels=labels[ind], name=str(type_))
-            seg.events.append(ea)
-
-        seg.create_many_to_one_relationship()
-        return seg
-
-
-def read_brain_soup(filename):
-    section = None
-    all_info = {}
-    for line in open(filename, 'rU'):
-        line = line.strip('\n').strip('\r')
-        if line.startswith('['):
-            section = re.findall('\[([\S ]+)\]', line)[0]
-            all_info[section] = {}
-            continue
-        if line.startswith(';'):
-            continue
-        if '=' in line and len(line.split('=')) == 2:
-            k, v = line.split('=')
-            all_info[section][k] = v
-    return all_info
+    def __init__(self, filename):
+        BrainVisionRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
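A minimal usage sketch for the rawio-backed BrainVisionIO above; the filename is a placeholder for a BrainVision header (.vhdr) file, and read_block() is assumed to be provided by BaseFromRaw:

from neo.io import BrainVisionIO

reader = BrainVisionIO(filename='File_brainvision_1.vhdr')  # pass the .vhdr header file
block = reader.read_block()
for seg in block.segments:
    print(seg.analogsignals)   # with 'split-all', one AnalogSignal per channel
    print(seg.events)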

+ 15 - 26
code/python-neo/neo/io/brainwaredamio.py

@@ -114,17 +114,19 @@ class BrainwareDamIO(BaseIO):
         self._filename = os.path.basename(filename)
         self._fsrc = None
 
-    def read(self, lazy=False, cascade=True, **kargs):
+    def read(self, lazy=False, **kargs):
         '''
         Reads raw data file "fname" generated with BrainWare
         '''
-        return self.read_block(lazy=lazy, cascade=cascade)
+        assert not lazy, 'Do not support lazy'
+        return self.read_block(lazy=lazy)
 
-    def read_block(self, lazy=False, cascade=True, **kargs):
+    def read_block(self, lazy=False, **kargs):
         '''
         Reads a block from the raw data file "fname" generated
         with BrainWare
         '''
+        assert not lazy, 'Do not support lazy'
 
         # there are no keyargs implemented to so far.  If someone tries to pass
         # them they are expecting them to do something or making a mistake,
@@ -136,15 +138,11 @@ class BrainwareDamIO(BaseIO):
 
         block = Block(file_origin=self._filename)
 
-        # if we aren't doing cascade, don't load anything
-        if not cascade:
-            return block
-
         # create the objects to store other objects
         chx = ChannelIndex(file_origin=self._filename,
-                                    channel_ids=np.array([1]),
-                                    index=np.array([0]),
-                                    channel_names=np.array(['Chan1'], dtype='S'))
+                           channel_ids=np.array([1]),
+                           index=np.array([0]),
+                           channel_names=np.array(['Chan1'], dtype='S'))
 
         # load objects into their containers
         block.channel_indexes.append(chx)
@@ -153,7 +151,7 @@ class BrainwareDamIO(BaseIO):
         with open(self._path, 'rb') as fobject:
             # while the file is not done keep reading segments
             while True:
-                seg = self._read_segment(fobject, lazy)
+                seg = self._read_segment(fobject)
                 # if there are no more Segments, stop
                 if not seg:
                     break
@@ -179,7 +177,7 @@ class BrainwareDamIO(BaseIO):
     # -------------------------------------------------------------------------
     # -------------------------------------------------------------------------
 
-    def _read_segment(self, fobject, lazy):
+    def _read_segment(self, fobject):
         '''
         Read a single segment with a single analogsignal
 
@@ -226,20 +224,11 @@ class BrainwareDamIO(BaseIO):
         # int16 * numpts -- the AnalogSignal itself
         signal = np.fromfile(fobject, dtype=np.int16, count=numpts)
 
-        # handle lazy loading
-        if lazy:
-            sig = AnalogSignal([], t_start=t_start*pq.d,
-                               file_origin=self._filename,
-                               sampling_period=1.*pq.s,
-                               units=pq.mV,
-                               dtype=np.float)
-            sig.lazy_shape = len(signal)
-        else:
-            sig = AnalogSignal(signal.astype(np.float)*pq.mV,
-                               t_start=t_start*pq.d,
-                               file_origin=self._filename,
-                               sampling_period=1.*pq.s,
-                               copy=False)
+        sig = AnalogSignal(signal.astype(np.float) * pq.mV,
+                           t_start=t_start * pq.d,
+                           file_origin=self._filename,
+                           sampling_period=1. * pq.s,
+                           copy=False)
         # Note: setting the sampling_period to 1 s is arbitrary
 
         # load the AnalogSignal and parameters into a new Segment

+ 10 - 23
code/python-neo/neo/io/brainwaref32io.py

@@ -119,7 +119,6 @@ class BrainwareF32IO(BaseIO):
         self._filename = path.basename(filename)
 
         self._fsrc = None
-        self.__lazy = False
 
         self._blk = None
         self.__unit = None
@@ -129,17 +128,18 @@ class BrainwareF32IO(BaseIO):
         self.__seg = None
         self.__spiketimes = None
 
-    def read(self, lazy=False, cascade=True, **kargs):
+    def read(self, lazy=False, **kargs):
         '''
         Reads simple spike data file "fname" generated with BrainWare
         '''
-        return self.read_block(lazy=lazy, cascade=cascade)
+        return self.read_block(lazy=lazy, )
 
-    def read_block(self, lazy=False, cascade=True, **kargs):
+    def read_block(self, lazy=False, **kargs):
         '''
         Reads a block from the simple spike data file "fname" generated
         with BrainWare
         '''
+        assert not lazy, 'Do not support lazy'
 
         # there are no keyargs implemented to so far.  If someone tries to pass
         # them they are expecting them to do something or making a mistake,
@@ -148,18 +148,13 @@ class BrainwareF32IO(BaseIO):
             raise NotImplementedError('This method does not have any '
                                       'argument implemented yet')
         self._fsrc = None
-        self.__lazy = lazy
 
         self._blk = Block(file_origin=self._filename)
         block = self._blk
 
-        # if we aren't doing cascade, don't load anything
-        if not cascade:
-            return block
-
         # create the objects to store other objects
         chx = ChannelIndex(file_origin=self._filename,
-                                    index=np.array([], dtype=np.int))
+                           index=np.array([], dtype=np.int))
         self.__unit = Unit(file_origin=self._filename)
 
         # load objects into their containers
@@ -183,7 +178,6 @@ class BrainwareF32IO(BaseIO):
 
         # cleanup attributes
         self._fsrc = None
-        self.__lazy = False
 
         self._blk = None
 
@@ -285,18 +279,11 @@ class BrainwareF32IO(BaseIO):
                                  **self.__params)
             self.__spiketimes = []
 
-        if self.__lazy:
-            train = SpikeTrain(pq.Quantity([], dtype=np.float32,
-                                           units=pq.ms),
-                               t_start=0*pq.ms, t_stop=self.__t_stop * pq.ms,
-                               file_origin=self._filename)
-            train.lazy_shape = len(self.__spiketimes)
-        else:
-            times = pq.Quantity(self.__spiketimes, dtype=np.float32,
-                                units=pq.ms)
-            train = SpikeTrain(times,
-                               t_start=0*pq.ms, t_stop=self.__t_stop * pq.ms,
-                               file_origin=self._filename)
+        times = pq.Quantity(self.__spiketimes, dtype=np.float32,
+                            units=pq.ms)
+        train = SpikeTrain(times,
+                           t_start=0 * pq.ms, t_stop=self.__t_stop * pq.ms,
+                           file_origin=self._filename)
 
         self.__seg.spiketrains = [train]
         self.__unit.spiketrains.append(train)

+ 35 - 47
code/python-neo/neo/io/brainwaresrcio.py

@@ -184,13 +184,6 @@ class BrainwareSrcIO(BaseIO):
         # whole file
         self._damaged = False
 
-        # this stores whether the current file is lazy loaded
-        self._lazy = False
-
-        # this stores whether the current file is cascading
-        # this is false by default so if we use read_block on its own it works
-        self._cascade = False
-
         # this stores an empty SpikeTrain which is used in various places.
         self._default_spiketrain = None
 
@@ -225,27 +218,27 @@ class BrainwareSrcIO(BaseIO):
         self._damaged = False
         self._fsrc = None
         self._seg0 = None
-        self._cascade = False
         self._file_origin = None
         self._lazy = False
         self._default_spiketrain = None
 
-    def read(self, lazy=False, cascade=True, **kargs):
+    def read(self, lazy=False, **kargs):
         """
         Reads the first Block from the Spike ReCording file "filename"
         generated with BrainWare.
 
         If you wish to read more than one Block, please use read_all_blocks.
         """
-        return self.read_block(lazy=lazy, cascade=cascade, **kargs)
+        return self.read_block(lazy=lazy, **kargs)
 
-    def read_block(self, lazy=False, cascade=True, **kargs):
+    def read_block(self, lazy=False, **kargs):
         """
         Reads the first Block from the Spike ReCording file "filename"
         generated with BrainWare.
 
         If you wish to read more than one Block, please use read_all_blocks.
         """
+        assert not lazy, 'Do not support lazy'
 
         # there are no keyargs implemented to so far.  If someone tries to pass
         # them they are expecting them to do something or making a mistake,
@@ -254,11 +247,11 @@ class BrainwareSrcIO(BaseIO):
             raise NotImplementedError('This method does not have any '
                                       'arguments implemented yet')
 
-        blockobj = self.read_next_block(cascade=cascade, lazy=lazy)
+        blockobj = self.read_next_block()
         self.close()
         return blockobj
 
-    def read_next_block(self, cascade=True, lazy=False, **kargs):
+    def read_next_block(self, **kargs):
         """
         Reads a single Block from the Spike ReCording file "filename"
         generated with BrainWare.
@@ -276,21 +269,17 @@ class BrainwareSrcIO(BaseIO):
             raise NotImplementedError('This method does not have any '
                                       'arguments implemented yet')
 
-        self._lazy = lazy
         self._opensrc()
 
         # create _default_spiketrain here for performance reasons
         self._default_spiketrain = self._init_default_spiketrain.copy()
         self._default_spiketrain.file_origin = self._file_origin
-        if lazy:
-            self._default_spiketrain.lazy_shape = (0,)
 
         # create the Block and the contents all Blocks of from IO share
         self._blk = Block(file_origin=self._file_origin)
-        if not cascade:
-            return self._blk
+
         self._chx = ChannelIndex(file_origin=self._file_origin,
-                                          index=np.array([], dtype=np.int))
+                                 index=np.array([], dtype=np.int))
         self._seg0 = Segment(name='Comments', file_origin=self._file_origin)
         self._unit0 = Unit(name='UnassignedSpikes',
                            file_origin=self._file_origin,
@@ -326,7 +315,7 @@ class BrainwareSrcIO(BaseIO):
         # result is None iff the end of the file is reached, so we can
         # close the file
         # this notification is not helpful if using the read method with
-        # cascade==True, since the user will know it is done when the method
+        # cascading, since the user will know it is done when the method
         # returns a value
         if result is None:
             self.logger.info('Last Block read.  Closing file.')
@@ -334,7 +323,7 @@ class BrainwareSrcIO(BaseIO):
 
         return blockobj
 
-    def read_all_blocks(self, cascade=True, lazy=False, **kargs):
+    def read_all_blocks(self, lazy=False, **kargs):
         """
         Reads all Blocks from the Spike ReCording file "filename"
         generated with BrainWare.
@@ -348,13 +337,12 @@ class BrainwareSrcIO(BaseIO):
         # there are no keyargs implemented to so far.  If someone tries to pass
         # them they are expecting them to do something or making a mistake,
         # neither of which should pass silently
+        assert not lazy, 'Do not support lazy'
+
         if kargs:
             raise NotImplementedError('This method does not have any '
                                       'argument implemented yet')
 
-        self._lazy = lazy
-        self._cascade = True
-
         self.close()
         self._opensrc()
 
@@ -364,8 +352,7 @@ class BrainwareSrcIO(BaseIO):
         blocks = []
         while self._isopen:
             try:
-                blocks.append(self.read_next_block(cascade=cascade,
-                                                   lazy=lazy))
+                blocks.append(self.read_next_block())
             except:
                 self.close()
                 raise
@@ -440,7 +427,7 @@ class BrainwareSrcIO(BaseIO):
                 # even the official reference files have invalid keys
                 # when using the official reference reader matlab
                 # scripts
-                self.logger.warning('unknown ID: %s',  seqid)
+                self.logger.warning('unknown ID: %s', seqid)
                 return []
 
         try:
@@ -518,13 +505,11 @@ class BrainwareSrcIO(BaseIO):
         _combine_events(events) - combine a list of Events
         with single events into one long Event
         """
-        if not events or self._lazy:
+        if not events:
             event = Event(times=pq.Quantity([], units=pq.s),
                           labels=np.array([], dtype='S'),
                           senders=np.array([], dtype='S'),
                           t_start=0)
-            if self._lazy:
-                event.lazy_shape = len(events)
             return event
 
         times = []
@@ -532,12 +517,24 @@ class BrainwareSrcIO(BaseIO):
         senders = []
         for event in events:
             times.append(event.times.magnitude)
-            labels.append(event.labels)
+            # With the introduction of array annotations and the adaptation of labels to use
+            # this infrastructure, even single labels are wrapped into an array to ensure
+            # consistency.
+            # The following line was 'labels.append(event.labels)', which assumed event.labels
+            # to be a scalar. Since the array only wraps that scalar, it can safely be assumed
+            # to have length 1, and the scalar is now accessed as the 0th element of
+            # event.labels.
+            if event.labels.shape == (1,):
+                labels.append(event.labels[0])
+            else:
+                raise AssertionError("This single event has multiple labels in an array with "
+                                     "shape {} instead of a single label.".
+                                     format(event.labels.shape))
             senders.append(event.annotations['sender'])
 
         times = np.array(times, dtype=np.float32)
         t_start = times.min()
-        times = pq.Quantity(times-t_start, units=pq.d).rescale(pq.s)
+        times = pq.Quantity(times - t_start, units=pq.d).rescale(pq.s)
 
         labels = np.array(labels)
         senders = np.array(senders)
@@ -569,9 +566,6 @@ class BrainwareSrcIO(BaseIO):
 
         if hasattr(spiketrains[0], 'waveforms') and len(spiketrains) == 1:
             train = spiketrains[0]
-            if self._lazy and not hasattr(train, 'lazy_shape'):
-                train.lazy_shape = train.shape
-                train = train[:0]
             return train
 
         if hasattr(spiketrains[0], 't_stop'):
@@ -637,12 +631,7 @@ class BrainwareSrcIO(BaseIO):
         # get the maximum time
         t_stop = times[-1] * 2.
 
-        if self._lazy:
-            timesshape = times.shape
-            times = pq.Quantity([], units=pq.ms, copy=False)
-            waveforms = pq.Quantity([[[]]], units=pq.mV)
-        else:
-            waveforms = pq.Quantity(waveforms, units=pq.mV, copy=False)
+        waveforms = pq.Quantity(waveforms, units=pq.mV, copy=False)
 
         train = SpikeTrain(times=times, copy=False,
                            t_start=self._default_t_start.copy(), t_stop=t_stop,
@@ -651,8 +640,6 @@ class BrainwareSrcIO(BaseIO):
                            timestamp=self._default_datetime,
                            respwin=np.array([], dtype=np.int32),
                            dama_index=-1, trig2=trig2, side='')
-        if self._lazy:
-            train.lazy_shape = timesshape
         return train
 
     # -------------------------------------------------------------------------
@@ -925,7 +912,7 @@ class BrainwareSrcIO(BaseIO):
 
         # int32 -- SpikeTrain length in ms
         spiketrainlen = pq.Quantity(np.fromfile(self._fsrc, dtype=np.int32,
-                                    count=1)[0], units=pq.ms, copy=False)
+                                                count=1)[0], units=pq.ms, copy=False)
 
         segments = []
         for train in trains:
@@ -981,7 +968,8 @@ class BrainwareSrcIO(BaseIO):
 
         # create a channel_index for the numchannels
         self._chx.index = np.arange(numchannels)
-        self._chx.channel_names = np.array(['Chan{}'.format(i) for i in range(numchannels)], dtype='S')
+        self._chx.channel_names = np.array(['Chan{}'.format(i)
+                                            for i in range(numchannels)], dtype='S')
 
         # store what side of the head we are dealing with
         for segment in segments:
@@ -1572,9 +1560,10 @@ if __name__ == '__main__':
                                        download_test_file,
                                        get_test_file_full_path,
                                        make_all_directories)
+
     shortname = BrainwareSrcIO.__name__.lower().strip('io')
     local_test_dir = create_local_temp_dir(shortname)
-    url = url_for_tests+shortname
+    url = url_for_tests + shortname
     FILES_TO_TEST.remove('long_170s_1rep_1clust_ch2.src')
     make_all_directories(FILES_TO_TEST, local_test_dir)
     download_test_file(FILES_TO_TEST, local_test_dir, url)
@@ -1583,4 +1572,3 @@ if __name__ == '__main__':
                                         directory=local_test_dir):
         ioobj = BrainwareSrcIO(path)
         ioobj.read_all_blocks(lazy=False)
-        ioobj.read_all_blocks(lazy=True)
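The _combine_events change above relies on the fact that, with array annotations, even a single label is stored as a length-1 array. A toy illustration of that access pattern follows; the data is made up and is not BrainWare output:

import numpy as np
import quantities as pq
from neo.core import Event

single = Event(times=np.array([1.5]) * pq.s,
               labels=np.array(['trig'], dtype='S'))
assert single.labels.shape == (1,)   # one event, one wrapped label
label = single.labels[0]             # recover the scalar, as in _combine_events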

+ 12 - 371
code/python-neo/neo/io/elanio.py

@@ -1,381 +1,22 @@
 # -*- coding: utf-8 -*-
-"""
-Class for reading/writing data from Elan.
 
-Elan is software for studying time-frequency maps of EEG data.
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.elanrawio import ElanRawIO
 
-Elan is developed in Lyon, France, at INSERM U821
 
-An Elan dataset is separated into 3 files :
- - .eeg          raw data file
- - .eeg.ent      hearder file
- - .eeg.pos      event file
-
-
-Depend on:
-
-Supported : Read and Write
-
-Author: sgarcia
-
-"""
-
-import datetime
-import os
-import re
-
-import numpy as np
-import quantities as pq
-
-from neo.io.baseio import BaseIO
-from neo.core import Segment, AnalogSignal, Event
-
-
-class VersionError(Exception):
-    def __init__(self, value):
-        self.value = value
-
-    def __str__(self):
-        return repr(self.value)
-
-import io
-
-class ElanIO(BaseIO):
+class ElanIO(ElanRawIO, BaseFromRaw):
     """
-    Classe for reading/writing data from Elan.
-
-    Usage:
-        >>> from neo import io
-        >>> r = io.ElanIO(filename='File_elan_1.eeg')
-        >>> seg = r.read_segment(lazy = False, cascade = True,)
-        >>> print seg.analogsignals # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<AnalogSignal(array([ 89.21203613,  88.83666992,  87.21008301, ...,
-            64.56298828, 67.94128418,  68.44177246], dtype=float32) * pA,
-            [0.0 s, 101.5808 s], sampling rate: 10000.0 Hz)>]
-        >>> print seg.spiketrains   # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        []
-        >>> print seg.events   # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        []
+    Class for reading data from Elan.
 
+    Elan is software for studying time-frequency maps of EEG data.
 
+    Elan is developed in Lyon, France, at INSERM U821
 
+    https://elan.lyon.inserm.fr
     """
+    _prefered_signal_group_mode = 'split-all'
+    # _prefered_signal_group_mode = 'group-by-same-units'
 
-    is_readable = True
-    is_writable = False
-
-    supported_objects = [Segment, AnalogSignal, Event]
-    readable_objects = [Segment]
-    writeable_objects = []
-
-    has_header = False
-    is_streameable = False
-
-    read_params = {Segment: []}
-    write_params = {Segment: []}
-
-    name = None
-    extensions = ['eeg']
-
-    mode = 'file'
-
-    def __init__(self, filename=None):
-        """
-        This class read/write a elan based file.
-
-        **Arguments**
-            filename : the filename to read or write
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-
-    def read_segment(self, lazy=False, cascade=True):
-
-        # # Read header file
-
-        f = io.open(self.filename + '.ent', mode='rt', encoding='ascii')
-        #version
-        version = f.readline()
-        if version[:2] != 'V2' and version[:2] != 'V3':
-            # raise('read only V2 .eeg.ent files')
-            raise VersionError('Read only V2 or V3 .eeg.ent files. %s given' %
-                               version[:2])
-
-        #info
-        info1 = f.readline()[:-1]
-        info2 = f.readline()[:-1]
-
-        # strange 2 line for datetime
-        #line1
-        l = f.readline()
-        r1 = re.findall('(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
-        r2 = re.findall('(\d+):(\d+):(\d+)', l)
-        r3 = re.findall('(\d+)-(\d+)-(\d+)', l)
-        YY, MM, DD, hh, mm, ss = (None, ) * 6
-        if len(r1) != 0:
-            DD, MM, YY, hh, mm, ss = r1[0]
-        elif len(r2) != 0:
-            hh, mm, ss = r2[0]
-        elif len(r3) != 0:
-            DD, MM, YY = r3[0]
-
-        #line2
-        l = f.readline()
-        r1 = re.findall('(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
-        r2 = re.findall('(\d+):(\d+):(\d+)', l)
-        r3 = re.findall('(\d+)-(\d+)-(\d+)', l)
-        if len(r1) != 0:
-            DD, MM, YY, hh, mm, ss = r1[0]
-        elif len(r2) != 0:
-            hh, mm, ss = r2[0]
-        elif len(r3) != 0:
-            DD, MM, YY = r3[0]
-        try:
-            fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
-                                             int(hh), int(mm), int(ss))
-        except:
-            fulldatetime = None
-
-        seg = Segment(file_origin=os.path.basename(self.filename),
-                      elan_version=version,
-                      info1=info1,
-                      info2=info2,
-                      rec_datetime=fulldatetime)
-
-        if not cascade:
-            f.close()
-            return seg
-
-        l = f.readline()
-        l = f.readline()
-        l = f.readline()
-
-        # sampling rate sample
-        l = f.readline()
-        sampling_rate = 1. / float(l) * pq.Hz
-
-        # nb channel
-        l = f.readline()
-        nbchannel = int(l) - 2
-
-        #channel label
-        labels = []
-        for c in range(nbchannel + 2):
-            labels.append(f.readline()[:-1])
-
-        # channel type
-        types = []
-        for c in range(nbchannel + 2):
-            types.append(f.readline()[:-1])
-
-        # channel unit
-        units = []
-        for c in range(nbchannel + 2):
-            units.append(f.readline()[:-1])
-        #print units
-
-        #range
-        min_physic = []
-        for c in range(nbchannel + 2):
-            min_physic.append(float(f.readline()))
-        max_physic = []
-        for c in range(nbchannel + 2):
-            max_physic.append(float(f.readline()))
-        min_logic = []
-        for c in range(nbchannel + 2):
-            min_logic.append(float(f.readline()))
-        max_logic = []
-        for c in range(nbchannel + 2):
-            max_logic.append(float(f.readline()))
-
-        #info filter
-        info_filter = []
-        for c in range(nbchannel + 2):
-            info_filter.append(f.readline()[:-1])
-
-        f.close()
-
-        #raw data
-        n = int(round(np.log(max_logic[0] - min_logic[0]) / np.log(2)) / 8)
-        data = np.fromfile(self.filename, dtype='i' + str(n))
-        data = data.byteswap().reshape(
-            (data.size // (nbchannel + 2), nbchannel + 2)).astype('float32')
-        for c in range(nbchannel):
-            if lazy:
-                sig = []
-            else:
-                sig = (data[:, c] - min_logic[c]) / (
-                    max_logic[c] - min_logic[c]) * \
-                    (max_physic[c] - min_physic[c]) + min_physic[c]
-
-            try:
-                unit = pq.Quantity(1, units[c])
-            except:
-                unit = pq.Quantity(1, '')
-
-            ana_sig = AnalogSignal(
-                sig * unit, sampling_rate=sampling_rate,
-                t_start=0. * pq.s, name=str(labels[c]), channel_index=c)
-            if lazy:
-                ana_sig.lazy_shape = data.shape[0]
-            ana_sig.annotate(channel_name=labels[c])
-            seg.analogsignals.append(ana_sig)
-
-        # triggers
-        f = open(self.filename + '.pos')
-        times = []
-        labels = []
-        reject_codes = []
-        for l in f.readlines():
-            r = re.findall(' *(\d+) *(\d+) *(\d+) *', l)
-            times.append(float(r[0][0]) / sampling_rate.magnitude)
-            labels.append(str(r[0][1]))
-            reject_codes.append(str(r[0][2]))
-        if lazy:
-            times = [] * pq.S
-            labels = np.array([], dtype='S')
-            reject_codes = []
-        else:
-            times = np.array(times) * pq.s
-            labels = np.array(labels, dtype='S')
-            reject_codes = np.array(reject_codes)
-        ea = Event(times=times, labels=labels, reject_codes=reject_codes)
-        if lazy:
-            ea.lazy_shape = len(times)
-        seg.events.append(ea)
-
-        f.close()
-
-        seg.create_many_to_one_relationship()
-        return seg
-
-
-        #~ def write_segment(self, segment, ):
-        #~ """
-
-        #~ Arguments:
-        #~ segment : the segment to write. Only analog signals and events
-        #~ will be written.
-        #~ """
-        #~ assert self.filename.endswith('.eeg')
-        #~ fid_ent = open(self.filename+'.ent' ,'wt')
-        #~ fid_eeg = open(self.filename ,'wt')
-        #~ fid_pos = open(self.filename+'.pos' ,'wt')
-
-        #~ seg = segment
-        #~ sampling_rate = seg._analogsignals[0].sampling_rate
-        #~ N = len(seg._analogsignals)
-
-        #~ #
-        #~ # header file
-        #~ #
-        #~ fid_ent.write('V2\n')
-        #~ fid_ent.write('OpenElectrophyImport\n')
-        #~ fid_ent.write('ELAN\n')
-        #~ t =  datetime.datetime.now()
-        #~ fid_ent.write(t.strftime('%d-%m-%Y %H:%M:%S')+'\n')
-        #~ fid_ent.write(t.strftime('%d-%m-%Y %H:%M:%S')+'\n')
-        #~ fid_ent.write('-1\n')
-        #~ fid_ent.write('reserved\n')
-        #~ fid_ent.write('-1\n')
-        #~ fid_ent.write('%g\n' %  (1./sampling_rate))
-
-        #~ fid_ent.write( '%d\n' % (N+2) )
-
-        #~ # channel label
-        #~ for i, anaSig in enumerate(seg.analogsignals) :
-        #~ try :
-        #~ fid_ent.write('%s.%d\n' % (anaSig.label, i+1 ))
-        #~ except :
-        #~ fid_ent.write('%s.%d\n' % ('nolabel', i+1 ))
-        #~ fid_ent.write('Num1\n')
-        #~ fid_ent.write('Num2\n')
-
-        #~ #channel type
-        #~ for i, anaSig in enumerate(seg.analogsignals) :
-        #~ fid_ent.write('Electrode\n')
-        #~ fid_ent.write( 'dateur echantillon\n')
-        #~ fid_ent.write( 'type evenement et byte info\n')
-
-        #~ #units
-        #~ for i, anaSig in enumerate(seg._analogsignals) :
-        #~ unit_txt = str(anaSig.units).split(' ')[1]
-        #~ fid_ent.write('%s\n' % unit_txt)
-        #~ fid_ent.write('sans\n')
-        #~ fid_ent.write('sans\n')
-
-        #~ #range and data
-        #~ list_range = []
-        #~ data = np.zeros( (seg._analogsignals[0].size , N+2)  , 'i2')
-        #~ for i, anaSig in enumerate(seg._analogsignals) :
-        #~ # in elan file unit is supposed to be in microV to have a big range
-        #~ # so auto translate
-        #~ if anaSig.units == pq.V or anaSig.units == pq.mV:
-        #~ s = anaSig.rescale('uV').magnitude
-        #~ elif anaSig.units == pq.uV:
-        #~ s = anaSig.magnitude
-        #~ else:
-        #~ # automatic range in arbitrry unit
-        #~ s = anaSig.magnitude
-        #~ s*= 10**(int(np.log10(abs(s).max()))+1)
-
-        #~ list_range.append( int(abs(s).max()) +1 )
-
-        #~ s2 = s*65535/(2*list_range[i])
-        #~ data[:,i] = s2.astype('i2')
-
-        #~ for r in list_range :
-        #~ fid_ent.write('-%.0f\n'% r)
-        #~ fid_ent.write('-1\n')
-        #~ fid_ent.write('-1\n')
-        #~ for r in list_range :
-        #~ fid_ent.write('%.0f\n'% r)
-        #~ fid_ent.write('+1\n')
-        #~ fid_ent.write('+1\n')
-
-        #~ for i in range(N+2) :
-        #~ fid_ent.write('-32768\n')
-        #~ for i in range(N+2) :
-        #~ fid_ent.write('+32767\n')
-
-        #~ #info filter
-        #~ for i in range(N+2) :
-        #~ fid_ent.write('passe-haut ? Hz passe-bas ? Hz\n')
-        #~ fid_ent.write('sans\n')
-        #~ fid_ent.write('sans\n')
-
-        #~ for i in range(N+2) :
-        #~ fid_ent.write('1\n')
-
-        #~ for i in range(N+2) :
-        #~ fid_ent.write('reserved\n')
-
-        #~ # raw file .eeg
-        #~ if len(seg._eventarrays) == 1:
-        #~ ea = seg._eventarrays[0]
-        #~ trigs = (ea.times*sampling_rate).magnitude
-        #~ trigs = trigs.astype('i')
-        #~ trigs2 = trigs[ (trigs>0) & (trigs<data.shape[0]) ]
-        #~ data[trigs2,-1] = 1
-        #~ fid_eeg.write(data.byteswap().tostring())
-
-
-        #~ # pos file  eeg.pos
-        #~ if len(seg._eventarrays) == 1:
-        #~ ea = seg._eventarray[0]
-        #~ if 'reject_codes' in ea.annotations and \
-        #~     len(ea.reject_codes) == len(ea.times):
-        #~ rcs = ea.reject_codes
-        #~ else:
-        #~ rcs = np.array(  [ '' ]*ea.times.size)
-        #~ if len(ea.labels) == len(ea.times):
-        #~ labels = ea.labels
-        #~ else:
-        #~ labels = np.array(  [ '' ]*ea.times.size)
-
-        #~ for t, label, rc in zip(ea.times, labels, rcs):
-        #~ fid_pos.write('%d    %s    %s\n' % (trigs[i] , ev.label,0))
-
-        #~ fid_ent.close()
-        #~ fid_eeg.close()
-        #~ fid_pos.close()
+    def __init__(self, filename):
+        ElanRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
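A minimal usage sketch for the rawio-backed ElanIO above; the filename is a placeholder for an Elan dataset (the .eeg file with its .eeg.ent header and .eeg.pos event files, as described in the removed module docstring), and read_segment() is assumed to come from BaseFromRaw:

from neo.io import ElanIO

reader = ElanIO(filename='File_elan_1.eeg')
seg = reader.read_segment()
print(seg.analogsignals)   # one AnalogSignal per channel with 'split-all'
print(seg.events)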

File diff suppressed because it is too large
+ 1364 - 1328
code/python-neo/neo/io/elphyio.py


+ 18 - 314
code/python-neo/neo/io/exampleio.py

@@ -1,327 +1,31 @@
 # -*- coding: utf-8 -*-
 """
-Class for "reading" fake data from an imaginary file.
+neo.io has been split into a 2-level API:
+  * neo.io: this API gives neo objects
+  * neo.rawio: this API gives raw data as they are stored in files.
 
-For the user, it generates a :class:`Segment` or a :class:`Block` with a
-sinusoidal :class:`AnalogSignal`, a :class:`SpikeTrain` and an
-:class:`Event`.
+Developers are encouraged to use neo.rawio.
 
-For a developer, it is just an example showing guidelines for someone who wants
-to develop a new IO module.
-
-Depends on: scipy
-
-Supported: Read
+Once the rawio class exists, the neo.io class follows almost automatically
+with the kind of code shown below.
 
 Author: sgarcia
 
 """
 
-# needed for python 3 compatibility
-from __future__ import absolute_import
-
-# note neo.core needs only numpy and quantities
-import numpy as np
-import quantities as pq
-
-# but my specific IO can depend on many other packages
-try:
-    from scipy import stats
-except ImportError as err:
-    HAVE_SCIPY = False
-    SCIPY_ERR = err
-else:
-    HAVE_SCIPY = True
-    SCIPY_ERR = None
-
-# I need to subclass BaseIO
-from neo.io.baseio import BaseIO
-
-# to import from core
-from neo.core import Segment, AnalogSignal, SpikeTrain, Event
-
-
-# I need to subclass BaseIO
-class ExampleIO(BaseIO):
-    """
-    Class for "reading" fake data from an imaginary file.
-
-    For the user, it generates a :class:`Segment` or a :class:`Block` with a
-    sinusoidal :class:`AnalogSignal`, a :class:`SpikeTrain` and an
-    :class:`Event`.
-
-    For a developer, it is just an example showing guidelines for someone who wants
-    to develop a new IO module.
-
-    Two rules for developers:
-      * Respect the Neo IO API (:ref:`neo_io_API`)
-      * Follow :ref:`io_guiline`
-
-    Usage:
-        >>> from neo import io
-        >>> r = io.ExampleIO(filename='itisafake.nof')
-        >>> seg = r.read_segment(lazy=False, cascade=True)
-        >>> print(seg.analogsignals)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<AnalogSignal(array([ 0.19151945,  0.62399373,  0.44149764, ...,  0.96678374,
-        ...
-        >>> print(seg.spiketrains)    # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-         [<SpikeTrain(array([ -0.83799524,   6.24017951,   7.76366686,   4.45573701,
-            12.60644415,  10.68328994,   8.07765735,   4.89967804,
-        ...
-        >>> print(seg.events)    # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<Event: TriggerB@9.6976 s, TriggerA@10.2612 s, TriggerB@2.2777 s, TriggerA@6.8607 s, ...
-        >>> anasig = r.read_analogsignal(lazy=True, cascade=False)
-        >>> print(anasig._data_description)
-        {'shape': (150000,)}
-        >>> anasig = r.read_analogsignal(lazy=False, cascade=False)
-
-    """
-
-    is_readable = True  # This class can only read data
-    is_writable = False  # write is not supported
-
-    # This class is able to directly or indirectly handle the following objects
-    # You can notice that this greatly simplifies the full Neo object hierarchy
-    supported_objects  = [ Segment , AnalogSignal, SpikeTrain, Event ]
-
-    # This class can return either a Block or a Segment
-    # The first one is the default ( self.read )
-    # These lists should go from highest object to lowest object because
-    # common_io_test assumes it.
-    readable_objects    = [ Segment , AnalogSignal, SpikeTrain ]
-    # This class is not able to write objects
-    writeable_objects   = [ ]
-
-    has_header         = False
-    is_streameable     = False
-
-    # This is for GUI stuff : a definition for parameters when reading.
-    # This dict should be keyed by object (`Block`). Each entry is a list
-    # of tuple. The first entry in each tuple is the parameter name. The
-    # second entry is a dict with keys 'value' (for default value),
-    # and 'label' (for a descriptive name).
-    # Note that if the highest-level object requires parameters,
-    # common_io_test will be skipped.
-    read_params = {
-        Segment : [
-            ('segment_duration',
-                {'value' : 15., 'label' : 'Segment size (s.)'}),
-            ('num_analogsignal',
-                {'value' : 8, 'label' : 'Number of recording points'}),
-            ('num_spiketrain_by_channel',
-                {'value' : 3, 'label' : 'Num of spiketrains'}),
-            ],
-        }
-
-    # do not supported write so no GUI stuff
-    write_params       = None
-
-    name               = 'example'
-
-    extensions          = [ 'nof' ]
-
-    # mode can be 'file' or 'dir' or 'fake' or 'database'
-    # the main case is 'file' but some reader are base on a directory or a database
-    # this info is for GUI stuff also
-    mode = 'fake'
-
-
-
-    def __init__(self , filename = None) :
-        """
-
-
-        Arguments:
-            filename : the filename
-
-        Note:
-            - filename is here just for exampe because it will not be take in account
-            - if mode=='dir' the argument should be dirname (See TdtIO)
-
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-        # Seed so all instances can return the same values
-        np.random.seed(1234)
-
-
-    # Segment reading is supported so I define this :
-    def read_segment(self,
-                     # the 2 first keyword arguments are imposed by neo.io API
-                     lazy = False,
-                     cascade = True,
-                     # all following arguments are decied by this IO and are free
-                     segment_duration = 15.,
-                     num_analogsignal = 4,
-                     num_spiketrain_by_channel = 3,
-                    ):
-        """
-        Return a fake Segment.
-
-        The self.filename does not matter.
-
-        In this IO read by default a Segment.
-
-        This is just a example to be adapted to each ClassIO.
-        In this case these 3 paramters are  taken in account because this function
-        return a generated segment with fake AnalogSignal and fake SpikeTrain.
-
-        Parameters:
-            segment_duration :is the size in secend of the segment.
-            num_analogsignal : number of AnalogSignal in this segment
-            num_spiketrain : number of SpikeTrain in this segment
-
-        """
-
-        sampling_rate = 10000. #Hz
-        t_start = -1.
-
-
-        #time vector for generated signal
-        timevect = np.arange(t_start, t_start+ segment_duration , 1./sampling_rate)
-
-        # create an empty segment
-        seg = Segment( name = 'it is a seg from exampleio')
-
-        if cascade:
-            # read nested analosignal
-            for i in range(num_analogsignal):
-                ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
-                                            channel_index = i ,segment_duration = segment_duration, t_start = t_start)
-                seg.analogsignals += [ ana ]
-
-            # read nested spiketrain
-            for i in range(num_analogsignal):
-                for _ in range(num_spiketrain_by_channel):
-                    sptr = self.read_spiketrain(lazy = lazy , cascade = cascade ,
-                                                            segment_duration = segment_duration, t_start = t_start , channel_index = i)
-                    seg.spiketrains += [ sptr ]
-
-
-            # create an Event that mimic triggers.
-            # note that ExampleIO  do not allow to acess directly to Event
-            # for that you need read_segment(cascade = True)
-
-            if lazy:
-                # in lazy case no data are readed
-                # eva is empty
-                eva = Event()
-            else:
-                # otherwise it really contain data
-                n = 1000
-
-                # neo.io support quantities my vector use second for unit
-                eva = Event(timevect[(np.random.rand(n)*timevect.size).astype('i')]* pq.s)
-                # all duration are the same
-                eva.durations = np.ones(n)*500*pq.ms  # Event doesn't have durations. Is Epoch intended here?
-                # label
-                l = [ ]
-                for i in range(n):
-                    if np.random.rand()>.6: l.append( 'TriggerA' )
-                    else : l.append( 'TriggerB' )
-                eva.labels = np.array( l )
-
-            seg.events += [ eva ]
-
-        seg.create_many_to_one_relationship()
-        return seg
-
-
-    def read_analogsignal(self ,
-                          # the 2 first key arguments are imposed by neo.io API
-                          lazy = False,
-                          cascade = True,
-                          channel_index = 0,
-                          segment_duration = 15.,
-                          t_start = -1,
-                          ):
-        """
-        With this IO AnalogSignal can e acces directly with its channel number
-
-        """
-        sr = 10000.
-        sinus_freq = 3. # Hz
-        #time vector for generated signal:
-        tvect = np.arange(t_start, t_start+ segment_duration , 1./sr)
-
-
-        if lazy:
-            anasig = AnalogSignal([], units='V', sampling_rate=sr * pq.Hz,
-                                  t_start=t_start * pq.s,
-                                  channel_index=channel_index)
-            # we add the attribute lazy_shape with the size if loaded
-            anasig.lazy_shape = tvect.shape
-        else:
-            # create analogsignal (sinus of 3 Hz)
-            sig = np.sin(2*np.pi*tvect*sinus_freq + channel_index/5.*2*np.pi)+np.random.rand(tvect.size)
-            anasig = AnalogSignal(sig, units= 'V', sampling_rate=sr * pq.Hz,
-                                  t_start=t_start * pq.s,
-                                  channel_index=channel_index)
-
-        # for attributes out of neo you can annotate
-        anasig.annotate(info = 'it is a sinus of %f Hz' %sinus_freq )
-
-        return anasig
-
-
-
-
-
-    def read_spiketrain(self ,
-                                            # the 2 first key arguments are imposed by neo.io API
-                                            lazy = False,
-                                            cascade = True,
-
-                                                segment_duration = 15.,
-                                                t_start = -1,
-                                                channel_index = 0,
-                                                ):
-        """
-        With this IO SpikeTrain can e acces directly with its channel number
-        """
-        # There are 2 possibles behaviour for a SpikeTrain
-        # holding many Spike instance or directly holding spike times
-        # we choose here the first :
-        if not HAVE_SCIPY:
-            raise SCIPY_ERR
-
-        num_spike_by_spiketrain = 40
-        sr = 10000.
-
-        if lazy:
-            times = [ ]
-        else:
-            times = (np.random.rand(num_spike_by_spiketrain)*segment_duration +
-                     t_start)
-
-        # create a spiketrain
-        spiketr = SpikeTrain(times, t_start = t_start*pq.s, t_stop = (t_start+segment_duration)*pq.s ,
-                                            units = pq.s,
-                                            name = 'it is a spiketrain from exampleio',
-                                            )
-
-        if lazy:
-            # we add the attribute lazy_shape with the size if loaded
-            spiketr.lazy_shape = (num_spike_by_spiketrain,)
-
-        # ours spiketrains also hold the waveforms:
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.examplerawio import ExampleRawIO
 
-        # 1 generate a fake spike shape (2d array if trodness >1)
-        w1 = -stats.nct.pdf(np.arange(11,60,4), 5,20)[::-1]/3.
-        w2 = stats.nct.pdf(np.arange(11,60,2), 5,20)
-        w = np.r_[ w1 , w2 ]
-        w = -w/max(w)
 
-        if not lazy:
-            # in the neo API the waveforms attr is 3 D in case tetrode
-            # in our case it is mono electrode so dim 1 is size 1
-            waveforms  = np.tile( w[np.newaxis,np.newaxis,:], ( num_spike_by_spiketrain ,1, 1) )
-            waveforms *=  np.random.randn(*waveforms.shape)/6+1
-            spiketr.waveforms = waveforms*pq.mV
-            spiketr.sampling_rate = sr * pq.Hz
-            spiketr.left_sweep = 1.5* pq.s
+class ExampleIO(ExampleRawIO, BaseFromRaw):
+    name = 'example IO'
+    description = "Fake IO"
 
-        # for attributes out of neo you can annotate
-        spiketr.annotate(channel_index = channel_index)
+    # This is an important choice when there are several channels.
+    #   'split-all' : one AnalogSignal per channel
+    #   'group-by-same-units' : one 2D AnalogSignal for each group of channels with the same units
+    _prefered_signal_group_mode = 'group-by-same-units'
 
-        return spiketr
+    def __init__(self, filename=''):
+        ExampleRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
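A minimal usage sketch for the rawio-backed ExampleIO above; since ExampleRawIO generates fake data, the filename is only a placeholder ('fake' mode), and read_segment() is assumed to be provided by BaseFromRaw:

from neo.io import ExampleIO

r = ExampleIO(filename='itisafake.nof')
seg = r.read_segment()      # Segment with generated AnalogSignals and SpikeTrains
print(seg.analogsignals)
print(seg.spiketrains)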

+ 51 - 65
code/python-neo/neo/io/hdf5io.py

@@ -11,6 +11,7 @@ import logging
 import pickle
 import numpy as np
 import quantities as pq
+
 try:
     import h5py
 except ImportError as err:
@@ -60,9 +61,8 @@ class NeoHdf5IO(BaseIO):
         BaseIO.__init__(self, filename=filename)
         self._data = h5py.File(filename, 'r')
         self.object_refs = {}
-        self._lazy = False
 
-    def read_all_blocks(self, lazy=False, cascade=True, merge_singles=True, **kargs):
+    def read_all_blocks(self, lazy=False, merge_singles=True, **kargs):
         """
         Loads all blocks in the file that are attached to the root (which
         happens when they are saved with save() or write_block()).
@@ -71,8 +71,8 @@ class NeoHdf5IO(BaseIO):
          `AnalogSignal` objects into multichannel objects, and similarly for single `Epoch`,
          `Event` and `IrregularlySampledSignal` objects.
         """
-        self._lazy = lazy
-        self._cascade = cascade
+        assert not lazy, 'Do not support lazy'
+
         self.merge_singles = merge_singles
 
         blocks = []
@@ -81,11 +81,12 @@ class NeoHdf5IO(BaseIO):
                 blocks.append(self._read_block(node))
         return blocks
 
-    def read_block(self, lazy=False, cascade=True, **kargs):
+    def read_block(self, lazy=False, **kargs):
         """
         Load the first block in the file.
         """
-        return self.read_all_blocks(lazy=lazy, cascade=cascade)[0]
+        assert not lazy, 'Do not support lazy'
+        return self.read_all_blocks(lazy=lazy)[0]
 
     def _read_block(self, node):
         attributes = self._get_standard_attributes(node)
@@ -93,28 +94,29 @@ class NeoHdf5IO(BaseIO):
             attributes["index"] = int(attributes["index"])
         block = Block(**attributes)
 
-        if self._cascade:
-            for name, child_node in node['segments'].items():
-                if "Segment" in name:
-                    block.segments.append(self._read_segment(child_node, parent=block))
-
-            if len(node['recordingchannelgroups']) > 0:
-                for name, child_node in node['recordingchannelgroups'].items():
-                    if "RecordingChannelGroup" in name:
-                        block.channel_indexes.append(self._read_recordingchannelgroup(child_node, parent=block))
-                self._resolve_channel_indexes(block)
-            elif self.merge_singles:
-                # if no RecordingChannelGroups are defined, merging
-                # takes place here.
-                for segment in block.segments:
-                    if hasattr(segment, 'unmerged_analogsignals'):
-                        segment.analogsignals.extend(
-                                self._merge_data_objects(segment.unmerged_analogsignals))
-                        del segment.unmerged_analogsignals
-                    if hasattr(segment, 'unmerged_irregularlysampledsignals'):
-                        segment.irregularlysampledsignals.extend(
-                                self._merge_data_objects(segment.unmerged_irregularlysampledsignals))
-                        del segment.unmerged_irregularlysampledsignals
+        for name, child_node in node['segments'].items():
+            if "Segment" in name:
+                block.segments.append(self._read_segment(child_node, parent=block))
+
+        if len(node['recordingchannelgroups']) > 0:
+            for name, child_node in node['recordingchannelgroups'].items():
+                if "RecordingChannelGroup" in name:
+                    block.channel_indexes.append(
+                        self._read_recordingchannelgroup(child_node, parent=block))
+            self._resolve_channel_indexes(block)
+        elif self.merge_singles:
+            # if no RecordingChannelGroups are defined, merging
+            # takes place here.
+            for segment in block.segments:
+                if hasattr(segment, 'unmerged_analogsignals'):
+                    segment.analogsignals.extend(
+                        self._merge_data_objects(segment.unmerged_analogsignals))
+                    del segment.unmerged_analogsignals
+                if hasattr(segment, 'unmerged_irregularlysampledsignals'):
+                    segment.irregularlysampledsignals.extend(
+                        self._merge_data_objects(segment.unmerged_irregularlysampledsignals))
+                    del segment.unmerged_irregularlysampledsignals
+
         return block
 
     def _read_segment(self, node, parent):
@@ -183,10 +185,6 @@ class NeoHdf5IO(BaseIO):
         signal = AnalogSignal(self._get_quantity(node["signal"]),
                               sampling_rate=sampling_rate, t_start=t_start,
                               **attributes)
-        if self._lazy:
-            signal.lazy_shape = node["signal"].shape
-            if len(signal.lazy_shape) == 1:
-                signal.lazy_shape = (signal.lazy_shape[0], 1)
         signal.segment = parent
         self.object_refs[node.attrs["object_ref"]] = signal
         return signal
@@ -200,10 +198,6 @@ class NeoHdf5IO(BaseIO):
                                           signal=self._get_quantity(node["signal"]),
                                           **attributes)
         signal.segment = parent
-        if self._lazy:
-            signal.lazy_shape = node["signal"].shape
-            if len(signal.lazy_shape) == 1:
-                signal.lazy_shape = (signal.lazy_shape[0], 1)
         return signal
 
     def _read_spiketrain(self, node, parent):
@@ -215,8 +209,6 @@ class NeoHdf5IO(BaseIO):
                                 t_start=t_start, t_stop=t_stop,
                                 **attributes)
         spiketrain.segment = parent
-        if self._lazy:
-            spiketrain.lazy_shape = node["times"].shape
         self.object_refs[node.attrs["object_ref"]] = spiketrain
         return spiketrain
 
@@ -224,14 +216,9 @@ class NeoHdf5IO(BaseIO):
         attributes = self._get_standard_attributes(node)
         times = self._get_quantity(node["times"])
         durations = self._get_quantity(node["durations"])
-        if self._lazy:
-            labels = np.array((), dtype=node["labels"].dtype)
-        else:
-            labels = node["labels"].value
+        labels = node["labels"].value
         epoch = Epoch(times=times, durations=durations, labels=labels, **attributes)
         epoch.segment = parent
-        if self._lazy:
-            epoch.lazy_shape = node["times"].shape
         return epoch
 
     def _read_epoch(self, node, parent):
@@ -240,14 +227,9 @@ class NeoHdf5IO(BaseIO):
     def _read_eventarray(self, node, parent):
         attributes = self._get_standard_attributes(node)
         times = self._get_quantity(node["times"])
-        if self._lazy:
-            labels = np.array((), dtype=node["labels"].dtype)
-        else:
-            labels = node["labels"].value
+        labels = node["labels"].value
         event = Event(times=times, labels=labels, **attributes)
         event.segment = parent
-        if self._lazy:
-            event.lazy_shape = node["times"].shape
         return event
 
     def _read_event(self, node, parent):
@@ -260,7 +242,7 @@ class NeoHdf5IO(BaseIO):
         channel_names = node["channel_names"].value
 
         if channel_indexes.size:
-            if len(node['recordingchannels']) :
+            if len(node['recordingchannels']):
                 raise MergeError("Cannot handle a RecordingChannelGroup which both has a "
                                  "'channel_indexes' attribute and contains "
                                  "RecordingChannel objects")
@@ -317,7 +299,9 @@ class NeoHdf5IO(BaseIO):
                 try:
                     combined_obj_ref = merged_objects[-1].annotations['object_ref']
                     merged_objects[-1] = merged_objects[-1].merge(obj)
-                    merged_objects[-1].annotations['object_ref'] = combined_obj_ref + "-" + obj.annotations['object_ref']
+                    merged_objects[-1].annotations['object_ref'] = combined_obj_ref + \
+                                                                   "-" + obj.annotations[
+                                                                       'object_ref']
                 except MergeError:
                     merged_objects.append(obj)
             for obj in merged_objects:
@@ -327,10 +311,7 @@ class NeoHdf5IO(BaseIO):
             return objects
 
     def _get_quantity(self, node):
-        if self._lazy and len(node.shape) > 0:
-            value = np.array((), dtype=node.dtype)
-        else:
-            value = node.value
+        value = node.value
         unit_str = [x for x in node.attrs.keys() if "unit" in x][0].split("__")[1]
         units = getattr(pq, unit_str)
         return value * units
@@ -352,7 +333,8 @@ class NeoHdf5IO(BaseIO):
         else:
             annotations = pickle.loads(node.attrs['annotations'])
         attributes.update(annotations)
-        attribute_names = list(attributes.keys())  # avoid "dictionary changed size during iteration" error
+        # avoid "dictionary changed size during iteration" error
+        attribute_names = list(attributes.keys())
         if sys.version_info.major > 2:
             for name in attribute_names:
                 if isinstance(attributes[name], (bytes, np.bytes_)):
@@ -367,8 +349,9 @@ class NeoHdf5IO(BaseIO):
         def disjoint_channel_indexes(channel_indexes):
             channel_indexes = channel_indexes[:]
             for ci1 in channel_indexes:
-                signal_group1 = set(tuple(x[1]) for x in ci1._channels)  # this works only on analogsignals
-                for ci2 in channel_indexes:                              # need to take irregularly sampled signals
+                # this works only on analogsignals
+                signal_group1 = set(tuple(x[1]) for x in ci1._channels)
+                for ci2 in channel_indexes:  # need to take irregularly sampled signals
                     signal_group2 = set(tuple(x[1]) for x in ci2._channels)  # into account too
                     if signal_group1 != signal_group2:
                         if signal_group2.issubset(signal_group1):
@@ -382,7 +365,8 @@ class NeoHdf5IO(BaseIO):
             ids = []
             by_segment = {}
             for (index, analogsignals, irregsignals) in ci._channels:
-                ids.append(index)  # note that what was called "index" in Neo 0.3/0.4 is "id" in Neo 0.5
+                # note that what was called "index" in Neo 0.3/0.4 is "id" in Neo 0.5
+                ids.append(index)
                 for signal_ref in analogsignals:
                     signal = self.object_refs[signal_ref]
                     segment_id = id(signal.segment)
@@ -416,8 +400,9 @@ class NeoHdf5IO(BaseIO):
                         merged_signals = self._merge_data_objects(segment_data['analogsignals'])
                         assert len(merged_signals) == 1
                         merged_signals[0].channel_index = ci
-                        merged_signals[0].annotations['object_ref'] = "-".join(obj.annotations['object_ref']
-                                                                               for obj in segment_data['analogsignals'])
+                        merged_signals[0].annotations['object_ref'] = "-".join(
+                            obj.annotations['object_ref']
+                            for obj in segment_data['analogsignals'])
                         segment.analogsignals.extend(merged_signals)
                         ci.analogsignals = merged_signals
 
@@ -425,8 +410,9 @@ class NeoHdf5IO(BaseIO):
                         merged_signals = self._merge_data_objects(segment_data['irregsignals'])
                         assert len(merged_signals) == 1
                         merged_signals[0].channel_index = ci
-                        merged_signals[0].annotations['object_ref'] = "-".join(obj.annotations['object_ref']
-                                                                               for obj in segment_data['irregsignals'])
+                        merged_signals[0].annotations['object_ref'] = "-".join(
+                            obj.annotations['object_ref']
+                            for obj in segment_data['irregsignals'])
                         segment.irregularlysampledsignals.extend(merged_signals)
                         ci.irregularlysampledsignals = merged_signals
             else:
@@ -441,4 +427,4 @@ class NeoHdf5IO(BaseIO):
                         break
                 ci.analogsignals = cipr.analogsignals
                 ci.channel_ids = np.array(ids)
-                ci.index = np.where(np.in1d(cipr.channel_ids, ci.channel_ids))[0]
+                ci.index = np.where(np.in1d(cipr.channel_ids, ci.channel_ids))[0]
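For reference, a minimal sketch of reading a legacy HDF5 file with the simplified, non-lazy API above (assumptions: 'olddata.h5' is a placeholder for a file previously written by this IO's save()/write_block(), and h5py is installed):

    from neo.io.hdf5io import NeoHdf5IO

    reader = NeoHdf5IO(filename='olddata.h5')
    blocks = reader.read_all_blocks(merge_singles=True)  # lazy loading is no longer available
    for block in blocks:
        for seg in block.segments:
            print(len(seg.analogsignals), len(seg.spiketrains))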

+ 28 - 27
code/python-neo/neo/io/igorproio.py

@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-Class for reading data created by IGOR Pro 
+Class for reading data created by IGOR Pro
 (WaveMetrics, Inc., Portland, OR, USA)
 
 Depends on: igor (https://pypi.python.org/pypi/igor/)
@@ -18,9 +18,11 @@ import numpy as np
 import quantities as pq
 from neo.io.baseio import BaseIO
 from neo.core import Block, Segment, AnalogSignal
+
 try:
     import igor.binarywave as bw
     import igor.packed as pxp
+
     HAVE_IGOR = True
 except ImportError:
     HAVE_IGOR = False
@@ -28,11 +30,10 @@ except ImportError:
 
 class IgorIO(BaseIO):
     """
-    Class for reading Igor Binary Waves (.ibw) written by WaveMetrics’ 
+    Class for reading Igor Binary Waves (.ibw)
+    or Packed Experiment (.pxp) files written by WaveMetrics’
     IGOR Pro software.
 
-    Support for Packed Experiment (.pxp) files is planned.
-
     It requires the `igor` Python package by W. Trevor King.
 
     Usage:
@@ -43,10 +44,10 @@ class IgorIO(BaseIO):
 
     """
 
-    is_readable = True   # This class can only read data
+    is_readable = True  # This class can only read data
     is_writable = False  # write is not supported
     supported_objects = [Block, Segment, AnalogSignal]
-    readable_objects = [Block, Segment , AnalogSignal]
+    readable_objects = [Block, Segment, AnalogSignal]
     writeable_objects = []
     has_header = False
     is_streameable = False
@@ -54,7 +55,7 @@ class IgorIO(BaseIO):
     extensions = ['ibw', 'pxp']
     mode = 'file'
 
-    def __init__(self, filename=None, parse_notes=None) :
+    def __init__(self, filename=None, parse_notes=None):
         """
 
 
@@ -73,22 +74,26 @@ class IgorIO(BaseIO):
         self.extension = filename.split('.')[-1]
         self.parse_notes = parse_notes
 
-    def read_block(self, lazy=False, cascade=True):
+    def read_block(self, lazy=False):
+        assert not lazy, 'Do not support lazy'
+
         block = Block(file_origin=self.filename)
-        if cascade:
-            block.segments.append(self.read_segment(lazy=lazy, cascade=cascade))
-            block.segments[-1].block = block
+        block.segments.append(self.read_segment(lazy=lazy))
+        block.segments[-1].block = block
         return block
 
-    def read_segment(self, lazy=False, cascade=True):
+    def read_segment(self, lazy=False):
+        assert not lazy, 'Do not support lazy'
+
         segment = Segment(file_origin=self.filename)
-        if cascade:
-            segment.analogsignals.append(
-                self.read_analogsignal(lazy=lazy, cascade=cascade))
-            segment.analogsignals[-1].segment = segment
+        segment.analogsignals.append(
+            self.read_analogsignal(lazy=lazy))
+        segment.analogsignals[-1].segment = segment
         return segment
 
-    def read_analogsignal(self, path=None, lazy=False, cascade=True):
+    def read_analogsignal(self, path=None, lazy=False):
+        assert not lazy, 'Do not support lazy'
+
         if not HAVE_IGOR:
             raise Exception(("`igor` package not installed. "
                              "Try `pip install igor`"))
@@ -101,7 +106,7 @@ class IgorIO(BaseIO):
         elif self.extension == 'pxp':
             assert type(path) is str, \
                 "A colon-separated Igor-style path must be provided."
-            _,filesystem = pxp.load(self.filename)
+            _, filesystem = pxp.load(self.filename)
             path = path.split(':')
             location = filesystem['root']
             for element in path:
@@ -112,14 +117,10 @@ class IgorIO(BaseIO):
         if "padding" in content:
             assert content['padding'].size == 0, \
                 "Cannot handle non-empty padding"
-        if lazy:
-            # not really lazy, since the `igor` module loads the data anyway
-            signal = np.array((), dtype=content['wData'].dtype)
-        else:
-            signal = content['wData']
+        signal = content['wData']
         note = content['note']
         header = content['wave_header']
-        name = header['bname']
+        name = str(header['bname'].decode('utf-8'))
         units = "".join([x.decode() for x in header['dataUnits']])
         try:
             time_units = "".join([x.decode() for x in header['xUnits']])
@@ -146,8 +147,6 @@ class IgorIO(BaseIO):
         signal = AnalogSignal(signal, units=units, copy=False, t_start=t_start,
                               sampling_period=sampling_period, name=name,
                               file_origin=self.filename, **annotations)
-        if lazy:
-            signal.lazy_shape = content['wData'].shape
         return signal
 
 
@@ -162,7 +161,7 @@ def key_value_string_parser(itemsep=";", kvsep=":"):
         kvsep - character which separates the key and value within an item
 
     Returns:
-        a function which takes the string to be parsed as the sole argument 
+        a function which takes the string to be parsed as the sole argument
         and returns a dict.
 
     Example:
@@ -171,7 +170,9 @@ def key_value_string_parser(itemsep=";", kvsep=":"):
         >>> parse("a:2;b:3")
         {'a': 2, 'b': 3}
     """
+
     def parser(s):
         items = s.split(itemsep)
         return dict(item.split(kvsep, 1) for item in items if item)
+
     return parser

+ 52 - 66
code/python-neo/neo/io/klustakwikio.py

@@ -21,6 +21,7 @@ import shutil
 
 # note: neo.core needs only numpy and quantities
 import numpy as np
+
 try:
     import matplotlib.mlab as mlab
 except ImportError as err:
@@ -30,7 +31,6 @@ else:
     HAVE_MLAB = True
     MLAB_ERR = None
 
-
 # I need to subclass BaseIO
 from neo.io.baseio import BaseIO
 
@@ -52,10 +52,10 @@ Assuming N1 spikes (spike1...spikeN1), N2 electrodes (e1...eN2) and
 N3 coefficients (c1...cN3), this file looks like:
 
 nbDimensions
-c1_e1_spike1   c2_e1_spike1  ... cN3_e1_spike1   c1_e2_spike1  ... cN3_eN2_spike1   timestamp_spike1
-c1_e1_spike2   c2_e1_spike2  ... cN3_e1_spike2   c1_e2_spike2  ... cN3_eN2_spike2   timestamp_spike2
+c1_e1_spk1   c2_e1_spk1  ... cN3_e1_spk1   c1_e2_spk1  ... cN3_eN2_spk1   timestamp_spk1
+c1_e1_spk2   c2_e1_spk2  ... cN3_e1_spk2   c1_e2_spk2  ... cN3_eN2_spk2   timestamp_spk2
 ...
-c1_e1_spikeN1  c2_e1_spikeN1 ... cN3_e1_spikeN1  c1_e2_spikeN1 ... cN3_eN2_spikeN1  timestamp_spikeN1
+c1_e1_spkN1  c2_e1_spkN1 ... cN3_e1_spkN1  c1_e2_spkN1 ... cN3_eN2_spkN1  timestamp_spkN1
 
 The timestamp is expressed in multiples of the sampling interval. For
 instance, for a 20kHz recording (50 microsecond sampling interval), a
@@ -69,21 +69,21 @@ Notice that the last line must end with a newline or carriage return.
 class KlustaKwikIO(BaseIO):
     """Reading and writing from KlustaKwik-format files."""
     # Class variables demonstrating capabilities of this IO
-    is_readable        = True
-    is_writable        = True
+    is_readable = True
+    is_writable = True
 
     # This IO can only manipulate objects relating to spike times
-    supported_objects  = [Block, SpikeTrain, Unit]
+    supported_objects = [Block, SpikeTrain, Unit]
 
     # Keep things simple by always returning a block
-    readable_objects    = [Block]
+    readable_objects = [Block]
 
     # And write a block
-    writeable_objects   = [Block]
+    writeable_objects = [Block]
 
     # Not sure what these do, if anything
-    has_header         = False
-    is_streameable     = False
+    has_header = False
+    is_streameable = False
 
     # GUI params
     read_params = {}
@@ -92,8 +92,8 @@ class KlustaKwikIO(BaseIO):
     write_params = {}
 
     # The IO name and the file extensions it uses
-    name               = 'KlustaKwik'
-    extensions          = ['fet', 'clu', 'res', 'spk']
+    name = 'KlustaKwik'
+    extensions = ['fet', 'clu', 'res', 'spk']
 
     # Operates on directories
     mode = 'file'
@@ -109,7 +109,7 @@ class KlustaKwikIO(BaseIO):
         if not HAVE_MLAB:
             raise MLAB_ERR
         BaseIO.__init__(self)
-        #self.filename = os.path.normpath(filename)
+        # self.filename = os.path.normpath(filename)
         self.filename, self.basename = os.path.split(os.path.abspath(filename))
         self.sampling_rate = float(sampling_rate)
 
@@ -120,9 +120,7 @@ class KlustaKwikIO(BaseIO):
         # initialize a helper object to parse filenames
         self._fp = FilenameParser(dirname=self.filename, basename=self.basename)
 
-    # The reading methods. The `lazy` and `cascade` parameters are imposed
-    # by neo.io API
-    def read_block(self, lazy=False, cascade=True):
+    def read_block(self, lazy=False):
         """Returns a Block containing spike information.
 
         There is no obvious way to infer the segment boundaries from
@@ -131,13 +129,15 @@ class KlustaKwikIO(BaseIO):
         boundaries, and then change this code to put the spikes in the right
         segments.
         """
+        assert not lazy, 'Do not support lazy'
+
         # Create block and segment to hold all the data
         block = Block()
         # Search data directory for KlustaKwik files.
         # If nothing found, return empty block
         self._fetfiles = self._fp.read_filenames('fet')
         self._clufiles = self._fp.read_filenames('clu')
-        if len(self._fetfiles) == 0 or not cascade:
+        if len(self._fetfiles) == 0:
             return block
 
         # Create a single segment to hold all of the data
@@ -169,27 +169,19 @@ class KlustaKwikIO(BaseIO):
             for unit_id in sorted(unique_unit_ids):
                 # Initialize the unit
                 u = Unit(name=('unit %d from group %d' % (unit_id, group)),
-                    index=unit_id, group=group)
+                         index=unit_id, group=group)
 
                 # Initialize a new SpikeTrain for the spikes from this unit
-                if lazy:
-                    st = SpikeTrain(
-                        times=[],
-                        units='sec', t_start=0.0,
-                        t_stop=spks.max() / self.sampling_rate,
-                        name=('unit %d from group %d' % (unit_id, group)))
-                    st.lazy_shape = len(spks[uids==unit_id])
-                else:
-                    st = SpikeTrain(
-                        times=spks[uids==unit_id] / self.sampling_rate,
-                        units='sec', t_start=0.0,
-                        t_stop=spks.max() / self.sampling_rate,
-                        name=('unit %d from group %d' % (unit_id, group)))
+                st = SpikeTrain(
+                    times=spks[uids == unit_id] / self.sampling_rate,
+                    units='sec', t_start=0.0,
+                    t_stop=spks.max() / self.sampling_rate,
+                    name=('unit %d from group %d' % (unit_id, group)))
                 st.annotations['cluster'] = unit_id
                 st.annotations['group'] = group
 
                 # put features in
-                if not lazy and len(features) != 0:
+                if len(features) != 0:
                     st.annotations['waveform_features'] = features
 
                 # Link
@@ -202,36 +194,32 @@ class KlustaKwikIO(BaseIO):
     # Helper hidden functions for reading
     def _load_spike_times(self, fetfilename):
         """Reads and returns the spike times and features"""
-        f = file(fetfilename, 'r')
-
-        # Number of clustering features is integer on first line
-        nbFeatures = int(f.readline().strip())
+        with open(fetfilename, mode='r') as f:
+            # Number of clustering features is integer on first line
+            nbFeatures = int(f.readline().strip())
 
-        # Each subsequent line consists of nbFeatures values, followed by
-        # the spike time in samples.
-        names = ['fet%d' % n for n in xrange(nbFeatures)]
-        names.append('spike_time')
+            # Each subsequent line consists of nbFeatures values, followed by
+            # the spike time in samples.
+            names = ['fet%d' % n for n in range(nbFeatures)]
+            names.append('spike_time')
 
-        # Load into recarray
-        data = mlab.csv2rec(f, names=names, skiprows=1, delimiter=' ')
-        f.close()
+            # Load into recarray
+            data = mlab.csv2rec(f, names=names, skiprows=1, delimiter=' ')
 
         # get features
-        features = np.array([data['fet%d' % n] for n in xrange(nbFeatures)])
+        features = np.array([data['fet%d' % n] for n in range(nbFeatures)])
 
         # Return the spike_time column
         return data['spike_time'], features.transpose()
 
     def _load_unit_id(self, clufilename):
         """Reads and return the cluster ids as int32"""
-        f = file(clufilename, 'r')
-
-        # Number of clusters on this tetrode is integer on first line
-        nbClusters = int(f.readline().strip())
+        with open(clufilename, mode='r') as f:
+            # Number of clusters on this tetrode is integer on first line
+            nbClusters = int(f.readline().strip())
 
-        # Read each cluster name as a string
-        cluster_names = f.readlines()
-        f.close()
+            # Read each cluster name as a string
+            cluster_names = f.readlines()
 
         # Convert names to integers
         # I think the spec requires cluster names to be integers, but
@@ -251,7 +239,6 @@ class KlustaKwikIO(BaseIO):
 
         return cluster_ids
 
-
     # writing functions
     def write_block(self, block):
         """Write spike times and unit ids to disk.
@@ -333,8 +320,8 @@ class KlustaKwikIO(BaseIO):
                     fetfilehandle.write("%d\n" % n_features)
                 if n_features != all_features.shape[1]:
                     raise ValueError("inconsistent number of features: " +
-                        "supposed to be %d but I got %d" %\
-                        (n_features, all_features.shape[1]))
+                                     "supposed to be %d but I got %d" %
+                                     (n_features, all_features.shape[1]))
 
                 # Write features and time for each spike
                 for stt, features in zip(spike_times_in_samples, all_features):
@@ -392,9 +379,9 @@ class KlustaKwikIO(BaseIO):
     def _new_group(self, id_group, nbClusters):
         # generate filenames
         fetfilename = os.path.join(self.filename,
-            self.basename + ('.fet.%d' % id_group))
+                                   self.basename + ('.fet.%d' % id_group))
         clufilename = os.path.join(self.filename,
-            self.basename + ('.clu.%d' % id_group))
+                                   self.basename + ('.clu.%d' % id_group))
 
         # back up before overwriting
         if os.path.exists(fetfilename):
@@ -403,20 +390,23 @@ class KlustaKwikIO(BaseIO):
             shutil.copyfile(clufilename, clufilename + '~')
 
         # create file handles
-        self._fetfilehandles[id_group] = file(fetfilename, 'w')
-        self._clufilehandles[id_group] = file(clufilename, 'w')
+        self._fetfilehandles[id_group] = open(fetfilename, mode='w')
+        self._clufilehandles[id_group] = open(clufilename, mode='w')
 
         # write out first line
-        #self._fetfilehandles[id_group].write("0\n") # Number of features
+        # self._fetfilehandles[id_group].write("0\n") # Number of features
         self._clufilehandles[id_group].write("%d\n" % nbClusters)
 
     def _close_all_files(self):
-        for val in self._fetfilehandles.values(): val.close()
-        for val in self._clufilehandles.values(): val.close()
+        for val in self._fetfilehandles.values():
+            val.close()
+        for val in self._clufilehandles.values():
+            val.close()
 
 
 class FilenameParser:
     """Simple class to interpret user's requests into KlustaKwik filenames"""
+
     def __init__(self, dirname, basename=None):
         """Initialize a new parser for a directory containing files
 
@@ -453,7 +443,6 @@ class FilenameParser:
         """
         all_filenames = glob.glob(os.path.join(self.dirname, '*'))
 
-
         # Fill the dict with valid filenames
         d = {}
         for v in all_filenames:
@@ -474,6 +463,3 @@ class FilenameParser:
                     d[tetn] = v
 
         return d
-
-
-
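For reference, a minimal sketch of reading a KlustaKwik data set with this IO (assumptions: 'data/mysession' is a placeholder basename with files such as 'mysession.fet.1' and 'mysession.clu.1' beside it, the recording rate of 20 kHz is a placeholder, and a matplotlib version that still provides mlab.csv2rec is installed):

    from neo.io.klustakwikio import KlustaKwikIO

    reader = KlustaKwikIO(filename='data/mysession', sampling_rate=20000.)
    block = reader.read_block(lazy=False)
    for st in block.segments[0].spiketrains:
        print(st.annotations['group'], st.annotations['cluster'], len(st))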

+ 50 - 54
code/python-neo/neo/io/kwikio.py

@@ -87,13 +87,12 @@ class KwikIO(BaseIO):
             raise KWIK_ERR
         BaseIO.__init__(self)
         self.filename = os.path.abspath(filename)
-        model = kwik.KwikModel(self.filename) # TODO this group is loaded twice
+        model = kwik.KwikModel(self.filename)  # TODO this group is loaded twice
         self.models = [kwik.KwikModel(self.filename, channel_group=grp)
                        for grp in model.channel_groups]
 
     def read_block(self,
                    lazy=False,
-                   cascade=True,
                    get_waveforms=True,
                    cluster_group=None,
                    raw_data_units='uV',
@@ -113,55 +112,51 @@ class KwikIO(BaseIO):
             Which clusters to load, possibilities are "noise", "unsorted",
             "good", if None all is loaded.
         """
+        assert not lazy, 'Do not support lazy'
+
         blk = Block()
-        if cascade:
-            seg = Segment(file_origin=self.filename)
-            blk.segments += [seg]
-            for model in self.models:
-                group_id = model.channel_group
-                group_meta = {'group_id': group_id}
-                group_meta.update(model.metadata)
-                chx = ChannelIndex(name='channel group #{}'.format(group_id),
-                                   index=model.channels,
-                                   **group_meta)
-                blk.channel_indexes.append(chx)
-                clusters = model.spike_clusters
-                for cluster_id in model.cluster_ids:
-                    meta = model.cluster_metadata[cluster_id]
-                    if cluster_group is None:
-                        pass
-                    elif cluster_group != meta:
-                        continue
-                    sptr = self.read_spiketrain(cluster_id=cluster_id,
-                                                model=model, lazy=lazy,
-                                                cascade=cascade,
-                                                get_waveforms=get_waveforms,
-                                                raw_data_units=raw_data_units)
-                    sptr.annotations.update({'cluster_group': meta,
-                                             'group_id': model.channel_group})
-                    sptr.channel_index = chx
-                    unit = Unit(cluster_group=meta,
-                                group_id=model.channel_group,
-                                name='unit #{}'.format(cluster_id))
-                    unit.spiketrains.append(sptr)
-                    chx.units.append(unit)
-                    unit.channel_index = chx
-                    seg.spiketrains.append(sptr)
-                if get_raw_data:
-                    ana = self.read_analogsignal(model, raw_data_units,
-                                                 lazy, cascade)
-                    ana.channel_index = chx
-                    seg.analogsignals.append(ana)
-
-            seg.duration = model.duration * pq.s
+        seg = Segment(file_origin=self.filename)
+        blk.segments += [seg]
+        for model in self.models:
+            group_id = model.channel_group
+            group_meta = {'group_id': group_id}
+            group_meta.update(model.metadata)
+            chx = ChannelIndex(name='channel group #{}'.format(group_id),
+                               index=model.channels,
+                               **group_meta)
+            blk.channel_indexes.append(chx)
+            clusters = model.spike_clusters
+            for cluster_id in model.cluster_ids:
+                meta = model.cluster_metadata[cluster_id]
+                if cluster_group is None:
+                    pass
+                elif cluster_group != meta:
+                    continue
+                sptr = self.read_spiketrain(cluster_id=cluster_id,
+                                            model=model,
+                                            get_waveforms=get_waveforms,
+                                            raw_data_units=raw_data_units)
+                sptr.annotations.update({'cluster_group': meta,
+                                         'group_id': model.channel_group})
+                sptr.channel_index = chx
+                unit = Unit(cluster_group=meta,
+                            group_id=model.channel_group,
+                            name='unit #{}'.format(cluster_id))
+                unit.spiketrains.append(sptr)
+                chx.units.append(unit)
+                unit.channel_index = chx
+                seg.spiketrains.append(sptr)
+            if get_raw_data:
+                ana = self.read_analogsignal(model, units=raw_data_units)
+                ana.channel_index = chx
+                seg.analogsignals.append(ana)
+
+        seg.duration = model.duration * pq.s
 
         blk.create_many_to_one_relationship()
         return blk
 
-    def read_analogsignal(self, model, units='uV',
-                          lazy=False,
-                          cascade=True,
-                          ):
+    def read_analogsignal(self, model, units='uV', lazy=False):
         """
         Reads analogsignals
 
@@ -169,15 +164,16 @@ class KwikIO(BaseIO):
         units: str, default = "uV"
             SI units of the raw trace according to voltage_gain given to klusta
         """
-        arr = model.traces[:]*model.metadata['voltage_gain']
-        ana = AnalogSignal(arr, sampling_rate=model.sample_rate*pq.Hz,
+        assert not lazy, 'Do not support lazy'
+
+        arr = model.traces[:] * model.metadata['voltage_gain']
+        ana = AnalogSignal(arr, sampling_rate=model.sample_rate * pq.Hz,
                            units=units,
                            file_origin=model.metadata['raw_data_files'])
         return ana
 
     def read_spiketrain(self, cluster_id, model,
                         lazy=False,
-                        cascade=True,
                         get_waveforms=True,
                         raw_data_units=None
                         ):
@@ -193,13 +189,13 @@ class KwikIO(BaseIO):
             A KwikModel object obtained by klusta.kwik.KwikModel(fname)
         """
         try:
-            if ((not(cluster_id in model.cluster_ids))):
+            if ((not (cluster_id in model.cluster_ids))):
                 raise ValueError
         except ValueError:
-                print("Exception: cluster_id (%d) not found !! " % cluster_id)
-                return
+            print("Exception: cluster_id (%d) not found !! " % cluster_id)
+            return
         clusters = model.spike_clusters
-        idx = np.argwhere(clusters == cluster_id)
+        idx = np.nonzero(clusters == cluster_id)
         if get_waveforms:
             w = model.all_waveforms[idx]
             # klusta: num_spikes, samples_per_spike, num_chans = w.shape
@@ -209,7 +205,7 @@ class KwikIO(BaseIO):
             w = None
         sptr = SpikeTrain(times=model.spike_times[idx],
                           t_stop=model.duration, waveforms=w, units='s',
-                          sampling_rate=model.sample_rate*pq.Hz,
+                          sampling_rate=model.sample_rate * pq.Hz,
                           file_origin=self.filename,
                           **{'cluster_id': cluster_id})
         return sptr
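For reference, a minimal sketch of the read path shown above (assumptions: 'experiment.kwik' is a placeholder filename, the `klusta` package is installed, and the default behaviour of not loading the raw traces is kept):

    from neo.io.kwikio import KwikIO

    reader = KwikIO(filename='experiment.kwik')
    block = reader.read_block(get_waveforms=False, cluster_group='good')
    for st in block.segments[0].spiketrains:
        print(st.annotations['cluster_id'], st.annotations['cluster_group'], len(st))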

+ 8 - 216
code/python-neo/neo/io/micromedio.py

@@ -1,222 +1,14 @@
 # -*- coding: utf-8 -*-
-"""
-Class for reading/writing data from micromed (.trc).
-Inspired by the Matlab code for EEGLAB from Rami K. Niazy.
 
-Completed with matlab Guillaume BECQ code.
-
-Supported : Read
-
-Author: sgarcia
-"""
-
-import datetime
-import os
-import struct
-
-from io import open, BufferedReader
-
-import numpy as np
-import quantities as pq
-
-from neo.io.baseio import BaseIO
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.micromedrawio import MicromedRawIO
 from neo.core import Segment, AnalogSignal, Epoch, Event
 
 
-class StructFile(BufferedReader):
-    def read_f(self, fmt):
-        return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
-
-
-class MicromedIO(BaseIO):
-    """
-    Class for reading  data from micromed (.trc).
-
-    Usage:
-        >>> from neo import io
-        >>> r = io.MicromedIO(filename='File_micromed_1.TRC')
-        >>> seg = r.read_segment(lazy=False, cascade=True)
-        >>> print seg.analogsignals # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<AnalogSignal(array([ -1.77246094e+02,  -2.24707031e+02,
-            -2.66015625e+02, ...
-    """
-    is_readable = True
-    is_writable = False
-
-    supported_objects = [Segment, AnalogSignal, Event, Epoch]
-    readable_objects = [Segment]
-    writeable_objects = []
-
-    has_header = False
-    is_streameable = False
-    read_params = {Segment: []}
-    write_params = None
-
-    name = None
-    extensions = ['TRC']
-
-    mode = 'file'
-
-    def __init__(self, filename=None):
-        """
-        This class read a micromed TRC file.
-
-        Arguments:
-            filename : the filename to read
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-
-    def read_segment(self, cascade=True, lazy=False, ):
-        """
-        Arguments:
-        """
-        f = StructFile(open(self.filename, 'rb'))
-
-        # Name
-        f.seek(64, 0)
-        surname = f.read(22).decode('ascii')
-        while surname[-1] == ' ':
-            if len(surname) == 0:
-                break
-            surname = surname[:-1]
-        firstname = f.read(20).decode('ascii')
-        while firstname[-1] == ' ':
-            if len(firstname) == 0:
-                break
-            firstname = firstname[:-1]
-
-        #Date
-        f.seek(128, 0)
-        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
-        rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
-                                         sec)
-
-        f.seek(138, 0)
-        Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
-            'IHHHH')
-        #~ print Num_Chan, Bytes
-
-        #header version
-        f.seek(175, 0)
-        header_version, = f.read_f('b')
-        assert header_version == 4
-
-        seg = Segment(name=str(firstname + ' ' + surname),
-                      file_origin=os.path.basename(self.filename))
-        seg.annotate(surname=surname)
-        seg.annotate(firstname=firstname)
-        seg.annotate(rec_datetime=rec_datetime)
-
-        if not cascade:
-            f.close()
-            return seg
-
-        # area
-        f.seek(176, 0)
-        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B',
-                      'IMPED_E', 'MONTAGE',
-                      'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
-                      'EVENT B', 'TRIGGER']
-        zones = {}
-        for zname in zone_names:
-            zname2, pos, length = f.read_f('8sII')
-            zones[zname] = zname2, pos, length
-            #~ print zname2, pos, length
-
-        # reading raw data
-        if not lazy:
-            f.seek(Data_Start_Offset, 0)
-            rawdata = np.fromstring(f.read(), dtype='u' + str(Bytes))
-            rawdata = rawdata.reshape((-1, Num_Chan))
-
-        # Reading Code Info
-        zname2, pos, length = zones['ORDER']
-        f.seek(pos, 0)
-        code = np.fromstring(f.read(Num_Chan*2), dtype='u2', count=Num_Chan)
-
-        units = {-1: pq.nano * pq.V, 0: pq.uV, 1: pq.mV, 2: 1, 100: pq.percent,
-                 101: pq.dimensionless, 102: pq.dimensionless}
-
-        for c in range(Num_Chan):
-            zname2, pos, length = zones['LABCOD']
-            f.seek(pos + code[c] * 128 + 2, 0)
-
-            label = f.read(6).strip(b"\x00").decode('ascii')
-            ground = f.read(6).strip(b"\x00").decode('ascii')
-            (logical_min, logical_max, logical_ground, physical_min,
-             physical_max) = f.read_f('iiiii')
-            k, = f.read_f('h')
-            if k in units.keys():
-                unit = units[k]
-            else:
-                unit = pq.uV
-
-            f.seek(8, 1)
-            sampling_rate, = f.read_f('H') * pq.Hz
-            sampling_rate *= Rate_Min
-
-            if lazy:
-                signal = [] * unit
-            else:
-                factor = float(physical_max - physical_min) / float(
-                    logical_max - logical_min + 1)
-                signal = (rawdata[:, c].astype(
-                    'f') - logical_ground) * factor * unit
-
-            ana_sig = AnalogSignal(signal, sampling_rate=sampling_rate,
-                                   name=str(label), channel_index=c)
-            if lazy:
-                ana_sig.lazy_shape = None
-            ana_sig.annotate(ground=ground)
-
-            seg.analogsignals.append(ana_sig)
-
-        sampling_rate = np.mean(
-            [ana_sig.sampling_rate for ana_sig in seg.analogsignals]) * pq.Hz
-
-        # Read trigger and notes
-        for zname, label_dtype in [('TRIGGER', 'u2'), ('NOTE', 'S40')]:
-            zname2, pos, length = zones[zname]
-            f.seek(pos, 0)
-            triggers = np.fromstring(f.read(length), dtype=[('pos', 'u4'), (
-                'label', label_dtype)])
-            if not lazy:
-                keep = (triggers['pos'] >= triggers['pos'][0]) & (
-                    triggers['pos'] < rawdata.shape[0]) & (
-                    triggers['pos'] != 0)
-                triggers = triggers[keep]
-                ea = Event(name=zname[0] + zname[1:].lower(),
-                           labels=triggers['label'].astype('S'),
-                           times=(triggers['pos'] / sampling_rate).rescale('s'))
-            else:
-                ea = Event(name=zname[0] + zname[1:].lower())
-                ea.lazy_shape = triggers.size
-            seg.events.append(ea)
-
-        # Read Event A and B
-        # Not so well  tested
-        for zname in ['EVENT A', 'EVENT B']:
-            zname2, pos, length = zones[zname]
-            f.seek(pos, 0)
-            epochs = np.fromstring(f.read(length),
-                                   dtype=[('label', 'u4'), ('start', 'u4'),
-                                          ('stop', 'u4'), ])
-            ep = Epoch(name=zname[0] + zname[1:].lower())
-            if not lazy:
-                keep = (epochs['start'] > 0) & (
-                    epochs['start'] < rawdata.shape[0]) & (
-                    epochs['stop'] < rawdata.shape[0])
-                epochs = epochs[keep]
-                ep = Epoch(name=zname[0] + zname[1:].lower(),
-                           labels=epochs['label'].astype('S'),
-                           times=(epochs['start'] / sampling_rate).rescale('s'),
-                           durations=((epochs['stop'] - epochs['start']) / sampling_rate).rescale('s'))
-            else:
-                ep = Epoch(name=zname[0] + zname[1:].lower())
-                ep.lazy_shape = triggers.size
-            seg.epochs.append(ep)
+class MicromedIO(MicromedRawIO, BaseFromRaw):
+    """Class for reading/writing data from Micromed files (.trc)."""
+    _prefered_signal_group_mode = 'group-by-same-units'
 
-        seg.create_many_to_one_relationship()
-        f.close()
-        return seg
+    def __init__(self, filename):
+        MicromedRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
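For reference, a minimal sketch of using the rewritten, RawIO-backed MicromedIO (assumption: 'File_micromed_1.TRC' is a placeholder for an existing Micromed recording):

    from neo.io import MicromedIO

    reader = MicromedIO(filename='File_micromed_1.TRC')
    seg = reader.read_segment(lazy=False)   # provided by BaseFromRaw
    print(len(seg.analogsignals), len(seg.events))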

+ 52 - 60
code/python-neo/neo/io/neomatlabio.py

@@ -30,19 +30,17 @@ except ImportError as err:
 else:
     if version.LooseVersion(scipy.version.version) < '0.12.0':
         HAVE_SCIPY = False
-        SCIPY_ERR = ImportError("your scipy version is too old to support " +
-                                "MatlabIO, you need at least 0.12.0. " +
-                                "You have %s" % scipy.version.version)
+        SCIPY_ERR = ImportError("your scipy version is too old to support "
+                                + "MatlabIO, you need at least 0.12.0. "
+                                + "You have %s" % scipy.version.version)
     else:
         HAVE_SCIPY = True
         SCIPY_ERR = None
 
-
 from neo.io.baseio import BaseIO
 from neo.core import (Block, Segment, AnalogSignal, Event, Epoch, SpikeTrain,
                       objectnames, class_by_name)
 
-
 classname_lower_to_upper = {}
 for k in objectnames:
     classname_lower_to_upper[k.lower()] = k
@@ -123,7 +121,7 @@ class NeoMatlabIO(BaseIO):
                 seg.epochs{1} = epoch;
 
                 block.segments{s} = seg;
-                
+
             end
 
             save 'myblock.mat' block -V7
@@ -151,7 +149,8 @@ class NeoMatlabIO(BaseIO):
                 seg = neo.Segment(name='segment' + str(s))
                 bl.segments.append(seg)
                 for a in range(5):
-                    anasig = neo.AnalogSignal(rand(100)*pq.mV, t_start=0*pq.s, sampling_rate=100*pq.Hz)
+                    anasig = neo.AnalogSignal(rand(100)*pq.mV, t_start=0*pq.s,
+                                              sampling_rate=100*pq.Hz)
                     seg.analogsignals.append(anasig)
                 for t in range(7):
                     sptr = neo.SpikeTrain(rand(40)*pq.ms, t_start=0*pq.ms, t_stop=10*pq.ms)
@@ -218,20 +217,22 @@ class NeoMatlabIO(BaseIO):
         BaseIO.__init__(self)
         self.filename = filename
 
-    def read_block(self, cascade=True, lazy=False,):
+    def read_block(self, lazy=False):
         """
         Arguments:
 
         """
+        assert not lazy, 'Do not support lazy'
+
         d = scipy.io.loadmat(self.filename, struct_as_record=False,
                              squeeze_me=True, mat_dtype=True)
-        if not 'block' in d:
+        if 'block' not in d:
             self.logger.exception('No block in ' + self.filename)
             return None
 
         bl_struct = d['block']
         bl = self.create_ob_from_struct(
-            bl_struct, 'Block', cascade=cascade, lazy=lazy)
+            bl_struct, 'Block')
         bl.create_many_to_one_relationship()
         return bl
 
@@ -279,24 +280,23 @@ class NeoMatlabIO(BaseIO):
         for i, attr in enumerate(ob._all_attrs):
             attrname, attrtype = attr[0], attr[1]
 
-            #~ if attrname =='':
-                #~ struct['array'] = ob.magnitude
-                #~ struct['units'] = ob.dimensionality.string
-                #~ continue
+            # ~ if attrname =='':
+            # ~ struct['array'] = ob.magnitude
+            # ~ struct['units'] = ob.dimensionality.string
+            # ~ continue
 
-            if (hasattr(ob, '_quantity_attr') and
-                    ob._quantity_attr == attrname):
+            if (hasattr(ob, '_quantity_attr') and ob._quantity_attr == attrname):
                 struct[attrname] = ob.magnitude
-                struct[attrname+'_units'] = ob.dimensionality.string
+                struct[attrname + '_units'] = ob.dimensionality.string
                 continue
 
-            if not(attrname in ob.annotations or hasattr(ob, attrname)):
+            if not (attrname in ob.annotations or hasattr(ob, attrname)):
                 continue
             if getattr(ob, attrname) is None:
                 continue
 
             if attrtype == pq.Quantity:
-                #ndim = attr[2]
+                # ndim = attr[2]
                 struct[attrname] = getattr(ob, attrname).magnitude
                 struct[attrname + '_units'] = getattr(
                     ob, attrname).dimensionality.string
@@ -307,27 +307,32 @@ class NeoMatlabIO(BaseIO):
 
         return struct
 
-    def create_ob_from_struct(self, struct, classname,
-                              cascade=True, lazy=False):
+    def create_ob_from_struct(self, struct, classname):
         cl = class_by_name[classname]
         # check if hinerits Quantity
-        #~ is_quantity = False
-        #~ for attr in cl._necessary_attrs:
-            #~ if attr[0] == '' and attr[1] == pq.Quantity:
-                #~ is_quantity = True
-                #~ break
-        #~ is_quantiy = hasattr(cl, '_quantity_attr')
-
-        #~ if is_quantity:
+        # ~ is_quantity = False
+        # ~ for attr in cl._necessary_attrs:
+        # ~ if attr[0] == '' and attr[1] == pq.Quantity:
+        # ~ is_quantity = True
+        # ~ break
+        # ~ is_quantiy = hasattr(cl, '_quantity_attr')
+
+        # ~ if is_quantity:
         if hasattr(cl, '_quantity_attr'):
             quantity_attr = cl._quantity_attr
             arr = getattr(struct, quantity_attr)
-            #~ data_complement = dict(units=str(struct.units))
+            # ~ data_complement = dict(units=str(struct.units))
             data_complement = dict(units=str(
                 getattr(struct, quantity_attr + '_units')))
             if "sampling_rate" in (at[0] for at in cl._necessary_attrs):
                 # put fake value for now, put correct value later
                 data_complement["sampling_rate"] = 0 * pq.kHz
+            try:
+                len(arr)
+            except TypeError:
+                # strange scipy.io behavior: if len is 1 we get a float
+                arr = np.array(arr)
+                arr = arr.reshape((-1,))  # new view with one dimension
             if "t_stop" in (at[0] for at in cl._necessary_attrs):
                 if len(arr) > 0:
                     data_complement["t_stop"] = arr.max()
@@ -339,32 +344,28 @@ class NeoMatlabIO(BaseIO):
                 else:
                     data_complement["t_start"] = 0.0
 
-            if lazy:
-                ob = cl([], **data_complement)
-                ob.lazy_shape = arr.shape
-            else:
-                ob = cl(arr, **data_complement)
+            ob = cl(arr, **data_complement)
         else:
             ob = cl()
 
         for attrname in struct._fieldnames:
             # check children
             if attrname in getattr(ob, '_single_child_containers', []):
+                child_struct = getattr(struct, attrname)
                 try:
-                    for c in range(len(getattr(struct, attrname))):
-                        if cascade:
-                            child = self.create_ob_from_struct(
-                                getattr(struct, attrname)[c],
-                                classname_lower_to_upper[attrname[:-1]],
-                                cascade=cascade, lazy=lazy)
-                            getattr(ob, attrname.lower()).append(child)
+                    # try must only surround len() or other errors are captured
+                    child_len = len(child_struct)
                 except TypeError:
                     # strange scipy.io behavior: if len is 1 there is no len()
-                    if cascade:
+                    child = self.create_ob_from_struct(
+                        child_struct,
+                        classname_lower_to_upper[attrname[:-1]])
+                    getattr(ob, attrname.lower()).append(child)
+                else:
+                    for c in range(child_len):
                         child = self.create_ob_from_struct(
-                            getattr(struct, attrname),
-                            classname_lower_to_upper[attrname[:-1]],
-                            cascade=cascade, lazy=lazy)
+                            child_struct[c],
+                            classname_lower_to_upper[attrname[:-1]])
                         getattr(ob, attrname.lower()).append(child)
                 continue
 
@@ -372,8 +373,7 @@ class NeoMatlabIO(BaseIO):
             if attrname.endswith('_units') or attrname == 'units':
                 # linked with another field
                 continue
-            if (hasattr(cl, '_quantity_attr') and
-                    cl._quantity_attr == attrname):
+            if (hasattr(cl, '_quantity_attr') and cl._quantity_attr == attrname):
                 continue
 
             item = getattr(struct, attrname)
@@ -383,7 +383,7 @@ class NeoMatlabIO(BaseIO):
             if attrname in dict_attributes:
                 attrtype = dict_attributes[attrname][0]
                 if attrtype == datetime:
-                    m = '(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+).(\d+)'
+                    m = r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+).(\d+)'
                     r = re.findall(m, str(item))
                     if len(r) == 1:
                         item = datetime(*[int(e) for e in r[0]])
@@ -391,22 +391,14 @@ class NeoMatlabIO(BaseIO):
                         item = None
                 elif attrtype == np.ndarray:
                     dt = dict_attributes[attrname][2]
-                    if lazy:
-                        item = np.array([], dtype=dt)
-                        ob.lazy_shape = item.shape
-                    else:
-                        item = item.astype(dt)
+                    item = item.astype(dt)
                 elif attrtype == pq.Quantity:
                     ndim = dict_attributes[attrname][1]
-                    units = str(getattr(struct, attrname+'_units'))
+                    units = str(getattr(struct, attrname + '_units'))
                     if ndim == 0:
                         item = pq.Quantity(item, units)
                     else:
-                        if lazy:
-                            item = pq.Quantity([], units)
-                            item.lazy_shape = item.shape
-                        else:
-                            item = pq.Quantity(item, units)
+                        item = pq.Quantity(item, units)
                 else:
                     item = attrtype(item)
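For reference, a minimal round-trip sketch for this IO (assumptions: scipy >= 0.12 is installed, 'myblock.mat' is a placeholder path, and the written file follows the layout described in the class docstring):

    import numpy as np
    import quantities as pq
    from neo.core import Block, Segment, AnalogSignal
    from neo.io.neomatlabio import NeoMatlabIO

    bl = Block(name='demo')
    seg = Segment(name='segment0')
    bl.segments.append(seg)
    seg.analogsignals.append(
        AnalogSignal(np.random.rand(100) * pq.mV, t_start=0 * pq.s,
                     sampling_rate=100 * pq.Hz))

    NeoMatlabIO(filename='myblock.mat').write_block(bl)
    bl2 = NeoMatlabIO(filename='myblock.mat').read_block(lazy=False)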
 

+ 86 - 86
code/python-neo/neo/io/nestio.py

@@ -93,7 +93,7 @@ class NestIO(BaseIO):
                              t_stop=None, sampling_period=None,
                              id_column=0, time_column=1,
                              value_columns=2, value_types=None,
-                             value_units=None, lazy=False):
+                             value_units=None):
         """
         Internal function called by read_analogsignal() and read_segment().
         """
@@ -125,9 +125,9 @@ class NestIO(BaseIO):
         column_list_no_None = [c for c in column_list if c is not None]
         if len(np.unique(column_list_no_None)) < len(column_list_no_None):
             raise ValueError(
-                    'One or more columns have been specified to contain '
-                    'the same data. Columns were specified to %s.'
-                    '' % column_list_no_None)
+                'One or more columns have been specified to contain '
+                'the same data. Columns were specified to %s.'
+                '' % column_list_no_None)
 
         # extracting condition and sorting parameters for raw data loading
         (condition, condition_column,
@@ -138,10 +138,10 @@ class NestIO(BaseIO):
                                                             t_stop)
         # loading raw data columns
         data = self.avail_IOs['dat'].get_columns(
-                column_ids=column_ids,
-                condition=condition,
-                condition_column=condition_column,
-                sorting_columns=sorting_column)
+            column_ids=column_ids,
+            condition=condition,
+            condition_column=condition_column,
+            sorting_columns=sorting_column)
 
         sampling_period = self._check_input_sampling_period(sampling_period,
                                                             time_column,
@@ -149,41 +149,40 @@ class NestIO(BaseIO):
                                                             data)
         analogsignal_list = []
 
-        if not lazy:
-            # extracting complete gid list for anasig generation
-            if (gid_list == []) and id_column is not None:
-                gid_list = np.unique(data[:, id_column])
+        # extracting complete gid list for anasig generation
+        if (gid_list == []) and id_column is not None:
+            gid_list = np.unique(data[:, id_column])
 
-            # generate analogsignals for each neuron ID
-            for i in gid_list:
-                selected_ids = self._get_selected_ids(
-                        i, id_column, time_column, t_start, t_stop, time_unit,
-                        data)
+        # generate analogsignals for each neuron ID
+        for i in gid_list:
+            selected_ids = self._get_selected_ids(
+                i, id_column, time_column, t_start, t_stop, time_unit,
+                data)
 
-                # extract starting time of analogsignal
-                if (time_column is not None) and data.size:
-                    anasig_start_time = data[selected_ids[0], 1] * time_unit
-                else:
-                    # set t_start equal to sampling_period because NEST starts
-                    #  recording only after 1 sampling_period
-                    anasig_start_time = 1. * sampling_period
-
-                # create one analogsignal per value column requested
-                for v_id, value_column in enumerate(value_columns):
-                    signal = data[
-                             selected_ids[0]:selected_ids[1], value_column]
-
-                    # create AnalogSignal objects and annotate them with
-                    #  the neuron ID
-                    analogsignal_list.append(AnalogSignal(
-                            signal * value_units[v_id],
-                            sampling_period=sampling_period,
-                            t_start=anasig_start_time,
-                            id=i,
-                            type=value_types[v_id]))
-                    # check for correct length of analogsignal
-                    assert (analogsignal_list[-1].t_stop ==
-                            anasig_start_time + len(signal) * sampling_period)
+            # extract starting time of analogsignal
+            if (time_column is not None) and data.size:
+                anasig_start_time = data[selected_ids[0], 1] * time_unit
+            else:
+                # set t_start equal to sampling_period because NEST starts
+                #  recording only after 1 sampling_period
+                anasig_start_time = 1. * sampling_period
+
+            # create one analogsignal per value column requested
+            for v_id, value_column in enumerate(value_columns):
+                signal = data[
+                    selected_ids[0]:selected_ids[1], value_column]
+
+                # create AnalogSignal objects and annotate them with
+                #  the neuron ID
+                analogsignal_list.append(AnalogSignal(
+                    signal * value_units[v_id],
+                    sampling_period=sampling_period,
+                    t_start=anasig_start_time,
+                    id=i,
+                    type=value_types[v_id]))
+                # check for correct length of analogsignal
+                assert (analogsignal_list[-1].t_stop
+                        == anasig_start_time + len(signal) * sampling_period)
         return analogsignal_list
 
     def __read_spiketrains(self, gdf_id_list, time_unit,
@@ -224,10 +223,10 @@ class NestIO(BaseIO):
                                              gdf_id_list, t_start, t_stop)
 
         data = self.avail_IOs['gdf'].get_columns(
-                column_ids=column_ids,
-                condition=condition,
-                condition_column=condition_column,
-                sorting_columns=sorting_column)
+            column_ids=column_ids,
+            condition=condition,
+            condition_column=condition_column,
+            sorting_columns=sorting_column)
 
         # create a list of SpikeTrains for all neuron IDs in gdf_id_list
         # assign spike times to neuron IDs if id_column is given
@@ -243,9 +242,9 @@ class NestIO(BaseIO):
                 times = data[selected_ids[0]:selected_ids[1], time_column]
                 spiketrain_list.append(SpikeTrain(
 
-                        times, units=time_unit,
-                        t_start=t_start, t_stop=t_stop,
-                        id=nid, **args))
+                    times, units=time_unit,
+                    t_start=t_start, t_stop=t_stop,
+                    id=nid, **args))
 
         # if id_column is not given, all spike times are collected in one
         #  spike train with id=None
@@ -364,7 +363,7 @@ class NestIO(BaseIO):
         if sampling_period is None:
             if time_column is not None:
                 data_sampling = np.unique(
-                        np.diff(sorted(np.unique(data[:, 1]))))
+                    np.diff(sorted(np.unique(data[:, 1]))))
                 if len(data_sampling) > 1:
                     raise ValueError('Different sampling distances found in '
                                      'data set (%s)' % data_sampling)
@@ -399,7 +398,9 @@ class NestIO(BaseIO):
         curr_id = 0
         if ((gid_list != [None]) and (gid_list is not None)):
             if gid_list != []:
-                condition = lambda x: x in gid_list
+                def condition(x):
+                    return x in gid_list
+
                 condition_column = id_column
             sorting_column.append(curr_id)  # Sorting according to gids first
             curr_id += 1
@@ -443,11 +444,11 @@ class NestIO(BaseIO):
         if time_column is not None:
             id_shifts[0] = np.searchsorted(gid_data[:, 1],
                                            t_start.rescale(
-                                                   time_unit).magnitude,
+                                               time_unit).magnitude,
                                            side='left')
             id_shifts[1] = (np.searchsorted(gid_data[:, 1],
                                             t_stop.rescale(
-                                                    time_unit).magnitude,
+                                                time_unit).magnitude,
                                             side='left') - gid_data.shape[0])
 
         selected_ids = gid_ids + id_shifts
@@ -457,12 +458,14 @@ class NestIO(BaseIO):
                    t_stop=None, sampling_period=None, id_column_dat=0,
                    time_column_dat=1, value_columns_dat=2,
                    id_column_gdf=0, time_column_gdf=1, value_types=None,
-                   value_units=None, lazy=False, cascade=True):
+                   value_units=None, lazy=False):
+        assert not lazy, 'Do not support lazy'
+
         seg = self.read_segment(gid_list, time_unit, t_start,
                                 t_stop, sampling_period, id_column_dat,
                                 time_column_dat, value_columns_dat,
                                 id_column_gdf, time_column_gdf, value_types,
-                                value_units, lazy, cascade)
+                                value_units)
         blk = Block(file_origin=seg.file_origin, file_datetime=seg.file_datetime)
         blk.segments.append(seg)
         seg.block = blk
@@ -472,7 +475,7 @@ class NestIO(BaseIO):
                      t_stop=None, sampling_period=None, id_column_dat=0,
                      time_column_dat=1, value_columns_dat=2,
                      id_column_gdf=0, time_column_gdf=1, value_types=None,
-                     value_units=None, lazy=False, cascade=True):
+                     value_units=None, lazy=False):
         """
         Reads a Segment which contains SpikeTrain(s) with specified neuron IDs
         from the GDF data.
@@ -508,7 +511,6 @@ class NestIO(BaseIO):
         value_units : Quantity (amplitude), default: None
             The physical unit of the recorded signal values.
         lazy : bool, optional, default: False
-        cascade : bool, optional, default: True
 
         Returns
         -------
@@ -516,6 +518,8 @@ class NestIO(BaseIO):
             The Segment contains one SpikeTrain and one AnalogSignal for
             each ID in gid_list.
         """
+        assert not lazy, 'Do not support lazy'
+
         if isinstance(gid_list, tuple):
             if gid_list[0] > gid_list[1]:
                 raise ValueError('The second entry in gid_list must be '
@@ -532,29 +536,27 @@ class NestIO(BaseIO):
         # todo: rather than take the first file for the timestamp, we should take the oldest
         #       in practice, there won't be much difference
 
-        if cascade:
-            # Load analogsignals and attach to Segment
-            if 'dat' in self.avail_formats:
-                seg.analogsignals = self.__read_analogsignals(
-                        gid_list,
-                        time_unit,
-                        t_start,
-                        t_stop,
-                        sampling_period=sampling_period,
-                        id_column=id_column_dat,
-                        time_column=time_column_dat,
-                        value_columns=value_columns_dat,
-                        value_types=value_types,
-                        value_units=value_units,
-                        lazy=lazy)
-            if 'gdf' in self.avail_formats:
-                seg.spiketrains = self.__read_spiketrains(
-                        gid_list,
-                        time_unit,
-                        t_start,
-                        t_stop,
-                        id_column=id_column_gdf,
-                        time_column=time_column_gdf)
+        # Load analogsignals and attach to Segment
+        if 'dat' in self.avail_formats:
+            seg.analogsignals = self.__read_analogsignals(
+                gid_list,
+                time_unit,
+                t_start,
+                t_stop,
+                sampling_period=sampling_period,
+                id_column=id_column_dat,
+                time_column=time_column_dat,
+                value_columns=value_columns_dat,
+                value_types=value_types,
+                value_units=value_units)
+        if 'gdf' in self.avail_formats:
+            seg.spiketrains = self.__read_spiketrains(
+                gid_list,
+                time_unit,
+                t_start,
+                t_stop,
+                id_column=id_column_gdf,
+                time_column=time_column_gdf)
 
         return seg
 
@@ -599,6 +601,7 @@ class NestIO(BaseIO):
             The requested SpikeTrain object with an annotation 'id'
             corresponding to the gdf_id parameter.
         """
+        assert not lazy, 'Do not support lazy'
 
         # __read_spiketrains() needs a list of IDs
         return self.__read_analogsignals([gid], time_unit,
@@ -608,12 +611,11 @@ class NestIO(BaseIO):
                                          time_column=time_column,
                                          value_columns=value_column,
                                          value_types=value_type,
-                                         value_units=value_unit,
-                                         lazy=lazy)[0]
+                                         value_units=value_unit)[0]
 
     def read_spiketrain(
             self, gdf_id=None, time_unit=pq.ms, t_start=None, t_stop=None,
-            id_column=0, time_column=1, lazy=False, cascade=True, **args):
+            id_column=0, time_column=1, lazy=False, **args):
         """
         Reads a SpikeTrain with specified neuron ID from the GDF data.
 
@@ -634,7 +636,6 @@ class NestIO(BaseIO):
         time_column : int, optional, default: 1
             Column index of time stamps.
         lazy : bool, optional, default: False
-        cascade : bool, optional, default: True
 
         Returns
         -------
@@ -642,6 +643,7 @@ class NestIO(BaseIO):
             The requested SpikeTrain object with an annotation 'id'
             corresponding to the gdf_id parameter.
         """
+        assert not lazy, 'Do not support lazy'
 
         if (not isinstance(gdf_id, int)) and gdf_id is not None:
             raise ValueError('gdf_id has to be of type int or None.')
@@ -735,9 +737,7 @@ class ColumnIO:
         elif (condition is not None) and (condition_column is not None):
             condition_function = np.vectorize(condition)
             mask = condition_function(
-                    selected_data[
-                    :, condition_column]).astype(bool)
-
+                selected_data[:, condition_column]).astype(bool)
             selected_data = selected_data[mask, :]
 
         # Apply sorting if requested
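
The NestIO hunks above remove the cascade keyword from read_block, read_segment, read_analogsignal and read_spiketrain and replace the old "if not lazy:" branches with an early "assert not lazy", so lazy loading now fails immediately instead of silently returning empty containers. A minimal sketch of reading a Segment through the updated interface; the file names and neuron IDs are hypothetical, and the constructor is assumed to keep its existing filenames argument:

    import quantities as pq
    from neo.io import NestIO

    # Hypothetical NEST output files: a .dat with analog values and a .gdf
    # with spike times recorded from the same simulation.
    io = NestIO(filenames=['example-0.dat', 'example-0.gdf'])

    # cascade is gone; lazy must stay False or the new assertion fires.
    seg = io.read_segment(gid_list=[1, 2, 3],
                          t_start=0. * pq.ms,
                          t_stop=1000. * pq.ms,
                          time_unit=pq.ms,
                          lazy=False)
    print(len(seg.analogsignals), len(seg.spiketrains))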

File diff suppressed because it is too large
+ 14 - 2394
code/python-neo/neo/io/neuralynxio.py


+ 8 - 327
code/python-neo/neo/io/neuroexplorerio.py

@@ -1,332 +1,13 @@
 # -*- coding: utf-8 -*-
-"""
-Class for reading data from NeuroExplorer (.nex)
 
-Documentation for dev :
-http://www.neuroexplorer.com/downloads/HowToReadAndWriteNexAndNex5FilesInMatlab.zip
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.neuroexplorerrawio import NeuroExplorerRawIO
 
-Depend on:
 
-Supported : Read
+class NeuroExplorerIO(NeuroExplorerRawIO, BaseFromRaw):
+    """Class for reading data from NeuroExplorer (.nex)"""
+    _prefered_signal_group_mode = 'split-all'
 
-Author: sgarcia,luc estebanez, mark hollenbeck
-
-"""
-
-import os
-import struct
-
-import numpy as np
-import quantities as pq
-
-from neo.io.baseio import BaseIO
-from neo.core import Segment, AnalogSignal, SpikeTrain, Epoch, Event
-
-
-class NeuroExplorerIO(BaseIO):
-    """
-    Class for reading nex files.
-
-    Usage:
-        >>> from neo import io
-        >>> r = io.NeuroExplorerIO(filename='File_neuroexplorer_1.nex')
-        >>> seg = r.read_segment(lazy=False, cascade=True)
-        >>> print seg.analogsignals # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<AnalogSignal(array([ 39.0625    ,   0.        ,   0.        , ...,
-        >>> print seg.spiketrains   # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<SpikeTrain(array([  2.29499992e-02,   6.79249987e-02, ...
-        >>> print seg.events   # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<Event: @21.1967754364 s, @21.2993755341 s, @21.350725174 s, ...
-        >>> print seg.epochs   # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
-        [<neo.core.epoch.Epoch object at 0x10561ba90>,
-         <neo.core.epoch.Epoch object at 0x10561bad0>]
-    """
-
-    is_readable = True
-    is_writable = False
-
-    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
-    readable_objects = [Segment]
-    writeable_objects = []
-
-    has_header = False
-    is_streameable = False
-
-    # This is for GUI stuff: a definition for parameters when reading.
-    read_params = {Segment: []}
-    write_params = None
-
-    name = 'NeuroExplorer'
-    extensions = ['nex']
-
-    mode = 'file'
-
-    def __init__(self, filename=None):
-        """
-        This class read a nex file.
-
-        Arguments:
-            filename: the filename to read
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-
-    def read_segment(self, lazy=False, cascade=True):
-        fid = open(self.filename, 'rb')
-        global_header = HeaderReader(fid, GlobalHeader).read_f(offset=0)
-        # ~ print globalHeader
-        #~ print 'version' , globalHeader['version']
-        seg = Segment()
-        seg.file_origin = os.path.basename(self.filename)
-        seg.annotate(neuroexplorer_version=global_header['version'])
-        seg.annotate(comment=global_header['comment'])
-
-        if not cascade:
-            return seg
-
-        offset = 544
-        for i in range(global_header['nvar']):
-            entity_header = HeaderReader(fid, EntityHeader).read_f(
-                offset=offset + i * 208)
-            entity_header['name'] = entity_header['name'].replace('\x00', '')
-
-            #print 'i',i, entityHeader['type']
-
-            if entity_header['type'] == 0:
-                # neuron
-                if lazy:
-                    spike_times = [] * pq.s
-                else:
-                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                            shape=(entity_header['n']),
-                                            offset=entity_header['offset'])
-                    spike_times = spike_times.astype('f8') / global_header[
-                        'freq'] * pq.s
-                sptr = SpikeTrain(
-                    times=spike_times,
-                    t_start=global_header['tbeg'] /
-                    global_header['freq'] * pq.s,
-                    t_stop=global_header['tend'] /
-                    global_header['freq'] * pq.s,
-                    name=entity_header['name'])
-                if lazy:
-                    sptr.lazy_shape = entity_header['n']
-                sptr.annotate(channel_index=entity_header['WireNumber'])
-                seg.spiketrains.append(sptr)
-
-            if entity_header['type'] == 1:
-                # event
-                if lazy:
-                    event_times = [] * pq.s
-                else:
-                    event_times = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                            shape=(entity_header['n']),
-                                            offset=entity_header['offset'])
-                    event_times = event_times.astype('f8') / global_header[
-                        'freq'] * pq.s
-                labels = np.array([''] * event_times.size, dtype='S')
-                evar = Event(times=event_times, labels=labels,
-                             channel_name=entity_header['name'])
-                if lazy:
-                    evar.lazy_shape = entity_header['n']
-                seg.events.append(evar)
-
-            if entity_header['type'] == 2:
-                # interval
-                if lazy:
-                    start_times = [] * pq.s
-                    stop_times = [] * pq.s
-                else:
-                    start_times = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                            shape=(entity_header['n']),
-                                            offset=entity_header['offset'])
-                    start_times = start_times.astype('f8') / global_header[
-                        'freq'] * pq.s
-                    stop_times = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                           shape=(entity_header['n']),
-                                           offset=entity_header['offset'] +
-                                           entity_header['n'] * 4)
-                    stop_times = stop_times.astype('f') / global_header[
-                        'freq'] * pq.s
-                epar = Epoch(times=start_times,
-                             durations=stop_times - start_times,
-                             labels=np.array([''] * start_times.size,
-                                             dtype='S'),
-                             channel_name=entity_header['name'])
-                if lazy:
-                    epar.lazy_shape = entity_header['n']
-                seg.epochs.append(epar)
-
-            if entity_header['type'] == 3:
-                # spiketrain and wavefoms
-                if lazy:
-                    spike_times = [] * pq.s
-                    waveforms = None
-                else:
-
-                    spike_times = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                            shape=(entity_header['n']),
-                                            offset=entity_header['offset'])
-                    spike_times = spike_times.astype('f8') / global_header[
-                        'freq'] * pq.s
-
-                    waveforms = np.memmap(self.filename, np.dtype('i2'), 'r',
-                                          shape=(entity_header['n'], 1,
-                                                 entity_header['NPointsWave']),
-                                          offset=entity_header['offset'] +
-                                          entity_header['n'] * 4)
-                    waveforms = (waveforms.astype('f') *
-                                 entity_header['ADtoMV'] +
-                                 entity_header['MVOffset']) * pq.mV
-                t_stop = global_header['tend'] / global_header['freq'] * pq.s
-                if spike_times.size > 0:
-                    t_stop = max(t_stop, max(spike_times))
-                sptr = SpikeTrain(
-                    times=spike_times,
-                    t_start=global_header['tbeg'] /
-                    global_header['freq'] * pq.s,
-                    #~ t_stop = max(globalHeader['tend']/
-                    #~ globalHeader['freq']*pq.s,max(spike_times)),
-                    t_stop=t_stop, name=entity_header['name'],
-                    waveforms=waveforms,
-                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
-                    left_sweep=0 * pq.ms)
-                if lazy:
-                    sptr.lazy_shape = entity_header['n']
-                sptr.annotate(channel_index=entity_header['WireNumber'])
-                seg.spiketrains.append(sptr)
-
-            if entity_header['type'] == 4:
-                # popvectors
-                pass
-
-            if entity_header['type'] == 5:
-                # analog
-                timestamps = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                       shape=(entity_header['n']),
-                                       offset=entity_header['offset'])
-                timestamps = timestamps.astype('f8') / global_header['freq']
-
-                fragment_starts_offset = entity_header['offset'] + entity_header['n']*4
-                fragment_starts = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                            shape=(entity_header['n']),
-                                            offset=fragment_starts_offset)
-                fragment_starts = fragment_starts.astype('f8') / global_header[
-                    'freq']
-                t_start = timestamps[0] - fragment_starts[0] / float(
-                    entity_header['WFrequency'])
-                del timestamps, fragment_starts
-
-                if lazy:
-                    signal = [] * pq.mV
-                else:
-                    signal_offset = fragment_starts_offset + entity_header['n']*4
-                    signal = np.memmap(self.filename, np.dtype('i2'), 'r',
-                                       shape=(entity_header['NPointsWave']),
-                                       offset=signal_offset)
-                    signal = signal.astype('f')
-                    signal *= entity_header['ADtoMV']
-                    signal += entity_header['MVOffset']
-                    signal = signal * pq.mV
-
-                ana_sig = AnalogSignal(
-                    signal=signal, t_start=t_start * pq.s,
-                    sampling_rate=entity_header['WFrequency'] * pq.Hz,
-                    name=entity_header['name'],
-                    channel_index=entity_header['WireNumber'])
-                if lazy:
-                    ana_sig.lazy_shape = entity_header['NPointsWave']
-                seg.analogsignals.append(ana_sig)
-
-            if entity_header['type'] == 6:
-                # markers  : TO TEST
-                if lazy:
-                    times = [] * pq.s
-                    labels = np.array([], dtype='S')
-                    markertype = None
-                else:
-                    times = np.memmap(self.filename, np.dtype('i4'), 'r',
-                                      shape=(entity_header['n']),
-                                      offset=entity_header['offset'])
-                    times = times.astype('f8') / global_header['freq'] * pq.s
-                    fid.seek(entity_header['offset'] + entity_header['n'] * 4)
-                    markertype = fid.read(64).replace('\x00', '')
-                    labels = np.memmap(
-                        self.filename, np.dtype(
-                            'S' + str(entity_header['MarkerLength'])),
-                        'r', shape=(entity_header['n']),
-                        offset=entity_header['offset'] +
-                        entity_header['n'] * 4 + 64)
-                ea = Event(times=times,
-                           labels=labels.view(np.ndarray),
-                           name=entity_header['name'],
-                           channel_index=entity_header['WireNumber'],
-                           marker_type=markertype)
-                if lazy:
-                    ea.lazy_shape = entity_header['n']
-                seg.events.append(ea)
-
-        seg.create_many_to_one_relationship()
-        return seg
-
-
-GlobalHeader = [
-    ('signature', '4s'),
-    ('version', 'i'),
-    ('comment', '256s'),
-    ('freq', 'd'),
-    ('tbeg', 'i'),
-    ('tend', 'i'),
-    ('nvar', 'i'),
-]
-
-EntityHeader = [
-    ('type', 'i'),
-    ('varVersion', 'i'),
-    ('name', '64s'),
-    ('offset', 'i'),
-    ('n', 'i'),
-    ('WireNumber', 'i'),
-    ('UnitNumber', 'i'),
-    ('Gain', 'i'),
-    ('Filter', 'i'),
-    ('XPos', 'd'),
-    ('YPos', 'd'),
-    ('WFrequency', 'd'),
-    ('ADtoMV', 'd'),
-    ('NPointsWave', 'i'),
-    ('NMarkers', 'i'),
-    ('MarkerLength', 'i'),
-    ('MVOffset', 'd'),
-    ('dummy', '60s'),
-]
-
-MarkerHeader = [
-    ('type', 'i'),
-    ('varVersion', 'i'),
-    ('name', '64s'),
-    ('offset', 'i'),
-    ('n', 'i'),
-    ('WireNumber', 'i'),
-    ('UnitNumber', 'i'),
-    ('Gain', 'i'),
-    ('Filter', 'i'),
-]
-
-
-class HeaderReader():
-    def __init__(self, fid, description):
-        self.fid = fid
-        self.description = description
-
-    def read_f(self, offset=0):
-        self.fid.seek(offset)
-        d = {}
-        for key, fmt in self.description:
-            val = struct.unpack(fmt, self.fid.read(struct.calcsize(fmt)))
-            if len(val) == 1:
-                val = val[0]
-            else:
-                val = list(val)
-            d[key] = val
-        return d
+    def __init__(self, filename):
+        NeuroExplorerRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
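
With this rewrite NeuroExplorerIO stops parsing .nex headers itself: file parsing comes from NeuroExplorerRawIO and Neo-object construction from the generic BaseFromRaw base class, which supplies read_block/read_segment in place of the removed hand-written reader. A minimal usage sketch under that assumption, reusing the file name from the removed docstring:

    from neo.io import NeuroExplorerIO

    # cascade is gone from the rawio-based readers; lazy defaults to False.
    reader = NeuroExplorerIO(filename='File_neuroexplorer_1.nex')
    seg = reader.read_segment(lazy=False)
    print(seg.analogsignals)
    print(seg.spiketrains)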

+ 9 - 120
code/python-neo/neo/io/neuroscopeio.py

@@ -1,128 +1,17 @@
 # -*- coding: utf-8 -*-
-"""
-Reading from neuroscope format files.
-Ref: http://neuroscope.sourceforge.net/
 
-It is an old format from Buzsaki's lab.
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.neuroscoperawio import NeuroScopeRawIO
 
-Supported: Read
 
-#TODO:
-SpikeTrain file   '.clu'  '.res'
-Event  '.ext.evt'  or '.evt.ext'
-
-Author: sgarcia
-
-"""
-
-# needed for python 3 compatibility
-from __future__ import absolute_import
-
-import os
-from xml.etree import ElementTree
-
-import numpy as np
-import quantities as pq
-
-from neo.io.baseio import BaseIO
-from neo.io.rawbinarysignalio import RawBinarySignalIO
-from neo.core import (Block, Segment,  ChannelIndex,
-                      AnalogSignal)
-
-
-class NeuroScopeIO(BaseIO):
+class NeuroScopeIO(NeuroScopeRawIO, BaseFromRaw):
     """
+    Reading from Neuroscope format files.
 
-
+    Ref: http://neuroscope.sourceforge.net/
     """
+    _prefered_signal_group_mode = 'group-by-same-units'
 
-    is_readable = True
-    is_writable = False
-
-    supported_objects  = [ Block, Segment , AnalogSignal,  ChannelIndex]
-
-    readable_objects    = [ Block ]
-    writeable_objects   = [ ]
-
-    has_header         = False
-    is_streameable     = False
-    read_params = {
-        Segment : [ ]
-        }
-
-    # do not supported write so no GUI stuff
-    write_params       = None
-
-    name               = 'NeuroScope'
-
-    extensions          = [ 'xml' ]
-    mode = 'file'
-
-
-
-    def __init__(self , filename = None) :
-        """
-        Arguments:
-            filename : the filename
-            
-        """
-        BaseIO.__init__(self)
-        self.filename = filename
-
-
-        
-        
-
-
-    def read_block(self,
-                     lazy = False,
-                     cascade = True,
-                    ):
-        """
-        """
-
-        
-        tree = ElementTree.parse(self.filename)
-        root = tree.getroot()
-        acq = root.find('acquisitionSystem')
-        nbits = int(acq.find('nBits').text)
-        nbchannel = int(acq.find('nChannels').text)
-        sampling_rate = float(acq.find('samplingRate').text)*pq.Hz
-        voltage_range = float(acq.find('voltageRange').text)
-        #offset = int(acq.find('offset').text)
-        amplification = float(acq.find('amplification').text)
-        
-        bl = Block(file_origin = os.path.basename(self.filename).replace('.xml', ''))
-        if cascade:
-            seg = Segment()
-            bl.segments.append(seg)
-            
-            # RCG
-            for i, xml_chx in  enumerate(root.find('anatomicalDescription').find('channelGroups').findall('group')):
-                n_channels = len(xml_chx)
-                chx = ChannelIndex(name='Group {0}'.format(i),
-                                   index=np.arange(n_channels, dtype = int))
-                chx.channel_ids = np.array([int(xml_rc.text) for xml_rc in xml_chx])
-                chx.channel_names = np.array(['Channel{0}'.format(id) for id in chx.channel_ids], dtype = 'S')
-                bl.channel_indexes.append(chx)
-        
-            # AnalogSignals
-            reader = RawBinarySignalIO(filename = self.filename.replace('.xml', '.dat'))
-            seg2 = reader.read_segment(cascade = True, lazy = lazy,
-                                                        sampling_rate = sampling_rate,
-                                                        t_start = 0.*pq.s,
-                                                        unit = pq.V, nbchannel = nbchannel,
-                                                        bytesoffset = 0,
-                                                        dtype = np.int16 if nbits<=16 else np.int32,
-                                                        rangemin = -voltage_range/2.,
-                                                        rangemax = voltage_range/2.,)
-            for s, sig in enumerate(seg2.analogsignals):
-                if not lazy:
-                    sig /= amplification
-                sig.segment = seg
-                seg.analogsignals.append(sig)
-                chx.analogsignals.append(sig)
-            
-        bl.create_many_to_one_relationship()
-        return bl
-
+    def __init__(self, filename):
+        NeuroScopeRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
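
NeuroScopeIO follows the same pattern: the XML parsing and the detour through RawBinarySignalIO are dropped in favour of NeuroScopeRawIO plus the generic BaseFromRaw block reader. A minimal sketch, assuming the usual NeuroScope .xml/.dat pair (the file name is hypothetical):

    from neo.io import NeuroScopeIO

    # Expects recording.dat (raw binary signals) next to recording.xml.
    reader = NeuroScopeIO(filename='recording.xml')
    block = reader.read_block(lazy=False)
    for seg in block.segments:
        # analog signals are grouped according to _prefered_signal_group_mode
        print(seg.analogsignals)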

+ 316 - 355
code/python-neo/neo/io/neuroshareapiio.py

@@ -18,179 +18,168 @@ import quantities as pq
 
 import os
 
-#check to see if the neuroshare bindings are properly imported    
+# check to see if the neuroshare bindings are properly imported
 try:
     import neuroshare as ns
 except ImportError as err:
-    print (err)
-    #print('\n neuroshare library not found, loading data will not work!' )
-    #print('\n be sure to install the library found at:')
-    #print('\n www.http://pythonhosted.org/neuroshare/')
+    print(err)
+    # print('\n neuroshare library not found, loading data will not work!' )
+    # print('\n be sure to install the library found at:')
+    # print('\n www.http://pythonhosted.org/neuroshare/')
 
 else:
     pass
-    #print('neuroshare library successfully imported')
+    # print('neuroshare library successfully imported')
 
-
-#import BaseIO
+# import BaseIO
 from neo.io.baseio import BaseIO
 
-#import objects from neo.core
+# import objects from neo.core
 from neo.core import Segment, AnalogSignal, SpikeTrain, Event, Epoch
 
 
 # create an object based on BaseIO
 class NeuroshareapiIO(BaseIO):
+    # setting some class parameters
+    is_readable = True  # This class can only read data
+    is_writable = False  # write is not supported
+    supported_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
 
-    #setting some class parameters
-    is_readable = True # This class can only read data
-    is_writable = False # write is not supported
-    supported_objects  = [ Segment , AnalogSignal, SpikeTrain, Event, Epoch ]
-    
-    has_header         = False
-    is_streameable     = False
+    has_header = False
+    is_streameable = False
 
-    readable_objects    = [ Segment , AnalogSignal, SpikeTrain, Event, Epoch]
+    readable_objects = [Segment, AnalogSignal, SpikeTrain, Event, Epoch]
     # This class is not able to write objects
-    writeable_objects   = [ ]
-
- 
-#    # This is for GUI stuff : a definition for parameters when reading.
-#    # This dict should be keyed by object (`Block`). Each entry is a list
-#    # of tuple. The first entry in each tuple is the parameter name. The
-#    # second entry is a dict with keys 'value' (for default value),
-#    # and 'label' (for a descriptive name).
-#    # Note that if the highest-level object requires parameters,
-#    # common_io_test will be skipped.
+    writeable_objects = []
+
+    #    # This is for GUI stuff : a definition for parameters when reading.
+    #    # This dict should be keyed by object (`Block`). Each entry is a list
+    #    # of tuple. The first entry in each tuple is the parameter name. The
+    #    # second entry is a dict with keys 'value' (for default value),
+    #    # and 'label' (for a descriptive name).
+    #    # Note that if the highest-level object requires parameters,
+    #    # common_io_test will be skipped.
     read_params = {
-        Segment : [
-            ("segment_duration",{"value" : 0., "label" : "Segment size (s.)"}),
-            ("t_start",{"value" : 0.,"label" : "start reading (s.)"}),
-            #("lazy",{"value" : False,"label" : "load in lazy mode?"}),
-            #("cascade",{"value" : True,"label" : "Cascade?"})
-#            ("num_analogsignal",
-#                {'value" : 8, "label" : "Number of recording points"}),
-#            ("num_spiketrain_by_channel',
-#                {"value" : 3, "label" : "Num of spiketrains"}),
-            ],
-        }
-#
+        Segment: [
+            ("segment_duration", {"value": 0., "label": "Segment size (s.)"}),
+            ("t_start", {"value": 0., "label": "start reading (s.)"}),
+            #            ("num_analogsignal",
+            #                {'value" : 8, "label" : "Number of recording points"}),
+            #            ("num_spiketrain_by_channel',
+            #                {"value" : 3, "label" : "Num of spiketrains"}),
+        ],
+    }
+    #
     # do not supported write so no GUI stuff
-    write_params       = None
+    write_params = None
 
-    name               = "Neuroshare"
+    name = "Neuroshare"
 
-    extensions          = []
+    extensions = []
 
     # This object operates on neuroshare files
     mode = "file"
 
-
-
-    def __init__(self , filename = None, dllpath = None) :
+    def __init__(self, filename=None, dllpath=None):
         """
         Arguments:
             filename : the filename
-        The init function will run automatically upon calling of the class, as 
+        The init function will run automatically upon calling of the class, as
         in: test = MultichannelIO(filename = filetoberead.mcd), therefore the first
         operations with the file are set here, so that the user doesn't have to
         remember to use another method, than the ones defined in the NEO library
-            
+
         """
         BaseIO.__init__(self)
         self.filename = filename
-        #set the flags for each event type
-        eventID = 1        
+        # set the flags for each event type
+        eventID = 1
         analogID = 2
         epochID = 3
-        #if a filename was given, create a dictionary with information that will 
-        #be needed later on.
-        if self.filename != None:
+        # if a filename was given, create a dictionary with information that will
+        # be needed later on.
+        if self.filename is not None:
             if dllpath is not None:
                 name = os.path.splitext(os.path.basename(dllpath))[0]
                 library = ns.Library(name, dllpath)
             else:
                 library = None
-            self.fd = ns.File(self.filename, library = library)
-            #get all the metadata from file
+            self.fd = ns.File(self.filename, library=library)
+            # get all the metadata from file
             self.metadata = self.fd.metadata_raw
-            #get sampling rate
-            self.metadata["sampRate"] = 1./self.metadata["TimeStampResolution"]#hz
-            #create lists and array for electrode, spike cutouts and trigger channels
+            # get sampling rate
+            self.metadata["sampRate"] = 1. / self.metadata["TimeStampResolution"]  # hz
+            # create lists and array for electrode, spike cutouts and trigger channels
             self.metadata["elecChannels"] = list()
-            self.metadata["elecChanId"]   = list()
-            self.metadata["num_analogs"]  = 0
-            self.metadata["spkChannels"]  = list()
-            self.metadata["spkChanId"]    = list()
+            self.metadata["elecChanId"] = list()
+            self.metadata["num_analogs"] = 0
+            self.metadata["spkChannels"] = list()
+            self.metadata["spkChanId"] = list()
             self.metadata["num_spkChans"] = 0
-            self.metadata["triggers"]     = list()
-            self.metadata["triggersId"]   = list()
-            self.metadata["num_trigs"]    = 0
+            self.metadata["triggers"] = list()
+            self.metadata["triggersId"] = list()
+            self.metadata["num_trigs"] = 0
             self.metadata["digital epochs"] = list()
-            self.metadata["digiEpochId"]    = list()
+            self.metadata["digiEpochId"] = list()
             self.metadata["num_digiEpochs"] = 0
 
-            #loop through all entities in file to get the indexes for each entity
-            #type, so that one can run through the indexes later, upon reading the 
-            #segment
+            # loop through all entities in file to get the indexes for each entity
+            # type, so that one can run through the indexes later, upon reading the
+            # segment
             for entity in self.fd.entities:
-                #if entity is analog and not the digital line recording 
-                #(stored as analog in neuroshare files) 
-                if entity.entity_type == analogID and entity.label[0:4]!= "digi":
-                    #get the electrode number                    
+                # if entity is analog and not the digital line recording
+                # (stored as analog in neuroshare files)
+                if entity.entity_type == analogID and entity.label[0:4] != "digi":
+                    # get the electrode number
                     self.metadata["elecChannels"].append(entity.label[-4:])
-                    #get the electrode index
+                    # get the electrode index
                     self.metadata["elecChanId"].append(entity.id)
-                    #increase the number of electrodes found
+                    # increase the number of electrodes found
                     self.metadata["num_analogs"] += 1
                 # if the entity is a event entitiy and a trigger
                 if entity.entity_type == eventID and entity.label[0:4] == "trig":
-                    #get the digital bit/trigger number
-                    self.metadata["triggers"].append(entity.label[0:4]+entity.label[-4:])
-                    #get the digital bit index
+                    # get the digital bit/trigger number
+                    self.metadata["triggers"].append(entity.label[0:4] + entity.label[-4:])
+                    # get the digital bit index
                     self.metadata["triggersId"].append(entity.id)
-                    #increase the number of triggers found                    
+                    # increase the number of triggers found
                     self.metadata["num_trigs"] += 1
-                #if the entity is non triggered digital values with duration
+                # if the entity is non triggered digital values with duration
                 if entity.entity_type == eventID and entity.label[0:4] == "digi":
-                    #get the digital bit number
+                    # get the digital bit number
                     self.metadata["digital epochs"].append(entity.label[-5:])
-                    #get the digital bit index
+                    # get the digital bit index
                     self.metadata["digiEpochId"].append(entity.id)
-                    #increase the number of triggers found                    
+                    # increase the number of triggers found
                     self.metadata["num_digiEpochs"] += 1
-                #if the entity is spike cutouts
+                # if the entity is spike cutouts
                 if entity.entity_type == epochID and entity.label[0:4] == "spks":
                     self.metadata["spkChannels"].append(entity.label[-4:])
                     self.metadata["spkChanId"].append(entity.id)
                     self.metadata["num_spkChans"] += 1
-            
-    #function to create a block and read in a segment
-#    def create_block(self,
-#                     lazy = False,
-#                     cascade = True,
-#                     
-#                     ):
-#        
-#        blk=Block(name = self.fileName+"_segment:",
-#                  file_datetime = str(self.metadata_raw["Time_Day"])+"/"+
-#                                  str(self.metadata_raw["Time_Month"])+"/"+
-#                                  str(self.metadata_raw["Time_Year"])+"_"+
-#                                  str(self.metadata_raw["Time_Hour"])+":"+
-#                                  str(self.metadata_raw["Time_Min"]))
-#        
-#        blk.rec_datetime = blk.file_datetime
-#        return blk
-    
-    #create function to read segment
+
+                    # function to create a block and read in a segment
+                #    def create_block(self,
+                #
+                #                     ):
+                #
+                #        blk=Block(name = self.fileName+"_segment:",
+                #                  file_datetime = str(self.metadata_raw["Time_Day"])+"/"+
+                #                                  str(self.metadata_raw["Time_Month"])+"/"+
+                #                                  str(self.metadata_raw["Time_Year"])+"_"+
+                #                                  str(self.metadata_raw["Time_Hour"])+":"+
+                #                                  str(self.metadata_raw["Time_Min"]))
+                #
+                #        blk.rec_datetime = blk.file_datetime
+                #        return blk
+
+    # create function to read segment
     def read_segment(self,
-                     # the 2 first keyword arguments are imposed by neo.io API
-                     lazy = False,
-                     cascade = True,
+                     lazy=False,
                      # all following arguments are decided by this IO and are free
-                     t_start = 0.,
-                     segment_duration = 0.,
-                    ):
+                     t_start=0.,
+                     segment_duration=0.,
+                     ):
         """
         Return a Segment containing all analog and spike channels, as well as
         all trigger events.
@@ -199,280 +188,252 @@ class NeuroshareapiIO(BaseIO):
             segment_duration : is the size in seconds of the segment.
             num_analogsignal : number of AnalogSignal in this segment
             num_spiketrain : number of SpikeTrain in this segment
-            
+
         """
-        #if no segment duration is given, use the complete file
-        if segment_duration == 0. :
-            segment_duration=float(self.metadata["TimeSpan"])
-        #if the segment duration is bigger than file, use the complete file
-        if segment_duration >=float(self.metadata["TimeSpan"]):
-            segment_duration=float(self.metadata["TimeSpan"])
-        #if the time sum of start point and segment duration is bigger than
-        #the file time span, cap it at the end
-        if segment_duration+t_start>float(self.metadata["TimeSpan"]):
-            segment_duration = float(self.metadata["TimeSpan"])-t_start
-        
+        assert not lazy, 'Do not support lazy'
+
+        # if no segment duration is given, use the complete file
+        if segment_duration == 0.:
+            segment_duration = float(self.metadata["TimeSpan"])
+        # if the segment duration is bigger than file, use the complete file
+        if segment_duration >= float(self.metadata["TimeSpan"]):
+            segment_duration = float(self.metadata["TimeSpan"])
+        # if the time sum of start point and segment duration is bigger than
+        # the file time span, cap it at the end
+        if segment_duration + t_start > float(self.metadata["TimeSpan"]):
+            segment_duration = float(self.metadata["TimeSpan"]) - t_start
+
         # create an empty segment
-        seg = Segment( name = "segment from the NeuroshareapiIO")
+        seg = Segment(name="segment from the NeuroshareapiIO")
 
-        if cascade:
-            # read nested analosignal
-            
-            if self.metadata["num_analogs"] == 0:
-                print ("no analog signals in this file!")
-            else:
-                #run through the number of analog channels found at the __init__ function
-                for i in range(self.metadata["num_analogs"]):
-                    #create an analog signal object for each channel found
-                    ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
-                                             channel_index = self.metadata["elecChanId"][i],
-                                            segment_duration = segment_duration, t_start=t_start)
-                    #add analog signal read to segment object
-                    seg.analogsignals += [ ana ]
-            
-            # read triggers (in this case without any duration)
-            for i in range(self.metadata["num_trigs"]):
-                #create event object for each trigger/bit found
-                eva = self.read_eventarray(lazy = lazy , 
-                                           cascade = cascade,
-                                           channel_index = self.metadata["triggersId"][i],
-                                           segment_duration = segment_duration,
-                                           t_start = t_start,)
-                #add event object to segment
-                seg.eventarrays +=  [eva]
-            #read epochs (digital events with duration)
-            for i in range(self.metadata["num_digiEpochs"]):
-                #create event object for each trigger/bit found
-                epa = self.read_epocharray(lazy = lazy, 
-                                           cascade = cascade,
-                                           channel_index = self.metadata["digiEpochId"][i],
-                                            segment_duration = segment_duration,
-                                            t_start = t_start,)
-                #add event object to segment
-                seg.epocharrays +=  [epa]
-            # read nested spiketrain
-            #run through all spike channels found
-            for i in range(self.metadata["num_spkChans"]):
-                #create spike object
-                sptr = self.read_spiketrain(lazy = lazy, cascade = cascade,
-                        channel_index = self.metadata["spkChanId"][i],
-                        segment_duration = segment_duration,
-                        t_start = t_start)
-                #add the spike object to segment
-                seg.spiketrains += [sptr]
+        # read nested analogsignals
+
+        if self.metadata["num_analogs"] == 0:
+            print("no analog signals in this file!")
+        else:
+            # run through the number of analog channels found at the __init__ function
+            for i in range(self.metadata["num_analogs"]):
+                # create an analog signal object for each channel found
+                ana = self.read_analogsignal(channel_index=self.metadata["elecChanId"][i],
+                                             segment_duration=segment_duration, t_start=t_start)
+                # add analog signal read to segment object
+                seg.analogsignals += [ana]
+
+        # read triggers (in this case without any duration)
+        for i in range(self.metadata["num_trigs"]):
+            # create event object for each trigger/bit found
+            eva = self.read_event(channel_index=self.metadata["triggersId"][i],
+                                  segment_duration=segment_duration,
+                                  t_start=t_start)
+            # add event object to segment
+            seg.events += [eva]
+        # read epochs (digital events with duration)
+        for i in range(self.metadata["num_digiEpochs"]):
+            # create event object for each trigger/bit found
+            epa = self.read_epoch(channel_index=self.metadata["digiEpochId"][i],
+                                  segment_duration=segment_duration,
+                                  t_start=t_start)
+            # add event object to segment
+            seg.epochs += [epa]
+        # read nested spiketrain
+        # run through all spike channels found
+        for i in range(self.metadata["num_spkChans"]):
+            # create spike object
+            sptr = self.read_spiketrain(channel_index=self.metadata["spkChanId"][i],
+                                        segment_duration=segment_duration,
+                                        t_start=t_start)
+            # add the spike object to segment
+            seg.spiketrains += [sptr]
 
         seg.create_many_to_one_relationship()
-        
+
         return seg
 
     """
         With this IO AnalogSignal can be accessed directly with its channel number
     """
+
     def read_analogsignal(self,
-                          # the 2 first key arguments are imposed by neo.io
-                          lazy = False,
-                          cascade = True,
-                          #channel index as given by the neuroshare API
-                          channel_index = 0,
-                          #time in seconds to be read
-                          segment_duration = 0.,
-                          #time in seconds to start reading from
-                          t_start = 0.,
+                          lazy=False,
+                          # channel index as given by the neuroshare API
+                          channel_index=0,
+                          # time in seconds to be read
+                          segment_duration=0.,
+                          # time in seconds to start reading from
+                          t_start=0.,
                           ):
-        
-        #some controls:        
-        #if no segment duration is given, use the complete file
-        if segment_duration ==0.:
-            segment_duration=float(self.metadata["TimeSpan"])
-        #if the segment duration is bigger than file, use the complete file
-        if segment_duration >=float(self.metadata["TimeSpan"]):
-            segment_duration=float(self.metadata["TimeSpan"])
-            
-        if lazy:
-            anasig = AnalogSignal([], units="V", sampling_rate =  self.metadata["sampRate"] * pq.Hz,
-                                  t_start=t_start * pq.s,
-                                  )
-            #create a dummie time vector                     
-            tvect = np.arange(t_start, t_start+ segment_duration , 1./self.metadata["sampRate"])                                  
-            # we add the attribute lazy_shape with the size if loaded
-            anasig.lazy_shape = tvect.shape
-        else:
-            #get the analog object
-            sig =  self.fd.get_entity(channel_index)
-            #get the units (V, mV etc)            
-            sigUnits = sig.units
-            #get the electrode number
-            chanName = sig.label[-4:]
-            
-            #transform t_start into index (reading will start from this index)           
-            startat = int(t_start*self.metadata["sampRate"])
-            #get the number of bins to read in
-            bins = int(segment_duration * self.metadata["sampRate"])
-            
-            #if the number of bins to read is bigger than 
-            #the total number of bins, read only till the end of analog object
-            if startat+bins > sig.item_count:
-                bins = sig.item_count-startat
-            #read the data from the sig object
-            sig,_,_ = sig.get_data(index = startat, count = bins)
-            #store it to the 'AnalogSignal' object
-            anasig = AnalogSignal(sig, units = sigUnits, sampling_rate=self.metadata["sampRate"] * pq.Hz,
-                                  t_start=t_start * pq.s,
-                                  t_stop = (t_start+segment_duration)*pq.s,
-                                  channel_index=channel_index)
-
-            # annotate from which electrode the signal comes from
-            anasig.annotate(info = "signal from channel %s" %chanName )
+        assert not lazy, 'Do not support lazy'
+
+        # some controls:
+        # if no segment duration is given, use the complete file
+        if segment_duration == 0.:
+            segment_duration = float(self.metadata["TimeSpan"])
+        # if the segment duration is bigger than file, use the complete file
+        if segment_duration >= float(self.metadata["TimeSpan"]):
+            segment_duration = float(self.metadata["TimeSpan"])
+
+        # get the analog object
+        sig = self.fd.get_entity(channel_index)
+        # get the units (V, mV etc)
+        sigUnits = sig.units
+        # get the electrode number
+        chanName = sig.label[-4:]
+
+        # transform t_start into index (reading will start from this index)
+        startat = int(t_start * self.metadata["sampRate"])
+        # get the number of bins to read in
+        bins = int(segment_duration * self.metadata["sampRate"])
+
+        # if the number of bins to read is bigger than
+        # the total number of bins, read only till the end of analog object
+        if startat + bins > sig.item_count:
+            bins = sig.item_count - startat
+        # read the data from the sig object
+        sig, _, _ = sig.get_data(index=startat, count=bins)
+        # store it to the 'AnalogSignal' object
+        anasig = AnalogSignal(sig, units=sigUnits, sampling_rate=self.metadata["sampRate"] * pq.Hz,
+                              t_start=t_start * pq.s,
+                              t_stop=(t_start + segment_duration) * pq.s,
+                              channel_index=channel_index)
+
+        # annotate from which electrode the signal comes from
+        anasig.annotate(info="signal from channel %s" % chanName)
 
         return anasig
 
-
-
-    #function to read spike trains
-    def read_spiketrain(self ,
-                        # the 2 first key arguments are imposed by neo.io API
-                        lazy = False,
-                        cascade = True,
-                        channel_index = 0,
-                        segment_duration = 0.,
-                        t_start = 0.):
+    # function to read spike trains
+    def read_spiketrain(self,
+                        lazy=False,
+                        channel_index=0,
+                        segment_duration=0.,
+                        t_start=0.):
         """
         Function to read in spike trains. This API still does not support read in of
         specific channels as they are recorded. rather the fuunction gets the entity set
         by 'channel_index' which is set in the __init__ function (all spike channels)
         """
-        
-        #sampling rate
+        assert not lazy, 'Do not support lazy'
+
+        # sampling rate
         sr = self.metadata["sampRate"]
-        
+
         # create a list to store spiketrain times
-        times = list() 
-        
-        if lazy:
-            # we add the attribute lazy_shape with the size if lazy
-            spiketr = SpikeTrain(times,units = pq.s, 
-                       t_stop = t_start+segment_duration,
-                       t_start = t_start*pq.s,lazy_shape = 40)
-        
-        else:
-            #get the spike data from a specific channel index
-            tempSpks =  self.fd.get_entity(channel_index)    
-            #transform t_start into index (reading will start from this index) 
-            startat = tempSpks.get_index_by_time(t_start,0)#zero means closest index to value
-            #get the last index to read, using segment duration and t_start
-            endat = tempSpks.get_index_by_time(float(segment_duration+t_start),-1)#-1 means last index before time
-            numIndx = endat-startat
-            #get the end point using segment duration
-            #create a numpy empty array to store the waveforms
-            waveforms=np.array(np.zeros([numIndx,tempSpks.max_sample_count]))
-            #loop through the data from the specific channel index
-            for i in range(startat,endat,1):
-                #get cutout, timestamp, cutout duration, and spike unit
-                tempCuts,timeStamp,duration,unit = tempSpks.get_data(i)
-                #save the cutout in the waveform matrix
-                waveforms[i]=tempCuts[0]
-                #append time stamp to list
-                times.append(timeStamp)
-                
-            #create a spike train object
-            spiketr = SpikeTrain(times,units = pq.s, 
-                         t_stop = t_start+segment_duration,
-                         t_start = t_start*pq.s,
-                         name ="spikes from electrode"+tempSpks.label[-3:],
-                         waveforms = waveforms*pq.volt,
-                         sampling_rate = sr * pq.Hz,
-                         file_origin = self.filename,
-                         annotate = ("channel_index:"+ str(channel_index)))
-            
+        times = list()
+
+        # get the spike data from a specific channel index
+        tempSpks = self.fd.get_entity(channel_index)
+        # transform t_start into index (reading will start from this index)
+        startat = tempSpks.get_index_by_time(t_start, 0)  # zero means closest index to value
+        # get the last index to read, using segment duration and t_start
+        # -1 means last index before time
+        endat = tempSpks.get_index_by_time(float(segment_duration + t_start), -1)
+        numIndx = endat - startat
+        # get the end point using segment duration
+        # create a numpy empty array to store the waveforms
+        waveforms = np.array(np.zeros([numIndx, tempSpks.max_sample_count]))
+        # loop through the data from the specific channel index
+        for i in range(startat, endat, 1):
+            # get cutout, timestamp, cutout duration, and spike unit
+            tempCuts, timeStamp, duration, unit = tempSpks.get_data(i)
+            # save the cutout in the waveform matrix (row index relative to startat)
+            waveforms[i - startat] = tempCuts[0]
+            # append time stamp to list
+            times.append(timeStamp)
+
+        # create a spike train object
+        spiketr = SpikeTrain(times, units=pq.s,
+                             t_stop=t_start + segment_duration,
+                             t_start=t_start * pq.s,
+                             name="spikes from electrode" + tempSpks.label[-3:],
+                             waveforms=waveforms * pq.volt,
+                             sampling_rate=sr * pq.Hz,
+                             file_origin=self.filename,
+                             annotate=("channel_index:" + str(channel_index)))
+
         return spiketr
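For orientation, a minimal usage sketch of the non-lazy spike-train reader above. The class name (NeuroshareapiIO), the method name (read_spiketrain) and the file name are assumptions not shown in this hunk; only the keyword arguments (channel_index, t_start, segment_duration) and the removal of lazy support come from the diff.

    # Hypothetical call into the reader shown above; class/method/file names are assumed.
    from neo import io

    reader = io.NeuroshareapiIO(filename='a_file.mcd')       # assumed constructor
    sptr = reader.read_spiketrain(channel_index=0,            # entity chosen in __init__
                                  t_start=0.,                 # seconds
                                  segment_duration=10.)       # seconds
    print(sptr.times[:5], sptr.waveforms.shape)
    # lazy reading is gone: passing lazy=True now raises AssertionError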
 
-    def read_eventarray(self,lazy = False, cascade = True,
-                        channel_index = 0,
-                        t_start = 0.,
-                        segment_duration = 0.):
+    def read_event(self, lazy=False, channel_index=0,
+                        t_start=0.,
+                        segment_duration=0.):
         """function to read digital timestamps. this function only reads the event
         onset. to get digital event durations, use the epoch function (to be implemented)."""
-        if lazy:
-            eva = Event(file_origin = self.filename)
-        else:
-            #create temporary empty lists to store data
-            tempNames = list()
-            tempTimeStamp = list()
-            #get entity from file
-            trigEntity = self.fd.get_entity(channel_index)
-            #transform t_start into index (reading will start from this index) 
-            startat = trigEntity.get_index_by_time(t_start,0)#zero means closest index to value
-            #get the last index to read, using segment duration and t_start
-            endat = trigEntity.get_index_by_time(float(segment_duration+t_start),-1)#-1 means last index before time
-            #numIndx = endat-startat
-            #run through specified intervals in entity
-            for i in range(startat,endat+1,1):#trigEntity.item_count):
-                #get in which digital bit was the trigger detected
-                tempNames.append(trigEntity.label[-8:])
-                #get the time stamps of onset events
-                tempData, onOrOff = trigEntity.get_data(i)
-                #if this was an onset event, save it to the list
-                #on triggered recordings it seems that only onset events are
-                #recorded. On continuous recordings both onset(==1) 
-                #and offset(==255) seem to be recorded
-                if onOrOff == 1:               
-                    #append the time stamp to them empty list
-                    tempTimeStamp.append(tempData)
-                #create an event array        
-            eva = Event(labels = np.array(tempNames,dtype = "S"),
-    			        times = np.array(tempTimeStamp)*pq.s,
-			     file_origin = self.filename,                            
-                             description = "the trigger events (without durations)")       
+        assert not lazy, 'Do not support lazy'
+
+        # create temporary empty lists to store data
+        tempNames = list()
+        tempTimeStamp = list()
+        # get entity from file
+        trigEntity = self.fd.get_entity(channel_index)
+        # transform t_start into index (reading will start from this index)
+        startat = trigEntity.get_index_by_time(t_start, 0)  # zero means closest index to value
+        # get the last index to read, using segment duration and t_start
+        endat = trigEntity.get_index_by_time(
+            float(segment_duration + t_start), -1)  # -1 means last index before time
+        # numIndx = endat-startat
+        # run through specified intervals in entity
+        for i in range(startat, endat + 1, 1):  # trigEntity.item_count):
+            # get in which digital bit was the trigger detected
+            tempNames.append(trigEntity.label[-8:])
+            # get the time stamps of onset events
+            tempData, onOrOff = trigEntity.get_data(i)
+            # if this was an onset event, save it to the list
+            # on triggered recordings it seems that only onset events are
+            # recorded. On continuous recordings both onset(==1)
+            # and offset(==255) seem to be recorded
+            if onOrOff == 1:
+                # append the time stamp to the list
+                tempTimeStamp.append(tempData)
+        # create an event array
+        eva = Event(labels=np.array(tempNames, dtype="S"),
+                    times=np.array(tempTimeStamp) * pq.s,
+                    file_origin=self.filename,
+                    description="the trigger events (without durations)")
         return eva
-        
-       
-    def read_epocharray(self,lazy = False, cascade = True, 
-                        channel_index = 0,
-                        t_start = 0.,
-                        segment_duration = 0.):
+
+    def read_epoch(self, lazy=False,
+                        channel_index=0,
+                        t_start=0.,
+                        segment_duration=0.):
         """function to read digital timestamps. this function reads the event
         onset and offset and outputs onset and duration. to get only onsets use
         the event array function"""
-        if lazy:
-            epa = Epoch(file_origin = self.filename,
-                        times=None, durations=None, labels=None)
-        else:
-            #create temporary empty lists to store data
-            tempNames = list()
-            tempTimeStamp = list()
-            durations = list()
-            #get entity from file
-            digEntity = self.fd.get_entity(channel_index)
-            #transform t_start into index (reading will start from this index) 
-            startat = digEntity.get_index_by_time(t_start,0)#zero means closest index to value
-            #get the last index to read, using segment duration and t_start
-            endat = digEntity.get_index_by_time(float(segment_duration+t_start),-1)#-1 means last index before time       
-            
-            #run through entity using only odd "i"s 
-            for i in range(startat,endat+1,1):
-                if i % 2 == 1:
-                    #get in which digital bit was the trigger detected
-                    tempNames.append(digEntity.label[-8:])
-                    #get the time stamps of even events
-                    tempData, onOrOff = digEntity.get_data(i-1)
-                    #if this was an onset event, save it to the list
-                    #on triggered recordings it seems that only onset events are
-                    #recorded. On continuous recordings both onset(==1) 
-                    #and offset(==255) seem to be recorded
-                    #if onOrOff == 1:
-                    #append the time stamp to them empty list
-                    tempTimeStamp.append(tempData)
-                
-                    #get time stamps of odd events
-                    tempData1, onOrOff = digEntity.get_data(i)
-                    #if onOrOff == 255:
-                    #pass
-                    durations.append(tempData1-tempData)
-            epa = Epoch(file_origin = self.filename,
-                        times = np.array(tempTimeStamp)*pq.s,
-                        durations = np.array(durations)*pq.s,
-                        labels = np.array(tempNames,dtype = "S"),
-                        description = "digital events with duration")
-            return epa
-        
-        
+        assert not lazy, 'Do not support lazy'
+
+        # create temporary empty lists to store data
+        tempNames = list()
+        tempTimeStamp = list()
+        durations = list()
+        # get entity from file
+        digEntity = self.fd.get_entity(channel_index)
+        # transform t_start into index (reading will start from this index)
+        startat = digEntity.get_index_by_time(t_start, 0)  # zero means closest index to value
+        # get the last index to read, using segment duration and t_start
+        # -1 means last index before time
+        endat = digEntity.get_index_by_time(float(segment_duration + t_start), -1)
+
+        # run through entity using only odd "i"s
+        for i in range(startat, endat + 1, 1):
+            if i % 2 == 1:
+                # get in which digital bit was the trigger detected
+                tempNames.append(digEntity.label[-8:])
+                # get the time stamps of even events
+                tempData, onOrOff = digEntity.get_data(i - 1)
+                # if this was an onset event, save it to the list
+                # on triggered recordings it seems that only onset events are
+                # recorded. On continuous recordings both onset(==1)
+                # and offset(==255) seem to be recorded
+                # if onOrOff == 1:
+                # append the time stamp to the list
+                tempTimeStamp.append(tempData)
+
+                # get time stamps of odd events
+                tempData1, onOrOff = digEntity.get_data(i)
+                # if onOrOff == 255:
+                # pass
+                durations.append(tempData1 - tempData)
+        epa = Epoch(file_origin=self.filename,
+                    times=np.array(tempTimeStamp) * pq.s,
+                    durations=np.array(durations) * pq.s,
+                    labels=np.array(tempNames, dtype="S"),
+                    description="digital events with duration")
+        return epa
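A hedged sketch contrasting the two renamed digital-channel readers above: read_event returns trigger onsets only, while read_epoch pairs consecutive on/off samples into onset plus duration. The IO class and file names are assumptions; the keyword arguments are taken from the signatures in the diff.

    # Hypothetical comparison of the two readers; class/file names are assumed.
    from neo import io

    reader = io.NeuroshareapiIO(filename='a_file.mcd')    # assumed constructor
    events = reader.read_event(channel_index=1, t_start=0., segment_duration=10.)
    epochs = reader.read_epoch(channel_index=1, t_start=0., segment_duration=10.)
    print(len(events.times))           # one timestamp per trigger onset
    print(epochs.durations[:3])        # offset minus onset for each on/off pair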

+ 239 - 245
code/python-neo/neo/io/neurosharectypesio.py

@@ -26,6 +26,7 @@ try:
     file
 except NameError:
     import io
+
     file = io.BufferedReader
 
 import numpy as np
@@ -34,17 +35,17 @@ import quantities as pq
 from neo.io.baseio import BaseIO
 from neo.core import Segment, AnalogSignal, SpikeTrain, Event
 
-ns_OK = 0 #Function successful
-ns_LIBERROR = -1 #Generic linked library error
-ns_TYPEERROR = -2 #Library unable to open file type
-ns_FILEERROR = -3 #File access or read error
-ns_BADFILE = -4 # Invalid file handle passed to function
-ns_BADENTITY = -5 #Invalid or inappropriate entity identifier specified
-ns_BADSOURCE = -6 #Invalid source identifier specified
-ns_BADINDEX = -7 #Invalid entity index specified
+ns_OK = 0  # Function successful
+ns_LIBERROR = -1  # Generic linked library error
+ns_TYPEERROR = -2  # Library unable to open file type
+ns_FILEERROR = -3  # File access or read error
+ns_BADFILE = -4  # Invalid file handle passed to function
+ns_BADENTITY = -5  # Invalid or inappropriate entity identifier specified
+ns_BADSOURCE = -6  # Invalid source identifier specified
+ns_BADINDEX = -7  # Invalid entity index specified
 
 
-class NeuroshareError( Exception ):
+class NeuroshareError(Exception):
     def __init__(self, lib, errno):
         self.lib = lib
         self.errno = errno
@@ -53,20 +54,22 @@ class NeuroshareError( Exception ):
         errstr = '{}: {}'.format(errno, pszMsgBuffer.value)
         Exception.__init__(self, errstr)
 
+
 class DllWithError():
     def __init__(self, lib):
         self.lib = lib
-    
+
     def __getattr__(self, attr):
         f = getattr(self.lib, attr)
         return self.decorate_with_error(f)
-    
+
     def decorate_with_error(self, f):
         def func_with_error(*args):
             errno = f(*args)
             if errno != ns_OK:
                 raise NeuroshareError(self.lib, errno)
             return errno
+
         return func_with_error
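DllWithError is a small proxy: attribute lookup fetches the ctypes function and wraps it so that a non-zero status raises immediately instead of being silently ignored. A self-contained sketch of the same pattern, with an invented toy library standing in for the DLL (the real code raises NeuroshareError and fetches the message via ns_GetLastErrorMsg):

    class CallFailed(Exception):
        pass

    class ErrorCheckingProxy:
        """Wrap a library-like object so every call's status code is checked."""
        def __init__(self, lib, ok=0):
            self._lib = lib
            self._ok = ok

        def __getattr__(self, name):
            func = getattr(self._lib, name)
            def checked(*args):
                status = func(*args)
                if status != self._ok:
                    raise CallFailed('{} returned {}'.format(name, status))
                return status
            return checked

    class FakeLib:                       # toy stand-in for the loaded DLL
        def ns_OpenFile(self, path):
            return 0                     # ns_OK
        def ns_GetFileInfo(self, handle):
            return -3                    # ns_FILEERROR

    lib = ErrorCheckingProxy(FakeLib())
    lib.ns_OpenFile('a_file')            # succeeds silently
    try:
        lib.ns_GetFileInfo(0)
    except CallFailed as exc:
        print(exc)                       # "ns_GetFileInfo returned -3"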
 
 
@@ -78,13 +81,13 @@ class NeurosharectypesIO(BaseIO):
     Usage:
         >>> from neo import io
         >>> r = io.NeuroshareIO(filename='a_file', dllname=the_name_of_dll)
-        >>> seg = r.read_segment(lazy=False, cascade=True, import_neuroshare_segment=True)
+        >>> seg = r.read_segment(import_neuroshare_segment=True)
         >>> print seg.analogsignals        # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
         [<AnalogSignal(array([ -1.77246094e+02,  -2.24707031e+02,  -2.66015625e+02,
         ...
         >>> print seg.spiketrains
         []
-        >>> print seg.eventarrays
+        >>> print seg.events
         [<EventArray: 1@1.12890625 s, 1@2.02734375 s, 1@3.82421875 s>]
 
     Note:
@@ -99,26 +102,24 @@ class NeurosharectypesIO(BaseIO):
 
     """
 
-    is_readable        = True
-    is_writable        = False
+    is_readable = True
+    is_writable = False
 
-    supported_objects            = [Segment , AnalogSignal, Event, SpikeTrain ]
-    readable_objects    = [Segment]
-    writeable_objects    = [ ]
+    supported_objects = [Segment, AnalogSignal, Event, SpikeTrain]
+    readable_objects = [Segment]
+    writeable_objects = []
 
-    has_header         = False
-    is_streameable     = False
+    has_header = False
+    is_streameable = False
 
-    read_params        = { Segment : [] }
-    write_params       = None
+    read_params = {Segment: []}
+    write_params = None
 
-    name               = 'neuroshare'
-    extensions          = [  ]
+    name = 'neuroshare'
+    extensions = []
     mode = 'file'
 
-
-
-    def __init__(self , filename = '', dllname = '') :
+    def __init__(self, filename='', dllname=''):
         """
         Arguments:
             filename: the file to read
@@ -127,195 +128,186 @@ class NeurosharectypesIO(BaseIO):
         BaseIO.__init__(self)
         self.dllname = dllname
         self.filename = filename
-        
-
-
 
-
-    def read_segment(self, import_neuroshare_segment = True,
-                     lazy=False, cascade=True):
+    def read_segment(self, import_neuroshare_segment=True,
+                     lazy=False):
         """
         Arguments:
-            import_neuroshare_segment: import neuroshare segment as SpikeTrain with associated waveforms or not imported at all.
-
+            import_neuroshare_segment: whether to import the neuroshare segment as a
+                SpikeTrain with associated waveforms; if False, it is not imported at all.
         """
-        seg = Segment( file_origin = os.path.basename(self.filename), )
-        
+        assert not lazy, 'Do not support lazy'
+
+        seg = Segment(file_origin=os.path.basename(self.filename), )
+
         if sys.platform.startswith('win'):
             neuroshare = ctypes.windll.LoadLibrary(self.dllname)
         elif sys.platform.startswith('linux'):
             neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
         neuroshare = DllWithError(neuroshare)
-        
-        #elif sys.platform.startswith('darwin'):
-        
+
+        # elif sys.platform.startswith('darwin'):
 
         # API version
         info = ns_LIBRARYINFO()
-        neuroshare.ns_GetLibraryInfo(ctypes.byref(info) , ctypes.sizeof(info))
-        seg.annotate(neuroshare_version = str(info.dwAPIVersionMaj)+'.'+str(info.dwAPIVersionMin))
-
-        if not cascade:
-            return seg
-
+        neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
+        seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj)
+                                        + '.' + str(info.dwAPIVersionMin))
 
         # open file
         hFile = ctypes.c_uint32(0)
-        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename) ,ctypes.byref(hFile))
+        neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename), ctypes.byref(hFile))
         fileinfo = ns_FILEINFO()
-        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo) , ctypes.sizeof(fileinfo))
-        
+        neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo), ctypes.sizeof(fileinfo))
+
         # read all entities
         for dwEntityID in range(fileinfo.dwEntityCount):
             entityInfo = ns_ENTITYINFO()
-            neuroshare.ns_GetEntityInfo( hFile, dwEntityID, ctypes.byref(entityInfo), ctypes.sizeof(entityInfo))
+            neuroshare.ns_GetEntityInfo(hFile, dwEntityID, ctypes.byref(
+                entityInfo), ctypes.sizeof(entityInfo))
 
             # EVENT
             if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
                 pEventInfo = ns_EVENTINFO()
-                neuroshare.ns_GetEventInfo ( hFile,  dwEntityID,  ctypes.byref(pEventInfo), ctypes.sizeof(pEventInfo))
+                neuroshare.ns_GetEventInfo(hFile, dwEntityID, ctypes.byref(
+                    pEventInfo), ctypes.sizeof(pEventInfo))
 
-                if pEventInfo.dwEventType == 0: #TEXT
+                if pEventInfo.dwEventType == 0:  # TEXT
                     pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
-                elif pEventInfo.dwEventType == 1:#CVS
+                elif pEventInfo.dwEventType == 1:  # CVS
                     pData = ctypes.create_string_buffer(pEventInfo.dwMaxDataLength)
-                elif pEventInfo.dwEventType == 2:# 8bit
+                elif pEventInfo.dwEventType == 2:  # 8bit
                     pData = ctypes.c_byte(0)
-                elif pEventInfo.dwEventType == 3:# 16bit
+                elif pEventInfo.dwEventType == 3:  # 16bit
                     pData = ctypes.c_int16(0)
-                elif pEventInfo.dwEventType == 4:# 32bit
+                elif pEventInfo.dwEventType == 4:  # 32bit
                     pData = ctypes.c_int32(0)
-                pdTimeStamp  = ctypes.c_double(0.)
+                pdTimeStamp = ctypes.c_double(0.)
                 pdwDataRetSize = ctypes.c_uint32(0)
 
-                ea = Event(name = str(entityInfo.szEntityLabel),)
-                if not lazy:
-                    times = [ ]
-                    labels = [ ]
-                    for dwIndex in range(entityInfo.dwItemCount ):
-                        neuroshare.ns_GetEventData ( hFile, dwEntityID, dwIndex,
-                                            ctypes.byref(pdTimeStamp), ctypes.byref(pData),
-                                            ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize) )
-                        times.append(pdTimeStamp.value)
-                        labels.append(str(pData.value))
-                    ea.times = times*pq.s
-                    ea.labels = np.array(labels, dtype ='S')
-                else :
-                    ea.lazy_shape = entityInfo.dwItemCount
-                seg.eventarrays.append(ea)
+                ea = Event(name=str(entityInfo.szEntityLabel), )
+
+                times = []
+                labels = []
+                for dwIndex in range(entityInfo.dwItemCount):
+                    neuroshare.ns_GetEventData(hFile, dwEntityID, dwIndex,
+                                               ctypes.byref(pdTimeStamp), ctypes.byref(pData),
+                                               ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
+                    times.append(pdTimeStamp.value)
+                    labels.append(str(pData.value))
+                ea.times = times * pq.s
+                ea.labels = np.array(labels, dtype='S')
+
+                seg.events.append(ea)
 
             # analog
             if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
                 pAnalogInfo = ns_ANALOGINFO()
 
-                neuroshare.ns_GetAnalogInfo( hFile, dwEntityID,ctypes.byref(pAnalogInfo),ctypes.sizeof(pAnalogInfo) )
+                neuroshare.ns_GetAnalogInfo(hFile, dwEntityID, ctypes.byref(
+                    pAnalogInfo), ctypes.sizeof(pAnalogInfo))
                 dwIndexCount = entityInfo.dwItemCount
 
-                if lazy:
-                    signal = [ ]*pq.Quantity(1, pAnalogInfo.szUnits)
-                else:
-                    pdwContCount = ctypes.c_uint32(0)
-                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
-                    total_read = 0
-                    while total_read< entityInfo.dwItemCount:
-                        dwStartIndex = ctypes.c_uint32(total_read)
-                        dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
-                        
-                        neuroshare.ns_GetAnalogData( hFile,  dwEntityID,  dwStartIndex,
-                                     dwStopIndex, ctypes.byref( pdwContCount) , pData[total_read:].ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
-                        total_read += pdwContCount.value
-                            
-                    signal =  pq.Quantity(pData, units=pAnalogInfo.szUnits, copy = False)
-
-                #t_start
+                pdwContCount = ctypes.c_uint32(0)
+                pData = np.zeros((entityInfo.dwItemCount,), dtype='float64')
+                total_read = 0
+                while total_read < entityInfo.dwItemCount:
+                    dwStartIndex = ctypes.c_uint32(total_read)
+                    dwStopIndex = ctypes.c_uint32(entityInfo.dwItemCount - total_read)
+
+                    neuroshare.ns_GetAnalogData(hFile, dwEntityID, dwStartIndex,
+                                                dwStopIndex, ctypes.byref(pdwContCount),
+                                                pData[total_read:].ctypes.data_as(
+                                                    ctypes.POINTER(ctypes.c_double)))
+                    total_read += pdwContCount.value
+
+                signal = pq.Quantity(pData, units=pAnalogInfo.szUnits, copy=False)
+
+                # t_start
                 dwIndex = 0
                 pdTime = ctypes.c_double(0)
-                neuroshare.ns_GetTimeByIndex( hFile,  dwEntityID,  dwIndex, ctypes.byref(pdTime))
+                neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex, ctypes.byref(pdTime))
 
                 anaSig = AnalogSignal(signal,
-                                                    sampling_rate = pAnalogInfo.dSampleRate*pq.Hz,
-                                                    t_start = pdTime.value * pq.s,
-                                                    name = str(entityInfo.szEntityLabel),
-                                                    )
-                anaSig.annotate( probe_info = str(pAnalogInfo.szProbeInfo))
-                if lazy:
-                    anaSig.lazy_shape = entityInfo.dwItemCount
-                seg.analogsignals.append( anaSig )
+                                      sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
+                                      t_start=pdTime.value * pq.s,
+                                      name=str(entityInfo.szEntityLabel),
+                                      )
+                anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
+                seg.analogsignals.append(anaSig)
 
-
-            #segment
-            if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:
+            # segment
+            if entity_types[
+                    entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' and import_neuroshare_segment:
 
                 pdwSegmentInfo = ns_SEGMENTINFO()
                 if not str(entityInfo.szEntityLabel).startswith('spks'):
                     continue
 
-                neuroshare.ns_GetSegmentInfo( hFile,  dwEntityID,
-                                             ctypes.byref(pdwSegmentInfo), ctypes.sizeof(pdwSegmentInfo) )
+                neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
+                                             ctypes.byref(pdwSegmentInfo),
+                                             ctypes.sizeof(pdwSegmentInfo))
                 nsource = pdwSegmentInfo.dwSourceCount
 
-                pszMsgBuffer  = ctypes.create_string_buffer(" "*256)
+                pszMsgBuffer = ctypes.create_string_buffer(" " * 256)
                 neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)
-                
-                for dwSourceID in range(pdwSegmentInfo.dwSourceCount) :
+
+                for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                     pSourceInfo = ns_SEGSOURCEINFO()
-                    neuroshare.ns_GetSegmentSourceInfo( hFile,  dwEntityID, dwSourceID,
-                                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo) )
-
-                if lazy:
-                    sptr = SpikeTrain(times, name = str(entityInfo.szEntityLabel), t_stop = 0.*pq.s)
-                    sptr.lazy_shape = entityInfo.dwItemCount
-                else:
-                    pdTimeStamp  = ctypes.c_double(0.)
-                    dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount*pdwSegmentInfo.dwSourceCount
-                    pData = np.zeros( (dwDataBufferSize), dtype = 'float64')
-                    pdwSampleCount = ctypes.c_uint32(0)
-                    pdwUnitID= ctypes.c_uint32(0)
-
-                    nsample  = int(dwDataBufferSize)
-                    times = np.empty( (entityInfo.dwItemCount), dtype = 'f')
-                    waveforms = np.empty( (entityInfo.dwItemCount, nsource, nsample), dtype = 'f')
-                    for dwIndex in range(entityInfo.dwItemCount ):
-                        neuroshare.ns_GetSegmentData ( hFile,  dwEntityID,  dwIndex,
-                            ctypes.byref(pdTimeStamp), pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
-                            dwDataBufferSize * 8, ctypes.byref(pdwSampleCount),
-                                ctypes.byref(pdwUnitID ) )
-
-                        times[dwIndex] = pdTimeStamp.value
-                        waveforms[dwIndex, :,:] = pData[:nsample*nsource].reshape(nsample ,nsource).transpose()
-                    
-                    sptr = SpikeTrain(times = pq.Quantity(times, units = 's', copy = False),
-                                        t_stop = times.max(),
-                                        waveforms = pq.Quantity(waveforms, units = str(pdwSegmentInfo.szUnits), copy = False ),
-                                        left_sweep = nsample/2./float(pdwSegmentInfo.dSampleRate)*pq.s,
-                                        sampling_rate = float(pdwSegmentInfo.dSampleRate)*pq.Hz,
-                                        name = str(entityInfo.szEntityLabel),
-                                        )
+                    neuroshare.ns_GetSegmentSourceInfo(hFile, dwEntityID, dwSourceID,
+                                                       ctypes.byref(pSourceInfo),
+                                                       ctypes.sizeof(pSourceInfo))
+
+                pdTimeStamp = ctypes.c_double(0.)
+                dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * pdwSegmentInfo.dwSourceCount
+                pData = np.zeros((dwDataBufferSize), dtype='float64')
+                pdwSampleCount = ctypes.c_uint32(0)
+                pdwUnitID = ctypes.c_uint32(0)
+
+                nsample = int(dwDataBufferSize)
+                times = np.empty((entityInfo.dwItemCount), dtype='f')
+                waveforms = np.empty((entityInfo.dwItemCount, nsource, nsample), dtype='f')
+                for dwIndex in range(entityInfo.dwItemCount):
+                    neuroshare.ns_GetSegmentData(
+                        hFile, dwEntityID, dwIndex,
+                        ctypes.byref(pdTimeStamp),
+                        pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
+                        dwDataBufferSize * 8,
+                        ctypes.byref(pdwSampleCount),
+                        ctypes.byref(pdwUnitID))
+
+                    times[dwIndex] = pdTimeStamp.value
+                    waveforms[dwIndex, :, :] = pData[:nsample * nsource].reshape(
+                        nsample, nsource).transpose()
+
+                sptr = SpikeTrain(times=pq.Quantity(times, units='s', copy=False),
+                                  t_stop=times.max(),
+                                  waveforms=pq.Quantity(waveforms, units=str(
+                                      pdwSegmentInfo.szUnits), copy=False),
+                                  left_sweep=nsample / 2.
+                                             / float(pdwSegmentInfo.dSampleRate) * pq.s,
+                                  sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
+                                  name=str(entityInfo.szEntityLabel),
+                                  )
                 seg.spiketrains.append(sptr)
 
-
             # neuralevent
             if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':
-
                 pNeuralInfo = ns_NEURALINFO()
-                neuroshare.ns_GetNeuralInfo ( hFile,  dwEntityID,
-                                 ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))
-
-                if lazy:
-                    times = [ ]*pq.s
-                    t_stop = 0*pq.s
-                else:
-                    pData = np.zeros( (entityInfo.dwItemCount,), dtype = 'float64')
-                    dwStartIndex = 0
-                    dwIndexCount = entityInfo.dwItemCount
-                    neuroshare.ns_GetNeuralData( hFile,  dwEntityID,  dwStartIndex,
-                        dwIndexCount,  pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
-                    times = pData*pq.s
-                    t_stop = times.max()
-                sptr = SpikeTrain(times, t_stop =t_stop,
-                                                name = str(entityInfo.szEntityLabel),)
-                if lazy:
-                    sptr.lazy_shape = entityInfo.dwItemCount
+                neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
+                                            ctypes.byref(pNeuralInfo), ctypes.sizeof(pNeuralInfo))
+
+                pData = np.zeros((entityInfo.dwItemCount,), dtype='float64')
+                dwStartIndex = 0
+                dwIndexCount = entityInfo.dwItemCount
+                neuroshare.ns_GetNeuralData(hFile, dwEntityID, dwStartIndex,
+                                            dwIndexCount,
+                                            pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
+                times = pData * pq.s
+                t_stop = times.max()
+
+                sptr = SpikeTrain(times, t_stop=t_stop,
+                                  name=str(entityInfo.szEntityLabel), )
                 seg.spiketrains.append(sptr)
 
         # close
@@ -325,14 +317,12 @@ class NeurosharectypesIO(BaseIO):
         return seg
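The analog branch above accumulates samples in chunks: each ns_GetAnalogData call reports via pdwContCount how many samples it actually delivered, and total_read advances until dwItemCount is reached. A self-contained sketch of that accumulation loop, with an invented read_chunk function standing in for the DLL call:

    import numpy as np

    def read_chunk(source, start, wanted, out):
        """Toy stand-in for ns_GetAnalogData: copies up to `wanted` samples into out."""
        count = min(wanted, 7, len(source) - start)   # pretend the driver caps chunks at 7
        out[:count] = source[start:start + count]
        return count                                  # plays the role of pdwContCount

    source = np.arange(20, dtype='float64')
    item_count = len(source)
    data = np.zeros(item_count, dtype='float64')

    total_read = 0
    while total_read < item_count:
        total_read += read_chunk(source, total_read,
                                 item_count - total_read, data[total_read:])

    assert np.array_equal(data, source)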
 
 
-
-
 # neuroshare structures
 class ns_FILEDESC(ctypes.Structure):
-    _fields_ = [('szDescription', ctypes.c_char*32),
-                ('szExtension', ctypes.c_char*8),
-                ('szMacCodes', ctypes.c_char*8),
-                ('szMagicCode', ctypes.c_char*16),
+    _fields_ = [('szDescription', ctypes.c_char * 32),
+                ('szExtension', ctypes.c_char * 8),
+                ('szMacCodes', ctypes.c_char * 8),
+                ('szMagicCode', ctypes.c_char * 16),
                 ]
 
 
@@ -341,110 +331,114 @@ class ns_LIBRARYINFO(ctypes.Structure):
                 ('dwLibVersionMin', ctypes.c_uint32),
                 ('dwAPIVersionMaj', ctypes.c_uint32),
                 ('dwAPIVersionMin', ctypes.c_uint32),
-                ('szDescription', ctypes.c_char*64),
-                ('szCreator',ctypes.c_char*64),
-                ('dwTime_Year',ctypes.c_uint32),
-                ('dwTime_Month',ctypes.c_uint32),
-                ('dwTime_Day',ctypes.c_uint32),
-                ('dwFlags',ctypes.c_uint32),
-                ('dwMaxFiles',ctypes.c_uint32),
-                ('dwFileDescCount',ctypes.c_uint32),
-                ('FileDesc',ns_FILEDESC*16),
+                ('szDescription', ctypes.c_char * 64),
+                ('szCreator', ctypes.c_char * 64),
+                ('dwTime_Year', ctypes.c_uint32),
+                ('dwTime_Month', ctypes.c_uint32),
+                ('dwTime_Day', ctypes.c_uint32),
+                ('dwFlags', ctypes.c_uint32),
+                ('dwMaxFiles', ctypes.c_uint32),
+                ('dwFileDescCount', ctypes.c_uint32),
+                ('FileDesc', ns_FILEDESC * 16),
                 ]
 
+
 class ns_FILEINFO(ctypes.Structure):
-    _fields_ = [('szFileType', ctypes.c_char*32),
+    _fields_ = [('szFileType', ctypes.c_char * 32),
                 ('dwEntityCount', ctypes.c_uint32),
                 ('dTimeStampResolution', ctypes.c_double),
                 ('dTimeSpan', ctypes.c_double),
-                ('szAppName', ctypes.c_char*64),
-                ('dwTime_Year',ctypes.c_uint32),
-                ('dwTime_Month',ctypes.c_uint32),
-                ('dwReserved',ctypes.c_uint32),
-                ('dwTime_Day',ctypes.c_uint32),
-                ('dwTime_Hour',ctypes.c_uint32),
-                ('dwTime_Min',ctypes.c_uint32),
-                ('dwTime_Sec',ctypes.c_uint32),
-                ('dwTime_MilliSec',ctypes.c_uint32),
-                ('szFileComment',ctypes.c_char*256),
+                ('szAppName', ctypes.c_char * 64),
+                ('dwTime_Year', ctypes.c_uint32),
+                ('dwTime_Month', ctypes.c_uint32),
+                ('dwReserved', ctypes.c_uint32),
+                ('dwTime_Day', ctypes.c_uint32),
+                ('dwTime_Hour', ctypes.c_uint32),
+                ('dwTime_Min', ctypes.c_uint32),
+                ('dwTime_Sec', ctypes.c_uint32),
+                ('dwTime_MilliSec', ctypes.c_uint32),
+                ('szFileComment', ctypes.c_char * 256),
                 ]
 
+
 class ns_ENTITYINFO(ctypes.Structure):
-    _fields_ = [('szEntityLabel', ctypes.c_char*32),
-                ('dwEntityType',ctypes.c_uint32),
-                ('dwItemCount',ctypes.c_uint32),
+    _fields_ = [('szEntityLabel', ctypes.c_char * 32),
+                ('dwEntityType', ctypes.c_uint32),
+                ('dwItemCount', ctypes.c_uint32),
                 ]
 
-entity_types = { 0 : 'ns_ENTITY_UNKNOWN' ,
-                    1 : 'ns_ENTITY_EVENT' ,
-                    2 : 'ns_ENTITY_ANALOG' ,
-                    3 : 'ns_ENTITY_SEGMENT' ,
-                    4 : 'ns_ENTITY_NEURALEVENT' ,
-                    }
+
+entity_types = {0: 'ns_ENTITY_UNKNOWN',
+                1: 'ns_ENTITY_EVENT',
+                2: 'ns_ENTITY_ANALOG',
+                3: 'ns_ENTITY_SEGMENT',
+                4: 'ns_ENTITY_NEURALEVENT',
+                }
+
 
 class ns_EVENTINFO(ctypes.Structure):
     _fields_ = [
-                ('dwEventType',ctypes.c_uint32),
-                ('dwMinDataLength',ctypes.c_uint32),
-                ('dwMaxDataLength',ctypes.c_uint32),
-                ('szCSVDesc', ctypes.c_char*128),
-                ]
+        ('dwEventType', ctypes.c_uint32),
+        ('dwMinDataLength', ctypes.c_uint32),
+        ('dwMaxDataLength', ctypes.c_uint32),
+        ('szCSVDesc', ctypes.c_char * 128),
+    ]
+
 
 class ns_ANALOGINFO(ctypes.Structure):
     _fields_ = [
-                ('dSampleRate',ctypes.c_double),
-                ('dMinVal',ctypes.c_double),
-                ('dMaxVal',ctypes.c_double),
-                ('szUnits', ctypes.c_char*16),
-                ('dResolution',ctypes.c_double),
-                ('dLocationX',ctypes.c_double),
-                ('dLocationY',ctypes.c_double),
-                ('dLocationZ',ctypes.c_double),
-                ('dLocationUser',ctypes.c_double),
-                ('dHighFreqCorner',ctypes.c_double),
-                ('dwHighFreqOrder',ctypes.c_uint32),
-                ('szHighFilterType', ctypes.c_char*16),
-                ('dLowFreqCorner',ctypes.c_double),
-                ('dwLowFreqOrder',ctypes.c_uint32),
-                ('szLowFilterType', ctypes.c_char*16),
-                ('szProbeInfo', ctypes.c_char*128),
-            ]
+        ('dSampleRate', ctypes.c_double),
+        ('dMinVal', ctypes.c_double),
+        ('dMaxVal', ctypes.c_double),
+        ('szUnits', ctypes.c_char * 16),
+        ('dResolution', ctypes.c_double),
+        ('dLocationX', ctypes.c_double),
+        ('dLocationY', ctypes.c_double),
+        ('dLocationZ', ctypes.c_double),
+        ('dLocationUser', ctypes.c_double),
+        ('dHighFreqCorner', ctypes.c_double),
+        ('dwHighFreqOrder', ctypes.c_uint32),
+        ('szHighFilterType', ctypes.c_char * 16),
+        ('dLowFreqCorner', ctypes.c_double),
+        ('dwLowFreqOrder', ctypes.c_uint32),
+        ('szLowFilterType', ctypes.c_char * 16),
+        ('szProbeInfo', ctypes.c_char * 128),
+    ]
 
 
 class ns_SEGMENTINFO(ctypes.Structure):
     _fields_ = [
-                ('dwSourceCount',ctypes.c_uint32),
-                ('dwMinSampleCount',ctypes.c_uint32),
-                ('dwMaxSampleCount',ctypes.c_uint32),
-                ('dSampleRate',ctypes.c_double),
-                ('szUnits', ctypes.c_char*32),
-                ]
+        ('dwSourceCount', ctypes.c_uint32),
+        ('dwMinSampleCount', ctypes.c_uint32),
+        ('dwMaxSampleCount', ctypes.c_uint32),
+        ('dSampleRate', ctypes.c_double),
+        ('szUnits', ctypes.c_char * 32),
+    ]
+
 
 class ns_SEGSOURCEINFO(ctypes.Structure):
     _fields_ = [
-                ('dMinVal',ctypes.c_double),
-                ('dMaxVal',ctypes.c_double),
-                ('dResolution',ctypes.c_double),
-                ('dSubSampleShift',ctypes.c_double),
-                ('dLocationX',ctypes.c_double),
-                ('dLocationY',ctypes.c_double),
-                ('dLocationZ',ctypes.c_double),
-                ('dLocationUser',ctypes.c_double),
-                ('dHighFreqCorner',ctypes.c_double),
-                ('dwHighFreqOrder',ctypes.c_uint32),
-                ('szHighFilterType', ctypes.c_char*16),
-                ('dLowFreqCorner',ctypes.c_double),
-                ('dwLowFreqOrder',ctypes.c_uint32),
-                ('szLowFilterType', ctypes.c_char*16),
-                ('szProbeInfo', ctypes.c_char*128),
-                ]
+        ('dMinVal', ctypes.c_double),
+        ('dMaxVal', ctypes.c_double),
+        ('dResolution', ctypes.c_double),
+        ('dSubSampleShift', ctypes.c_double),
+        ('dLocationX', ctypes.c_double),
+        ('dLocationY', ctypes.c_double),
+        ('dLocationZ', ctypes.c_double),
+        ('dLocationUser', ctypes.c_double),
+        ('dHighFreqCorner', ctypes.c_double),
+        ('dwHighFreqOrder', ctypes.c_uint32),
+        ('szHighFilterType', ctypes.c_char * 16),
+        ('dLowFreqCorner', ctypes.c_double),
+        ('dwLowFreqOrder', ctypes.c_uint32),
+        ('szLowFilterType', ctypes.c_char * 16),
+        ('szProbeInfo', ctypes.c_char * 128),
+    ]
+
 
 class ns_NEURALINFO(ctypes.Structure):
     _fields_ = [
-                ('dwSourceEntityID',ctypes.c_uint32),
-                ('dwSourceUnitID',ctypes.c_uint32),
-                ('szProbeInfo',ctypes.c_char*128),
-                ]
-
-
-
+        ('dwSourceEntityID', ctypes.c_uint32),
+        ('dwSourceUnitID', ctypes.c_uint32),
+        ('szProbeInfo', ctypes.c_char * 128),
+    ]
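These ctypes.Structure subclasses mirror the C structs of the Neuroshare API: the reader allocates one, hands it to the DLL by reference together with its size, and then reads plain Python values back from its fields. A self-contained illustration of that round trip with an invented toy struct and a fake library call (no DLL required):

    import ctypes

    class ToyEntityInfo(ctypes.Structure):           # invented, shaped like ns_ENTITYINFO
        _fields_ = [('szEntityLabel', ctypes.c_char * 32),
                    ('dwEntityType', ctypes.c_uint32),
                    ('dwItemCount', ctypes.c_uint32)]

    def fake_get_entity_info(pinfo, size):
        """Stand-in for ns_GetEntityInfo(hFile, id, byref(info), sizeof(info))."""
        info = pinfo.contents                        # dereference the pointer
        assert size == ctypes.sizeof(ToyEntityInfo)
        info.szEntityLabel = b'spks 001'
        info.dwEntityType = 3                        # ns_ENTITY_SEGMENT
        info.dwItemCount = 128
        return 0                                     # ns_OK

    info = ToyEntityInfo()
    fake_get_entity_info(ctypes.pointer(info), ctypes.sizeof(info))
    print(info.szEntityLabel.decode(), info.dwEntityType, info.dwItemCount)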

File diff suppressed because it is too large
+ 997 - 981
code/python-neo/neo/io/nixio.py


+ 45 - 50
code/python-neo/neo/io/nsdfio.py

@@ -84,7 +84,7 @@ class NSDFIO(BaseIO):
         for i, block in enumerate(blocks):
             self.write_block(block, name_pattern.format(i), writer, blocks_model)
 
-    def write_block(self, block = None, name='0', writer=None, parent=None):
+    def write_block(self, block=None, name='0', writer=None, parent=None):
         """
         Write a Block to the file
 
@@ -110,7 +110,6 @@ class NSDFIO(BaseIO):
 
         self._clean_nsdfio_annotations(block)
 
-
     def _write_block_children(self, block, block_model, writer):
         segments_model = nsdf.ModelComponent(name='segments', uid=uuid1().hex, parent=block_model)
         self._write_model_component(segments_model, writer)
@@ -119,15 +118,15 @@ class NSDFIO(BaseIO):
             self.write_segment(segment=segment, name=name_pattern.format(i),
                                writer=writer, parent=segments_model)
 
-        channel_indexes_model = nsdf.ModelComponent(name='channel_indexes', uid=uuid1().hex, parent=block_model)
+        channel_indexes_model = nsdf.ModelComponent(
+            name='channel_indexes', uid=uuid1().hex, parent=block_model)
         self._write_model_component(channel_indexes_model, writer)
         name_pattern = self._name_pattern(len(block.channel_indexes))
         for i, channelindex in enumerate(block.channel_indexes):
             self.write_channelindex(channelindex=channelindex, name=name_pattern.format(i),
                                     writer=writer, parent=channel_indexes_model)
 
-
-    def write_segment(self, segment = None, name='0', writer=None, parent=None):
+    def write_segment(self, segment=None, name='0', writer=None, parent=None):
         """
         Write a Segment to the file
 
@@ -157,7 +156,8 @@ class NSDFIO(BaseIO):
             self._clean_nsdfio_annotations(segment)
 
     def _write_segment_children(self, model, segment, writer):
-        analogsignals_model = nsdf.ModelComponent(name='analogsignals', uid=uuid1().hex, parent=model)
+        analogsignals_model = nsdf.ModelComponent(
+            name='analogsignals', uid=uuid1().hex, parent=model)
         self._write_model_component(analogsignals_model, writer)
         name_pattern = self._name_pattern(len(segment.analogsignals))
         for i, signal in enumerate(segment.analogsignals):
@@ -185,7 +185,8 @@ class NSDFIO(BaseIO):
         signal.annotations['nsdfio_uid'] = uid
 
         r_signal = np.swapaxes(signal, 0, 1)
-        channels_model, channels, source_ds = self._create_signal_data_sources(model, r_signal, uid, writer)
+        channels_model, channels, source_ds = self._create_signal_data_sources(
+            model, r_signal, uid, writer)
         self._write_signal_data(model, channels, r_signal, signal, source_ds, writer)
 
         self._write_model_component(model, writer)
@@ -213,7 +214,8 @@ class NSDFIO(BaseIO):
         self._write_channelindex_children(channelindex, model, writer)
 
     def _write_channelindex_children(self, channelindex, model, writer):
-        analogsignals_model = nsdf.ModelComponent(name='analogsignals', uid=uuid1().hex, parent=model)
+        analogsignals_model = nsdf.ModelComponent(
+            name='analogsignals', uid=uuid1().hex, parent=model)
         self._write_model_component(analogsignals_model, writer)
         name_pattern = self._name_pattern(len(channelindex.analogsignals))
         for i, signal in enumerate(channelindex.analogsignals):
@@ -242,7 +244,7 @@ class NSDFIO(BaseIO):
         return '{{:0{}d}}'.format(self._number_of_digits(max(how_many_items - 1, 0)))
 
     def _clean_nsdfio_annotations(self, object):
-        nsdfio_annotations = ('nsdfio_uid', )
+        nsdfio_annotations = ('nsdfio_uid',)
 
         for key in nsdfio_annotations:
             object.annotations.pop(key, None)
@@ -329,33 +331,35 @@ class NSDFIO(BaseIO):
         else:
             group.create_dataset(name, data=array)
 
-    def read_all_blocks(self, lazy=False, cascade=True):
+    def read_all_blocks(self, lazy=False):
         """
         Read all blocks from the file
 
         :param lazy: Enables lazy reading
-        :param cascade: Read nested objects or not?
         :return: List of read blocks
         """
+        assert not lazy, 'Do not support lazy'
+
         reader = self._init_reading()
         blocks = []
 
         blocks_path = self.modeltree_path + 'blocks/'
         for block in reader.model[blocks_path].values():
-            blocks.append(self.read_block(lazy, cascade, group=block, reader=reader))
+            blocks.append(self.read_block(group=block, reader=reader))
 
         return blocks
 
-    def read_block(self, lazy=False, cascade=True, group=None, reader=None):
+    def read_block(self, lazy=False, group=None, reader=None):
         """
         Read a Block from the file
 
         :param lazy: Enables lazy reading
-        :param cascade: Read nested objects or not?
         :param group: HDF5 Group representing the block in NSDF model tree (optional)
         :param reader: NSDFReader instance (optional)
         :return: Read block
         """
+        assert not lazy, 'Do not support lazy'
+
         block = Block()
         group, reader = self._select_first_container(group, reader, 'block')
 
@@ -364,30 +368,30 @@ class NSDFIO(BaseIO):
 
         attrs = group.attrs
 
-        if cascade:
-            self._read_block_children(lazy, block, group, reader)
+        self._read_block_children(block, group, reader)
         block.create_many_to_one_relationship()
 
         self._read_container_metadata(attrs, block)
 
         return block
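A hedged round-trip sketch for the cleaned-up NSDFIO read API (requires the nsdf package; the file name is an assumption, the method names come from the diff):

    # Hypothetical usage; the HDF5 file name is assumed.
    from neo.io import NSDFIO

    reader = NSDFIO(filename='data.h5')
    blocks = reader.read_all_blocks()      # children are always read now (no cascade flag)
    block = reader.read_block()            # first block in the file
    for seg in block.segments:
        print(len(seg.analogsignals))
    # read_block(lazy=True) raises AssertionError: lazy loading is not supported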
 
-    def _read_block_children(self, lazy, block, group, reader):
+    def _read_block_children(self, block, group, reader):
         for child in group['segments/'].values():
-            block.segments.append(self.read_segment(lazy=lazy, group=child, reader=reader))
+            block.segments.append(self.read_segment(group=child, reader=reader))
         for child in group['channel_indexes/'].values():
-            block.channel_indexes.append(self.read_channelindex(lazy=lazy, group=child, reader=reader))
+            block.channel_indexes.append(self.read_channelindex(group=child, reader=reader))
 
-    def read_segment(self, lazy=False, cascade=True, group=None, reader=None):
+    def read_segment(self, lazy=False, group=None, reader=None):
         """
         Read a Segment from the file
 
         :param lazy: Enables lazy reading
-        :param cascade: Read nested objects or not?
         :param group: HDF5 Group representing the segment in NSDF model tree (optional)
         :param reader: NSDFReader instance (optional)
         :return: Read segment
         """
+        assert not lazy, 'Do not support lazy'
+
         segment = Segment()
         group, reader = self._select_first_container(group, reader, 'segment')
 
@@ -396,27 +400,27 @@ class NSDFIO(BaseIO):
 
         attrs = group.attrs
 
-        if cascade:
-            self._read_segment_children(lazy, group, reader, segment)
+        self._read_segment_children(group, reader, segment)
 
         self._read_container_metadata(attrs, segment)
 
         return segment
 
-    def _read_segment_children(self, lazy, group, reader, segment):
+    def _read_segment_children(self, group, reader, segment):
         for child in group['analogsignals/'].values():
-            segment.analogsignals.append(self.read_analogsignal(lazy=lazy, group=child, reader=reader))
+            segment.analogsignals.append(self.read_analogsignal(group=child, reader=reader))
 
-    def read_analogsignal(self, lazy=False, cascade=True, group=None, reader=None):
+    def read_analogsignal(self, lazy=False, group=None, reader=None):
         """
         Read an AnalogSignal from the file (must be child of a Segment)
 
         :param lazy: Enables lazy reading
-        :param cascade: Read nested objects or not?
         :param group: HDF5 Group representing the analogsignal in NSDF model tree
         :param reader: NSDFReader instance
         :return: Read AnalogSignal
         """
+        assert not lazy, 'Do not support lazy'
+
         attrs = group.attrs
 
         if attrs.get('reference_to') is not None:
@@ -426,36 +430,36 @@ class NSDFIO(BaseIO):
         data_group = reader.data['uniform/{}/signal'.format(uid)]
 
         t_start = self._read_analogsignal_t_start(attrs, data_group)
-        signal = self._create_analogsignal(data_group, lazy, group, t_start, uid, reader)
+        signal = self._create_analogsignal(data_group, group, t_start, uid, reader)
 
         self._read_basic_metadata(attrs, signal)
 
         self.objects_dict[uid] = signal
         return signal
 
-    def read_channelindex(self, lazy=False, cascade=True, group=None, reader=None):
+    def read_channelindex(self, lazy=False, group=None, reader=None):
         """
         Read a ChannelIndex from the file (must be child of a Block)
 
         :param lazy: Enables lazy reading
-        :param cascade: Read nested objects or not?
         :param group: HDF5 Group representing the channelindex in NSDF model tree
         :param reader: NSDFReader instance
         :return: Read ChannelIndex
         """
+        assert not lazy, 'Do not support lazy'
+
         attrs = group.attrs
 
         channelindex = self._create_channelindex(group)
-        if cascade:
-            self._read_channelindex_children(lazy, group, reader, channelindex)
+        self._read_channelindex_children(group, reader, channelindex)
 
         self._read_basic_metadata(attrs, channelindex)
 
         return channelindex
 
-    def _read_channelindex_children(self, lazy, group, reader, channelindex):
+    def _read_channelindex_children(self, group, reader, channelindex):
         for child in group['analogsignals/'].values():
-            channelindex.analogsignals.append(self.read_analogsignal(lazy=lazy, group=child, reader=reader))
+            channelindex.analogsignals.append(self.read_analogsignal(group=child, reader=reader))
 
     def _init_reading(self):
         reader = nsdf.NSDFReader(self.filename)
@@ -504,15 +508,13 @@ class NSDFIO(BaseIO):
         if attrs.get('index') is not None:
             object.index = attrs['index']
 
-    def _create_analogsignal(self, data_group, lazy, group, t_start, uid, reader):
-        if lazy:
-            data_shape = data_group.shape
-            data_shape = (data_shape[1], data_shape[0])
-            signal = self._create_lazy_analogsignal(data_shape, data_group, uid, t_start)
-        else:
-            dataobj = reader.get_uniform_data(uid, 'signal')
-            data = self._read_signal_data(dataobj, group)
-            signal = self._create_normal_analogsignal(data, dataobj, uid, t_start)
+    def _create_analogsignal(self, data_group, group, t_start, uid, reader):
+        # for lazy
+        # data_shape = data_group.shape
+        # data_shape = (data_shape[1], data_shape[0])
+        dataobj = reader.get_uniform_data(uid, 'signal')
+        data = self._read_signal_data(dataobj, group)
+        signal = self._create_normal_analogsignal(data, dataobj, uid, t_start)
         return signal
 
     def _read_analogsignal_t_start(self, attrs, data_group):
@@ -531,13 +533,6 @@ class NSDFIO(BaseIO):
         return AnalogSignal(np.swapaxes(data, 0, 1), dtype=dataobj.dtype, units=dataobj.unit,
                             t_start=t_start, sampling_period=pq.Quantity(dataobj.dt, dataobj.tunit))
 
-    def _create_lazy_analogsignal(self, shape, data, uid, t_start):
-        attrs = data.attrs
-        signal = AnalogSignal([], dtype=data.dtype, units=attrs['unit'],
-                              t_start=t_start, sampling_period=pq.Quantity(attrs['dt'], attrs['tunit']))
-        signal.lazy_shape = shape
-        return signal
-
     def _create_channelindex(self, group):
         return ChannelIndex(index=self._read_array(group, 'index'),
                             channel_names=self._read_array(group, 'channel_names'),

+ 5 - 3
code/python-neo/neo/io/pickleio.py

@@ -32,15 +32,17 @@ class PickleIO(BaseIO):
     is_readable = True
     is_writable = True
     has_header = False
-    is_streameable = False # TODO - correct spelling to "is_streamable"
-    supported_objects = [Block, Segment, AnalogSignal, SpikeTrain] # should extend to other classes.
+    is_streameable = False  # TODO - correct spelling to "is_streamable"
+    # should extend to other classes.
+    supported_objects = [Block, Segment, AnalogSignal, SpikeTrain]
     readable_objects = supported_objects
     writeable_objects = supported_objects
     mode = 'file'
     name = "Python pickle file"
     extensions = ['pkl', 'pickle']
 
-    def read_block(self, lazy=False, cascade=True):
+    def read_block(self, lazy=False):
+        assert not lazy, 'Do not support lazy'
         with open(self.filename, "rb") as fp:
             block = pickle.load(fp)
         return block
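A minimal round trip through PickleIO under the new signature. Only read_block appears in this hunk; write_block is assumed from writeable_objects = supported_objects, and the file name is arbitrary:

    import numpy as np
    import quantities as pq
    from neo.core import Block, Segment, AnalogSignal
    from neo.io import PickleIO

    block = Block(name='demo')
    seg = Segment()
    seg.analogsignals.append(AnalogSignal(np.random.rand(100, 1),
                                          units='mV', sampling_rate=1 * pq.kHz))
    block.segments.append(seg)

    PickleIO(filename='demo.pkl').write_block(block)       # assumed writer method
    same = PickleIO(filename='demo.pkl').read_block()      # lazy=True would raise
    print(same.segments[0].analogsignals[0].shape)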

+ 0 - 0
code/python-neo/neo/io/plexonio.py


Some files were not shown because too many files changed in this diff