
Re-adds changes of a29c6df1969eecd28ff7e8c92f3491d0640b41ad

This reverts commit 8b1499049af418c9a39e8f5f5492745f9d8c0cd3.
Julia Sprenger committed 5 years ago
commit 24cd5caee3
100 changed files with 28280 additions and 0 deletions
  1. code/elephant/.gitignore (+72, -0)
  2. code/elephant/.travis.yml (+44, -0)
  3. code/elephant/MANIFEST.in (+8, -0)
  4. code/elephant/doc/reference/current_source_density.rst (+6, -0)
  5. code/elephant/elephant/cell_assembly_detection.py (+1181, -0)
  6. code/elephant/elephant/change_point_detection.py (+494, -0)
  7. code/elephant/elephant/phase_analysis.py (+171, -0)
  8. code/elephant/elephant/spade.py (+1633, -0)
  9. code/elephant/elephant/spade_src/LICENSE (+11, -0)
  10. code/elephant/elephant/spade_src/__init__.py (+2, -0)
  11. code/elephant/elephant/spade_src/fast_fca.py (+1126, -0)
  12. code/elephant/elephant/test/spike_extraction_test_data.txt (+3000, -0)
  13. code/elephant/elephant/test/test_cell_assembly_detection.py (+209, -0)
  14. code/elephant/elephant/test/test_change_point_detection.py (+190, -0)
  15. code/elephant/elephant/test/test_phase_analysis.py (+190, -0)
  16. code/elephant/elephant/test/test_spade.py (+246, -0)
  17. code/elephant/requirements-docs.txt (+2, -0)
  18. code/elephant/requirements-extras.txt (+2, -0)
  19. code/elephant/requirements-tests.txt (+1, -0)
  20. code/python-neo/.circleci/config.yml (+125, -0)
  21. code/python-neo/.circleci/requirements_testing.txt (+12, -0)
  22. code/python-neo/.pep8speaks.yml (+6, -0)
  23. code/python-neo/CODE_OF_CONDUCT.md (+46, -0)
  24. code/python-neo/CONTRIBUTING.md (+1, -0)
  25. code/python-neo/doc/source/images/neo_ecosystem.svg (+905, -0)
  26. code/python-neo/doc/source/rawio.rst (+205, -0)
  27. code/python-neo/doc/source/releases/0.6.0.rst (+42, -0)
  28. code/python-neo/doc/source/releases/0.7.0.rst (+41, -0)
  29. code/python-neo/examples/read_files_neo_io.py (+47, -0)
  30. code/python-neo/examples/read_files_neo_rawio.py (+73, -0)
  31. code/python-neo/neo/core/dataobject.py (+375, -0)
  32. code/python-neo/neo/io/axographio.py (+152, -0)
  33. code/python-neo/neo/io/basefromrawio.py (+509, -0)
  34. code/python-neo/neo/io/bci2000io.py (+13, -0)
  35. code/python-neo/neo/io/blackrockio_v4.py (+2567, -0)
  36. code/python-neo/neo/io/intanio.py (+13, -0)
  37. code/python-neo/neo/io/neuralynxio_v1.py (+2409, -0)
  38. code/python-neo/neo/io/nixio_fr.py (+23, -0)
  39. code/python-neo/neo/io/openephysio.py (+13, -0)
  40. code/python-neo/neo/io/rawmcsio.py (+12, -0)
  41. code/python-neo/neo/rawio/__init__.py (+69, -0)
  42. code/python-neo/neo/rawio/axonrawio.py (+895, -0)
  43. code/python-neo/neo/rawio/baserawio.py (+694, -0)
  44. code/python-neo/neo/rawio/bci2000rawio.py (+375, -0)
  45. code/python-neo/neo/rawio/blackrockrawio.py (+1925, -0)
  46. code/python-neo/neo/rawio/brainvisionrawio.py (+199, -0)
  47. code/python-neo/neo/rawio/elanrawio.py (+233, -0)
  48. code/python-neo/neo/rawio/examplerawio.py (+369, -0)
  49. code/python-neo/neo/rawio/intanrawio.py (+530, -0)
  50. code/python-neo/neo/rawio/micromedrawio.py (+231, -0)
  51. code/python-neo/neo/rawio/neuralynxrawio.py (+689, -0)
  52. code/python-neo/neo/rawio/neuroexplorerrawio.py (+324, -0)
  53. code/python-neo/neo/rawio/neuroscoperawio.py (+119, -0)
  54. code/python-neo/neo/rawio/nixrawio.py (+326, -0)
  55. code/python-neo/neo/rawio/openephysrawio.py (+523, -0)
  56. code/python-neo/neo/rawio/plexonrawio.py (+514, -0)
  57. code/python-neo/neo/rawio/rawbinarysignalrawio.py (+107, -0)
  58. code/python-neo/neo/rawio/rawmcsrawio.py (+155, -0)
  59. code/python-neo/neo/rawio/spike2rawio.py (+659, -0)
  60. code/python-neo/neo/rawio/tdtrawio.py (+529, -0)
  61. code/python-neo/neo/rawio/tests/__init__.py (+1, -0)
  62. code/python-neo/neo/rawio/tests/common_rawio_test.py (+161, -0)
  63. code/python-neo/neo/rawio/tests/rawio_compliance.py (+349, -0)
  64. code/python-neo/neo/rawio/tests/test_axonrawio.py (+34, -0)
  65. code/python-neo/neo/rawio/tests/test_bci2000rawio.py (+20, -0)
  66. code/python-neo/neo/rawio/tests/test_blackrockrawio.py (+193, -0)
  67. code/python-neo/neo/rawio/tests/test_brainvisionrawio.py (+43, -0)
  68. code/python-neo/neo/rawio/tests/test_elanrawio.py (+24, -0)
  69. code/python-neo/neo/rawio/tests/test_examplerawio.py (+48, -0)
  70. code/python-neo/neo/rawio/tests/test_intanrawio.py (+23, -0)
  71. code/python-neo/neo/rawio/tests/test_micromedrawio.py (+23, -0)
  72. code/python-neo/neo/rawio/tests/test_neuralynxrawio.py (+66, -0)
  73. code/python-neo/neo/rawio/tests/test_neuroexplorerrawio.py (+23, -0)
  74. code/python-neo/neo/rawio/tests/test_neuroscoperawio.py (+21, -0)
  75. code/python-neo/neo/rawio/tests/test_nixrawio.py (+16, -0)
  76. code/python-neo/neo/rawio/tests/test_openephysrawio.py (+82, -0)
  77. code/python-neo/neo/rawio/tests/test_plexonrawio.py (+24, -0)
  78. code/python-neo/neo/rawio/tests/test_rawbinarysignalrawio.py (+19, -0)
  79. code/python-neo/neo/rawio/tests/test_rawmcsrawio.py (+19, -0)
  80. code/python-neo/neo/rawio/tests/test_spike2rawio.py (+26, -0)
  81. code/python-neo/neo/rawio/tests/test_tdtrawio.py (+31, -0)
  82. code/python-neo/neo/rawio/tests/test_winedrrawio.py (+23, -0)
  83. code/python-neo/neo/rawio/tests/test_winwcprawio.py (+19, -0)
  84. code/python-neo/neo/rawio/tests/tools.py (+92, -0)
  85. code/python-neo/neo/rawio/winedrrawio.py (+122, -0)
  86. code/python-neo/neo/rawio/winwcprawio.py (+171, -0)
  87. code/python-neo/neo/test/coretest/test_dataobject.py (+188, -0)
  88. code/python-neo/neo/test/iotest/test_axographio.py (+28, -0)
  89. code/python-neo/neo/test/iotest/test_bci2000.py (+26, -0)
  90. code/python-neo/neo/test/iotest/test_intanio.py (+27, -0)
  91. code/python-neo/neo/test/iotest/test_nixio_fr.py (+104, -0)
  92. code/python-neo/neo/test/iotest/test_openephysio.py (+29, -0)
  93. code/python-neo/neo/test/iotest/test_rawmcsio.py (+21, -0)
  94. code/python-odml/.gitignore (+35, -0)
  95. code/python-odml/.travis.yml (+70, -0)
  96. code/python-odml/CHANGELOG.md (+326, -0)
  97. code/python-odml/CONTRIBUTING.md (+77, -0)
  98. code/python-odml/GSoC.md (+31, -0)
  99. code/python-odml/MANIFEST.in (+3, -0)
  100. code/python-odml/README.rst (+0, -0)

+ 72 - 0
code/elephant/.gitignore

@@ -0,0 +1,72 @@
+#########################################
+# Editor temporary/working/backup files #
+.#*
+[#]*#
+*~
+*$
+*.bak
+.coverage
+*.kdev4
+*.komodoproject
+.mr.developer.cfg
+nosetests.xml
+*.orig
+.project
+.pydevproject
+.settings
+*.tmp*
+.idea
+
+# Compiled source #
+###################
+*.a
+*.com
+*.class
+*.dll
+*.exe
+*.mo
+*.o
+*.py[ocd]
+*.so
+
+# Python files #
+################
+# setup.py working directory
+build
+# other build directories
+bin
+parts
+var
+lib
+lib64
+# sphinx build directory
+doc/_build
+# setup.py dist directory
+dist
+sdist
+# Egg metadata
+*.egg-info
+*.egg
+*.EGG
+*.EGG-INFO
+eggs
+develop-eggs
+# tox testing tool
+.tox
+# Packages
+.installed.cfg
+pip-log.txt
+# coverage
+cover
+
+# OS generated files #
+######################
+.directory
+.gdb_history
+.DS_Store?
+ehthumbs.db
+Icon?
+Thumbs.db
+
+# Things specific to this project #
+###################################

+ 44 - 0
code/elephant/.travis.yml

@@ -0,0 +1,44 @@
+dist: precise
+language: python
+sudo: false
+
+addons:
+   apt:
+      packages:
+      - libatlas3gf-base
+      - libatlas-dev
+      - libatlas-base-dev
+      - liblapack-dev
+      - gfortran
+      - python-scipy
+
+python:
+  - 2.7.13     
+      
+env:
+  matrix:
+    # This environment tests the newest supported anaconda env
+    - DISTRIB="conda" PYTHON_VERSION="2.7" INSTALL_MKL="true"
+      NUMPY_VERSION="1.15.1" SCIPY_VERSION="1.1.0" PANDAS_VERSION="0.23.4"
+      SIX_VERSION="1.10.0" COVERAGE="true"
+    - DISTRIB="conda" PYTHON_VERSION="3.5" INSTALL_MKL="true"
+      NUMPY_VERSION="1.15.1" SCIPY_VERSION="1.1.0" PANDAS_VERSION="0.23.4"
+      SIX_VERSION="1.10.0" COVERAGE="true"
+    # This environment tests minimal dependency versions
+    - DISTRIB="conda_min" PYTHON_VERSION="2.7" INSTALL_MKL="false"
+      SIX_VERSION="1.10.0" NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.14.0" COVERAGE="true"
+    - DISTRIB="conda_min" PYTHON_VERSION="3.4" INSTALL_MKL="false"
+      SIX_VERSION="1.10.0" NUMPY_VERSION="1.8.2" SCIPY_VERSION="0.14.0" COVERAGE="true"
+    # basic Ubuntu build environment
+    - DISTRIB="ubuntu" PYTHON_VERSION="2.7" INSTALL_ATLAS="true"
+      COVERAGE="true"
+    # This environment tests for mpi
+    - DISTRIB="mpi" PYTHON_VERSION="3.5" INSTALL_MKL="false"
+      NUMPY_VERSION="1.15.1" SCIPY_VERSION="1.1.0" SIX_VERSION="1.10.0"
+      MPI_VERSION="2.0.0" COVERAGE="true" MPI="true"
+
+install: source continuous_integration/install.sh
+script: bash continuous_integration/test_script.sh
+after_success:
+    - if [[ "$COVERAGE" == "true" ]]; then coveralls || echo "failed"; fi
+cache: apt

+ 8 - 0
code/elephant/MANIFEST.in

@@ -0,0 +1,8 @@
+# Include requirements
+include requirement*.txt
+include README.rst
+include LICENSE.txt
+include AUTHORS.txt
+include elephant/test/spike_extraction_test_data.npz
+recursive-include doc *
+prune doc/build

+ 6 - 0
code/elephant/doc/reference/current_source_density.rst

@@ -0,0 +1,6 @@
+===============================
+Current source density analysis
+===============================
+
+.. automodule:: elephant.current_source_density
+   :members:

File diff suppressed because it is too large
+ 1181 - 0
code/elephant/elephant/cell_assembly_detection.py


+ 494 - 0
code/elephant/elephant/change_point_detection.py

@@ -0,0 +1,494 @@
+# -*- coding: utf-8 -*-
+
+"""
+This algorithm determines whether a spike train `spk` can be considered a
+stationary process (constant firing rate) or a non-stationary process (i.e.
+presence of one or more points at which the rate increases or decreases). In
+case of non-stationarity, the output is a list of detected Change Points (CPs).
+Essentially, a two-sided window of width `h` (`_filter(t, h, spk)`) slides over
+the spike train within the time interval `[h, t_final-h]`. This generates a
+`_filter_process(dt, h, spk)` that assigns to each time `t` the difference
+between the spike counts in the right and in the left window. If at any time `t`
+this difference is large enough, the presence of a rate Change Point in a
+neighborhood of `t` is assumed. A threshold `test_quantile` for the maximum of
+the filter process (the maximum difference of spike counts between the left and
+right window) is derived based on asymptotic considerations. The procedure is
+repeated for an arbitrary set of windows of different sizes `h`.
+
+
+Examples
+--------
+The following applies multiple_filter_test to a spike train.
+
+    >>> import quantities as pq
+    >>> import neo
+    >>> from elephant.change_point_detection import multiple_filter_test
+
+    
+    >>> test_array = [1.1,1.2,1.4,   1.6,1.7,1.75,1.8,1.85,1.9,1.95]
+    >>> st = neo.SpikeTrain(test_array, units='s', t_stop = 2.1)
+    >>> window_size = [0.5]*pq.s
+    >>> t_fin = 2.1*pq.s
+    >>> alpha = 5.0
+    >>> num_surrogates = 10000
+    >>> change_points = multiple_filter_test(window_size, st, t_fin, alpha,
+                        num_surrogates, dt = 0.5*pq.s)
+
+
+
+References
+----------
+Messer, M., Kirchner, M., Schiemann, J., Roeper, J., Neininger, R., & Schneider,
+G. (2014). A multiple filter test for the detection of rate changes in renewal
+processes with varying variance. The Annals of Applied Statistics, 8(4), 2027-2067.
+
+
+Original code
+-------------
+Adapted from the published R implementation:
+DOI: 10.1214/14-AOAS782SUPP;.r
+
+"""
+
+import numpy as np
+import quantities as pq
+
+
+def multiple_filter_test(window_sizes, spiketrain, t_final, alpha, n_surrogates,
+                         test_quantile=None, test_param=None, dt=None):
+    """
+    Detects change points.
+
+    This function returns the detected change points, which correspond to the
+    maxima of the `_filter_processes`. These are the processes generated by
+    sliding windows of size `h` in steps of `dt`; at each step the difference
+    between the spike counts in the right and in the left window is calculated.
+
+    Parameters
+    ----------
+        window_sizes : list of quantity objects
+            list that contains the window sizes
+        spiketrain : neo.SpikeTrain, numpy array or list
+            spike train to analyze
+        t_final : quantity
+            final time of the spike train which is to be analysed
+        alpha : float
+            alpha-quantile in range [0, 100] for the set of maxima of the limit
+            processes
+        n_surrogates : integer
+            number of simulated limit processes
+        test_quantile : float
+            threshold for the maxima of the filter derivative processes; if any
+            of these maxima is larger than this value, the presence of a CP at
+            the time corresponding to that maximum is assumed
+        dt : quantity
+            resolution, time step at which the windows are slid
+        test_param : np.array of shape (3, num of windows)
+            first row: list of `h`, second and third rows: empirical means and
+            variances of the limit process corresponding to `h`. This will be
+            used to normalize the `filter_process` in order to give every
+            maximum the same impact on the global statistic.
+
+    Returns
+    -------
+        cps : list of lists
+            one list for each window size `h`, containing the points detected
+            with the corresponding `filter_process`. N.B.: only CPs whose
+            h-neighborhood does not include previously detected CPs (with
+            smaller window h) are added to the list.
+    """
+
+    if (test_quantile is None) and (test_param is None):
+        test_quantile, test_param = empirical_parameters(window_sizes, t_final,
+                                                         alpha, n_surrogates,
+                                                         dt)
+    elif test_quantile is None:
+        test_quantile = empirical_parameters(window_sizes, t_final, alpha,
+                                             n_surrogates, dt)[0]
+    elif test_param is None:
+        test_param = empirical_parameters(window_sizes, t_final, alpha,
+                                          n_surrogates, dt)[1]
+                                          
+    spk = spiketrain
+    
+    #  List of lists of detected change points (CPs), to be returned
+    cps = []  
+    
+    for i, h in enumerate(window_sizes):
+        # automatic setting of dt
+        dt_temp = h / 20 if dt is None else dt
+        # filter_process for window of size h
+        t, differences = _filter_process(dt_temp, h, spk, t_final, test_param)
+        time_index = np.arange(len(differences))
+        # Point detected with window h
+        cps_window = []
+        while np.max(differences) > test_quantile:
+            cp_index = np.argmax(differences)
+            # from index to time
+            cp = cp_index * dt_temp + h  
+            #print("detected point {0}".format(cp), "with filter {0}".format(h))
+            # before repeating the procedure, the h-neighbourhood of the
+            # detected CP is discarded, because rate changes within it are
+            # already explained
+            mask_fore = time_index > cp_index - int((h / dt_temp).simplified)
+            mask_back = time_index < cp_index + int((h / dt_temp).simplified)
+            differences[mask_fore & mask_back] = 0
+            # check that the neighbourhood of the detected CP does not
+            # contain CPs detected with other windows
+            neighbourhood_free = True
+            # iterate on lists of cps detected with smaller window
+            for j in range(i):
+                # iterate on CPs detected with the j-th smallest window
+                for c_pre in cps[j]:
+                    if c_pre - h < cp < c_pre + h:
+                        neighbourhood_free = False
+                        break
+            # if none of the previously detected CPs falls in the h-
+            # neighbourhood
+            if neighbourhood_free:
+                # add the current CP to the list
+                cps_window.append(cp)
+        # add the present list to the grand list
+        cps.append(cps_window)
+
+    return cps
+
+
+def _brownian_motion(t_in, t_fin, x_in, dt):
+    """
+    Generate a Brownian Motion.
+
+    Parameters
+    ----------
+        t_in : quantity
+            initial time
+        t_fin : quantity
+            final time
+        x_in : float
+            initial point of the process: _brownian_motion(0) = x_in
+        dt : quantity
+            resolution, time step at which Brownian increments are summed
+    Returns
+    -------
+    Brownian motion on [t_in, t_fin], with resolution dt and initial state x_in
+    """
+
+    u = 1 * pq.s
+    try:
+        t_in_sec = t_in.rescale(u).magnitude
+    except ValueError:
+        raise ValueError("t_in must be a time quantity")
+    try:
+        t_fin_sec = t_fin.rescale(u).magnitude
+    except ValueError:
+        raise ValueError("t_fin must be a time quantity")
+    try:
+        dt_sec = dt.rescale(u).magnitude
+    except ValueError:
+        raise ValueError("dt must be a time quantity")
+
+    x = np.random.normal(0, np.sqrt(dt_sec), size=int((t_fin_sec - t_in_sec) 
+                                                                     / dt_sec))
+    s = np.cumsum(x)
+    return s + x_in
+
+
+def _limit_processes(window_sizes, t_final, dt):
+    """
+    Generate the limit processes (depending only on t_final and h), one for
+    each window size `h` in H. The distribution of maxima of these processes
+    is used to derive threshold `test_quantile` and parameters `test_param`.
+
+    Parameters
+    ----------
+        window_sizes : list of quantities
+            set of window sizes
+        t_final : quantity object
+            end of limit process
+        dt : quantity object
+            resolution, time step at which the windows are slid
+
+    Returns
+    -------
+        limit_processes : list of numpy arrays
+            each entry contains the limit process for the corresponding h,
+            evaluated on [h, T-h] with step dt
+    """
+
+    limit_processes = []
+
+    u = 1 * pq.s
+    try:
+        window_sizes_sec = window_sizes.rescale(u).magnitude
+    except ValueError:
+        raise ValueError("window_sizes must be a list of times")
+    try:
+        dt_sec = dt.rescale(u).magnitude
+    except ValueError:
+        raise ValueError("dt must be a time quantity")
+    
+    w = _brownian_motion(0 * u, t_final, 0, dt)
+    
+    for h in window_sizes_sec:
+        # BM on [h,T-h], shifted in time t-->t+h
+        brownian_right = w[int(2 * h/dt_sec):]
+        # BM on [h,T-h], shifted in time t-->t-h                     
+        brownian_left = w[:int(-2 * h/dt_sec)]
+        # BM on [h,T-h]                       
+        brownian_center = w[int(h/dt_sec):int(-h/dt_sec)]  
+        
+        modul = np.abs(brownian_right + brownian_left - 2 * brownian_center)
+        limit_process_h = modul / (np.sqrt(2 * h))
+        limit_processes.append(limit_process_h)
+
+    return limit_processes
+
+
+def empirical_parameters(window_sizes, t_final, alpha, n_surrogates, dt=None):
+    """
+    This function generates the threshold and the null parameters.
+    The `_filter_process_h` has been shown to converge (for t_fin,
+    h --> infinity) to a continuous functional of a Brownian motion
+    ('limit_process'). Using a Monte Carlo technique, maxima of these
+    limit processes are collected.
+
+    The threshold is defined as the alpha quantile of this set of maxima.
+    Namely:
+    test_quantile := alpha quantile of {max_(h in window_size)[
+                                 max_(t in [h, t_final-h])_limit_process_h(t)]}
+
+    Parameters
+    ----------
+        window_sizes : list of quantity objects
+            set of window sizes
+        t_final : quantity object
+            final time of the spike train
+        alpha : float
+            alpha-quantile in range [0, 100]
+        n_surrogates : integer
+            number of simulated limit processes
+        dt : quantity object
+            resolution, time step at which the windows are slid
+
+    Returns
+    -------
+        test_quantile : float
+            threshold for the maxima of the filter derivative processes; if any
+            of these maxima is larger than this value, the presence of a CP at
+            the time corresponding to that maximum is assumed
+
+        test_param : np.array of shape (3, num of windows)
+            first row: list of `h`, second and third rows: empirical means and
+            variances of the limit process corresponding to `h`. This will be
+            used to normalize the `filter_process` in order to give every
+            maximum the same impact on the global statistic.
+    """
+
+    # try:
+    #     window_sizes_sec = window_sizes.rescale(u)
+    # except ValueError:
+    #     raise ValueError("H must be a list of times")
+    # window_sizes_mag = window_sizes_sec.magnitude
+    # try:
+    #     t_final_sec = t_final.rescale(u)
+    # except ValueError:
+    #     raise ValueError("T must be a time quantity")
+    # t_final_mag = t_final_sec.magnitude
+
+    if not isinstance(window_sizes, pq.Quantity):
+        raise ValueError("window_sizes must be a list of time quantities")
+    if not isinstance(t_final, pq.Quantity):
+        raise ValueError("t_final must be a time quantity")
+    if not isinstance(n_surrogates, int):
+        raise TypeError("n_surrogates must be an integer")
+    if not (isinstance(dt, pq.Quantity) or (dt is None)):
+        raise ValueError("dt must be a time quantity")
+
+    if t_final <= 0:
+        raise ValueError("t_final needs to be strictly positive")
+    if alpha * (100.0 - alpha) < 0:
+        raise ValueError("alpha needs to be in (0,100)")
+    if np.min(window_sizes) <= 0:
+        raise ValueError("window size needs to be strictly positive")
+    if np.max(window_sizes) >= t_final / 2:
+        raise ValueError("window size too large")
+    if dt is not None:
+        for h in window_sizes:
+            if int(h.rescale('us')) % int(dt.rescale('us')) != 0:
+                raise ValueError(
+                    "Every window size h must be a multiple of dt")
+
+    # Generate a matrix M*: n X m where n = n_surrogates is the number of
+    # simulated limit processes and m is the number of chosen window sizes.
+    # Elements are: M*(i,h) = max(t in T)[`limit_process_h`(t)],
+    # for each h in H and surrogate i
+    maxima_matrix = []
+
+    for i in range(n_surrogates):
+        # mh_star = []
+        simu = _limit_processes(window_sizes, t_final, dt)
+        # for i, h in enumerate(window_sizes_mag):
+        #     # max over time of the limit process generated with window h
+        #     m_h = np.max(simu[i])
+        #     mh_star.append(m_h)
+        mh_star = [np.max(x) for x in simu]  # max over time of the limit process generated with window h
+        maxima_matrix.append(mh_star)
+
+    maxima_matrix = np.asanyarray(maxima_matrix)
+
+    # these parameters will be used to normalize both the limit_processes (H0)
+    # and the filter_processes
+    null_mean = maxima_matrix.mean(axis=0)
+    null_var = maxima_matrix.var(axis=0)
+
+    # matrix normalization by mean and variance of the limit process, in order
+    # to give, for every h, the same impact on the global maximum
+    matrix_normalized = (maxima_matrix - null_mean) / np.sqrt(null_var)
+
+    great_maxs = np.max(matrix_normalized, axis=1)
+    test_quantile = np.percentile(great_maxs, 100.0 - alpha)
+    null_parameters = [window_sizes, null_mean, null_var]
+    test_param = np.asanyarray(null_parameters)
+
+    return test_quantile, test_param
+
+
+def _filter(t, h, spk):
+    """
+    This function calculates the difference between the spike counts in the
+    right and left halves of a window of size h centered at t, normalized by
+    its variance. The variance of this count can be expressed as a combination
+    of the mean and variance of the I.S.I. lying inside the window.
+
+    Parameters
+    ----------
+        h : quantity
+            window size
+        t : quantity
+            time at which the window is centered
+        spk : list, numpy array or SpikeTrain
+            spike train to analyze
+
+    Returns
+    -------
+        difference : float
+            difference of spike counts normalized by its variance
+    """
+
+    u = 1 * pq.s
+    try:
+        t_sec = t.rescale(u).magnitude
+    except AttributeError:
+        raise ValueError("t must be a quantities object")
+    # tm = t_sec.magnitude
+    try:
+        h_sec = h.rescale(u).magnitude
+    except AttributeError:
+        raise ValueError("h must be a time quantity")
+    # hm = h_sec.magnitude
+    try:
+        spk_sec = spk.rescale(u).magnitude
+    except AttributeError:
+        raise ValueError(
+            "spiketrain must be a list (array) of times or a neo spiketrain")
+
+    # cut spike-train on the right
+    train_right = spk_sec[(t_sec < spk_sec) & (spk_sec < t_sec + h_sec)]
+    # cut spike-train on the left
+    train_left = spk_sec[(t_sec - h_sec < spk_sec) & (spk_sec < t_sec)]
+    # spike count in the right side
+    count_right = train_right.size
+    # spike count in the left side
+    count_left = train_left.size
+    # from spikes to I.S.I.
+    isi_right = np.diff(train_right)  
+    isi_left = np.diff(train_left)
+
+    if isi_right.size == 0:
+        mu_ri = 0
+        sigma_ri = 0
+    else:
+        # mean of I.S.I inside the window
+        mu_ri = np.mean(isi_right)
+        # var of I.S.I inside the window
+        sigma_ri = np.var(isi_right)
+
+    if isi_left.size == 0:
+        mu_le = 0
+        sigma_le = 0
+    else:
+        mu_le = np.mean(isi_left)
+        sigma_le = np.var(isi_left)
+
+    if (sigma_le > 0) & (sigma_ri > 0):
+        s_quad = (sigma_ri / mu_ri**3) * h_sec + (sigma_le / mu_le**3) * h_sec
+    else:
+        s_quad = 0
+
+    if s_quad == 0:
+        difference = 0
+    else:
+        difference = (count_right - count_left) / np.sqrt(s_quad)
+
+    return difference
+
+
+def _filter_process(dt, h, spk, t_final, test_param):
+    """
+    Given a spike train `spk` and a window size `h`, this function generates
+    the `filter derivative process` by evaluating the function `_filter`
+    in steps of `dt`.
+
+    Parameters
+    ----------
+        h : quantity object
+            window size
+        t_final : quantity
+            final time of the spike train to be analysed
+        spk : list, array or SpikeTrain
+            spike train to analyze
+        dt : quantity object
+            resolution, time step at which the window is slid
+        test_param : np.array of shape (3, num of windows)
+            first row: the window sizes `h`, second and third rows: empirical
+            means and variances of the limit processes `L_h`, used to
+            normalize the spike counts inside the windows
+
+    Returns
+    -------
+        time_domain : numpy array
+            time domain of the `filter derivative process`
+        filter_process : numpy array
+            values of the `filter derivative process`
+    """
+
+    u = 1 * pq.s
+
+    try:
+        h_sec = h.rescale(u).magnitude
+    except AttributeError:
+        raise ValueError("h must be a time quantity")
+    try:
+        t_final_sec = t_final.rescale(u).magnitude
+    except AttributeError:
+        raise ValueError("t_final must be a time quanity")
+    try:
+        dt_sec = dt.rescale(u).magnitude
+    except AttributeError:
+        raise ValueError("dt must be a time quantity")
+    # domain of the process
+    time_domain = np.arange(h_sec, t_final_sec - h_sec, dt_sec)
+    filter_trajectory = []
+    # taken from the function used to generate the threshold
+    emp_mean_h = test_param[1][test_param[0] == h]
+    emp_var_h = test_param[2][test_param[0] == h]
+
+    for t in time_domain:
+        filter_trajectory.append(_filter(t * u, h, spk))
+
+    filter_trajectory = np.asanyarray(filter_trajectory)
+    # ordered normalization to give each process the same impact on the max
+    filter_process = (
+        np.abs(filter_trajectory) - emp_mean_h) / np.sqrt(emp_var_h)
+
+    return time_domain, filter_process

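The `empirical_parameters` step above is the expensive part of the new module (it simulates `n_surrogates` limit processes), and `multiple_filter_test` accepts its two outputs back through the `test_quantile` and `test_param` arguments so that the simulation can be run once and reused for several spike trains of the same length and window set. A minimal sketch of that workflow; the window sizes, duration, spike counts and surrogate number below are illustrative values, not taken from the commit:

    import numpy as np
    import quantities as pq
    import neo
    from elephant.change_point_detection import (empirical_parameters,
                                                 multiple_filter_test)

    window_sizes = [0.5, 1.0] * pq.s   # two window widths h (illustrative)
    t_final = 10.0 * pq.s              # common length of all spike trains
    alpha = 5.0                        # quantile (in percent) for the threshold
    n_surrogates = 1000                # Monte Carlo repetitions

    # compute the threshold and the normalisation constants once ...
    test_quantile, test_param = empirical_parameters(
        window_sizes, t_final, alpha, n_surrogates, dt=0.1 * pq.s)

    # ... and reuse them for every spike train of the same length
    np.random.seed(0)
    spiketrains = [
        neo.SpikeTrain(np.sort(np.random.uniform(0, 10, n)) * pq.s,
                       t_stop=t_final)
        for n in (50, 80)]
    for st in spiketrains:
        cps = multiple_filter_test(
            window_sizes, st, t_final, alpha, n_surrogates,
            test_quantile=test_quantile, test_param=test_param,
            dt=0.1 * pq.s)

This mirrors the shortcut used in test_change_point_detection.py further below, where the parameters for a 700 s train are precomputed offline.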
+ 171 - 0
code/elephant/elephant/phase_analysis.py

@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+"""
+Methods for performing phase analysis.
+
+:copyright: Copyright 2014-2018 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+
+import numpy as np
+import quantities as pq
+
+
+def spike_triggered_phase(hilbert_transform, spiketrains, interpolate):
+    """
+    Calculate the set of spike-triggered phases of an AnalogSignal.
+
+    Parameters
+    ----------
+    hilbert_transform : AnalogSignal or list of AnalogSignal
+        AnalogSignal of the complex analytic signal (e.g., returned by the
+        elephant.signal_processing.hilbert()). All spike trains are compared to
+        this signal, if only one signal is given. Otherwise, length of
+        hilbert_transform must match the length of spiketrains.
+    spiketrains : Spiketrain or list of Spiketrain
+        Spiketrains on which to trigger hilbert_transform extraction
+    interpolate : bool
+        If True, the phases and amplitudes of hilbert_transform for spikes
+        falling between two samples of the signal are interpolated. Otherwise,
+        the closest sample of hilbert_transform is used.
+
+    Returns
+    -------
+    phases : list of arrays
+        Spike-triggered phases. Entries in the list correspond to the
+        SpikeTrains in spiketrains. Each entry contains an array with the
+        spike-triggered angles (in rad) of the signal.
+    amp : list of arrays
+        Corresponding spike-triggered amplitudes.
+    times : list of arrays
+        Corresponding times (i.e., the spike times).
+
+    Example
+    -------
+    Create a 20 Hz oscillatory signal sampled at 1 kHz and a random Poisson
+    spike train:
+
+    >>> f_osc = 20. * pq.Hz
+    >>> f_sampling = 1 * pq.ms
+    >>> tlen = 100 * pq.s
+    >>> time_axis = np.arange(
+            0, tlen.magnitude,
+            f_sampling.rescale(pq.s).magnitude) * pq.s
+    >>> analogsignal = AnalogSignal(
+            np.sin(2 * np.pi * (f_osc * time_axis).simplified.magnitude),
+            units=pq.mV, t_start=0 * pq.ms, sampling_period=f_sampling)
+    >>> spiketrain = elephant.spike_train_generation.
+            homogeneous_poisson_process(
+                50 * pq.Hz, t_start=0.0 * ms, t_stop=tlen.rescale(pq.ms))
+
+    Calculate spike-triggered phases and amplitudes of the oscillation:
+    >>> phases, amps, times = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(analogsignal),
+            spiketrain,
+            interpolate=True)
+    """
+
+    # Convert inputs to lists
+    if not isinstance(spiketrains, list):
+        spiketrains = [spiketrains]
+
+    if not isinstance(hilbert_transform, list):
+        hilbert_transform = [hilbert_transform]
+
+    # Number of signals
+    num_spiketrains = len(spiketrains)
+    num_phase = len(hilbert_transform)
+
+    if num_spiketrains != 1 and num_phase != 1 and \
+            num_spiketrains != num_phase:
+        raise ValueError(
+            "Number of spike trains and number of phase signals"
+            "must match, or either of the two must be a single signal.")
+
+    # Signal boundaries (t_start, t_stop) of each analytic signal
+    start = [elem.t_start for elem in hilbert_transform]
+    stop = [elem.t_stop for elem in hilbert_transform]
+
+    result_phases = []
+    result_amps = []
+    result_times = []
+
+    # Step through each signal
+    for spiketrain_i, spiketrain in enumerate(spiketrains):
+        # Check which hilbert_transform AnalogSignal to look at - if there is
+        # only one then all spike trains relate to this one, otherwise the two
+        # lists of spike trains and phases are matched up
+        if num_phase > 1:
+            phase_i = spiketrain_i
+        else:
+            phase_i = 0
+
+        # Take only spikes which lie directly within the signal segment -
+        # ignore spikes sitting on the last sample
+        sttimeind = np.where(np.logical_and(
+            spiketrain >= start[phase_i], spiketrain < stop[phase_i]))[0]
+
+        # Find index into signal for each spike
+        ind_at_spike = np.round(
+            (spiketrain[sttimeind] - hilbert_transform[phase_i].t_start) /
+            hilbert_transform[phase_i].sampling_period).magnitude.astype(int)
+
+        # Extract times for speed reasons
+        times = hilbert_transform[phase_i].times
+
+        # Append new list to the results for this spiketrain
+        result_phases.append([])
+        result_amps.append([])
+        result_times.append([])
+
+        # Step through all spikes
+        for spike_i, ind_at_spike_j in enumerate(ind_at_spike):
+            # Difference vector between actual spike time and sample point,
+            # positive if spike time is later than sample point
+            dv = spiketrain[sttimeind[spike_i]] - times[ind_at_spike_j]
+
+            # Make sure ind_at_spike is to the left of the spike time
+            if dv < 0 and ind_at_spike_j > 0:
+                ind_at_spike_j = ind_at_spike_j - 1
+
+            if interpolate:
+                # Get relative spike occurrence between the two closest signal
+                # sample points
+                # if z->0 spike is more to the left sample
+                # if z->1 more to the right sample
+                z = (spiketrain[sttimeind[spike_i]] - times[ind_at_spike_j]) /\
+                    hilbert_transform[phase_i].sampling_period
+
+                # Save hilbert_transform (interpolate on circle)
+                p1 = np.angle(hilbert_transform[phase_i][ind_at_spike_j])
+                p2 = np.angle(hilbert_transform[phase_i][ind_at_spike_j + 1])
+                result_phases[spiketrain_i].append(
+                    np.angle(
+                        (1 - z) * np.exp(np.complex(0, p1)) +
+                        z * np.exp(np.complex(0, p2))))
+
+                # Save amplitude
+                result_amps[spiketrain_i].append(
+                    (1 - z) * np.abs(
+                        hilbert_transform[phase_i][ind_at_spike_j]) +
+                    z * np.abs(hilbert_transform[phase_i][ind_at_spike_j + 1]))
+            else:
+                p1 = np.angle(hilbert_transform[phase_i][ind_at_spike_j])
+                result_phases[spiketrain_i].append(p1)
+
+                # Save amplitude
+                result_amps[spiketrain_i].append(
+                    np.abs(hilbert_transform[phase_i][ind_at_spike_j]))
+
+            # Save time
+            result_times[spiketrain_i].append(spiketrain[sttimeind[spike_i]])
+
+    # Convert outputs to arrays
+    for i, entry in enumerate(result_phases):
+        result_phases[i] = np.array(entry).flatten()
+    for i, entry in enumerate(result_amps):
+        result_amps[i] = pq.Quantity(entry, units=entry[0].units).flatten()
+    for i, entry in enumerate(result_times):
+        result_times[i] = pq.Quantity(entry, units=entry[0].units).flatten()
+
+    return result_phases, result_amps, result_times

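The phases returned by `spike_triggered_phase` above are plain arrays of angles (in rad), so standard circular statistics can be applied to them afterwards. A minimal sketch, assuming a sinusoidal test signal like the one in the docstring example; the phase-locking measure at the end is ordinary NumPy (the mean resultant vector length), not a function of this module:

    import numpy as np
    import quantities as pq
    from neo import AnalogSignal, SpikeTrain
    import elephant.signal_processing
    import elephant.phase_analysis

    # 20 Hz oscillation sampled at 1 kHz, as in the docstring example
    fs = 1 * pq.ms
    t = np.arange(0, 10, fs.rescale(pq.s).magnitude) * pq.s
    signal = AnalogSignal(np.sin(2 * np.pi * 20 * t.magnitude),
                          units=pq.mV, t_start=0 * pq.ms, sampling_period=fs)
    # regular spikes, one every 50 ms
    spikes = SpikeTrain(np.arange(50, 9950, 50) * pq.ms,
                        t_start=0 * pq.ms, t_stop=10 * pq.s)

    phases, amps, times = elephant.phase_analysis.spike_triggered_phase(
        elephant.signal_processing.hilbert(signal), spikes, interpolate=True)

    # mean resultant vector length: 1 = perfect locking, 0 = uniform phases
    plv = np.abs(np.mean(np.exp(1j * phases[0])))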
File diff suppressed because it is too large
+ 1633 - 0
code/elephant/elephant/spade.py


+ 11 - 0
code/elephant/elephant/spade_src/LICENSE

@@ -0,0 +1,11 @@
+For any version published on or after October 23, 2014:
+
+(MIT license, or more precisely Expat License; to be found in the file mit-license.txt in the directory <prgname>/doc in the source package of the program, see also opensource.org and wikipedia.org)
+
+© 1996-2014 Christian Borgelt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 2 - 0
code/elephant/elephant/spade_src/__init__.py

@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+from . import fast_fca

File diff suppressed because it is too large
+ 1126 - 0
code/elephant/elephant/spade_src/fast_fca.py


File diff suppressed because it is too large
+ 3000 - 0
code/elephant/elephant/test/spike_extraction_test_data.txt


+ 209 - 0
code/elephant/elephant/test/test_cell_assembly_detection.py

@@ -0,0 +1,209 @@
+
+"""
+Unit test for cell_assembly_detection
+"""
+
+import unittest
+import numpy as np
+from numpy.testing.utils import assert_array_equal
+import neo
+import quantities as pq
+import elephant.conversion as conv
+import elephant.cell_assembly_detection as cad
+
+
+class CadTestCase(unittest.TestCase):
+
+    def setUp(self):
+
+        # Parameters
+        self.binsize = 1*pq.ms
+        self.alpha = 0.05
+        self.size_chunks = 100
+        self.maxlag = 10
+        self.reference_lag = 2
+        self.min_occ = 1
+        self.max_spikes = np.inf
+        self.significance_pruning = True
+        self.subgroup_pruning = True
+        self.flag_mypruning = False
+
+        # Input parameters
+
+        # Number of pattern occurrences
+        self.n_occ1 = 150
+        self.n_occ2 = 170
+        self.n_occ3 = 210
+
+        # Pattern lags
+        self.lags1 = [0, 0.001]
+        self.lags2 = [0, 0.002]
+        self.lags3 = [0, 0.003]
+
+        # Output pattern lags
+        self.output_lags1 = [0, 1]
+        self.output_lags2 = [0, 2]
+        self.output_lags3 = [0, 3]
+
+        # Length of the spiketrain
+        self.t_start = 0
+        self.t_stop = 1
+
+        # Patterns times
+        np.random.seed(1)
+        self.patt1_times = neo.SpikeTrain(
+            np.random.uniform(0, 1 - max(self.lags1), self.n_occ1) * pq.s,
+            t_start=0*pq.s, t_stop=1*pq.s)
+        self.patt2_times = neo.SpikeTrain(
+            np.random.uniform(0, 1 - max(self.lags2), self.n_occ2) * pq.s,
+            t_start=0*pq.s, t_stop=1*pq.s)
+        self.patt3_times = neo.SpikeTrain(
+            np.random.uniform(0, 1 - max(self.lags3), self.n_occ3) * pq.s,
+            t_start=0*pq.s, t_stop=1*pq.s)
+
+        # Patterns
+        self.patt1 = [self.patt1_times] + [neo.SpikeTrain(
+            self.patt1_times+l * pq.s, t_start=self.t_start * pq.s,
+            t_stop=self.t_stop * pq.s) for l in self.lags1]
+        self.patt2 = [self.patt2_times] + [neo.SpikeTrain(
+            self.patt2_times+l * pq.s,  t_start=self.t_start * pq.s,
+            t_stop=self.t_stop * pq.s) for l in self.lags2]
+        self.patt3 = [self.patt3_times] + [neo.SpikeTrain(
+            self.patt3_times+l * pq.s,  t_start=self.t_start * pq.s,
+            t_stop=self.t_stop * pq.s) for l in self.lags3]
+
+        # Binning spiketrains
+        self.bin_patt1 = conv.BinnedSpikeTrain(self.patt1,
+                                               binsize=self.binsize)
+
+        # Data
+        self.msip = self.patt1 + self.patt2 + self.patt3
+        self.msip = conv.BinnedSpikeTrain(self.msip, binsize=self.binsize)
+
+        # Expected results
+        self.n_spk1 = len(self.lags1) + 1
+        self.n_spk2 = len(self.lags2) + 1
+        self.n_spk3 = len(self.lags3) + 1
+        self.elements1 = range(self.n_spk1)
+        self.elements2 = range(self.n_spk2)
+        self.elements3 = range(self.n_spk3)
+        self.elements_msip = [
+            self.elements1, range(self.n_spk1, self.n_spk1 + self.n_spk2),
+            range(self.n_spk1 + self.n_spk2,
+                  self.n_spk1 + self.n_spk2 + self.n_spk3)]
+        self.occ1 = np.unique(conv.BinnedSpikeTrain(
+            self.patt1_times, self.binsize).spike_indices[0])
+        self.occ2 = np.unique(conv.BinnedSpikeTrain(
+            self.patt2_times, self.binsize).spike_indices[0])
+        self.occ3 = np.unique(conv.BinnedSpikeTrain(
+            self.patt3_times, self.binsize).spike_indices[0])
+        self.occ_msip = [list(self.occ1), list(self.occ2), list(self.occ3)]
+        self.lags_msip = [self.output_lags1,
+                          self.output_lags2,
+                          self.output_lags3]
+
+    # test for single pattern injection input
+    def test_cad_single_sip(self):
+        # collecting cad output
+        output_single = cad.\
+            cell_assembly_detection(data=self.bin_patt1, maxlag=self.maxlag)
+        # check neurons in the pattern
+        assert_array_equal(sorted(output_single[0]['neurons']),
+                           self.elements1)
+        # check the occurrence times of the pattern
+        assert_array_equal(output_single[0]['times'],
+                           self.occ1)
+        # check the lags
+        assert_array_equal(sorted(output_single[0]['lags']),
+                           self.output_lags1)
+
+    # test with multiple (3) patterns injected in the data
+    def test_cad_msip(self):
+        # collecting cad output
+        output_msip = cad.\
+            cell_assembly_detection(data=self.msip, maxlag=self.maxlag)
+
+        elements_msip = []
+        occ_msip = []
+        lags_msip = []
+        for out in output_msip:
+            elements_msip.append(out['neurons'])
+            occ_msip.append(out['times'])
+            lags_msip.append(list(out['lags']))
+        elements_msip = sorted(elements_msip, key=lambda d: len(d))
+        occ_msip = sorted(occ_msip, key=lambda d: len(d))
+        lags_msip = sorted(lags_msip, key=lambda d: len(d))
+        elements_msip = [sorted(e) for e in elements_msip]
+        # check neurons in the patterns
+        assert_array_equal(elements_msip, self.elements_msip)
+        # check the occurrence times of the patterns
+        assert_array_equal(occ_msip[0], self.occ_msip[0])
+        assert_array_equal(occ_msip[1], self.occ_msip[1])
+        assert_array_equal(occ_msip[2], self.occ_msip[2])
+        lags_msip = [sorted(e) for e in lags_msip]
+        # check the lags
+        assert_array_equal(lags_msip, self.lags_msip)
+
+    # test the errors raised
+    def test_cad_raise_error(self):
+        # test error data input format
+        self.assertRaises(TypeError, cad.cell_assembly_detection,
+                          data=[[1, 2, 3], [3, 4, 5]],
+                          maxlag=self.maxlag)
+        # test error significance level
+        self.assertRaises(ValueError, cad.cell_assembly_detection,
+                          data=conv.BinnedSpikeTrain(
+                              [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
+                               neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
+                              binsize=self.binsize),
+                          maxlag=self.maxlag,
+                          alpha=-3)
+        # test error minimum number of occurrences
+        self.assertRaises(ValueError, cad.cell_assembly_detection,
+                          data=conv.BinnedSpikeTrain(
+                              [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
+                               neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
+                              binsize=self.binsize),
+                          maxlag=self.maxlag,
+                          min_occ=-1)
+        # test error minimum number of spikes in a pattern
+        self.assertRaises(ValueError, cad.cell_assembly_detection,
+                          data=conv.BinnedSpikeTrain(
+                              [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
+                               neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
+                              binsize=self.binsize),
+                          maxlag=self.maxlag,
+                          max_spikes=1)
+        # test error chunk size for variance computation
+        self.assertRaises(ValueError, cad.cell_assembly_detection,
+                          data=conv.BinnedSpikeTrain(
+                              [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
+                               neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
+                              binsize=self.binsize),
+                          maxlag=self.maxlag,
+                          size_chunks=1)
+        # test error maximum lag
+        self.assertRaises(ValueError, cad.cell_assembly_detection,
+                          data=conv.BinnedSpikeTrain(
+                              [neo.SpikeTrain([1, 2, 3]*pq.s, t_stop=5*pq.s),
+                               neo.SpikeTrain([3, 4, 5]*pq.s, t_stop=5*pq.s)],
+                              binsize=self.binsize),
+                          maxlag=1)
+        # test error minimum length spike train
+        self.assertRaises(ValueError, cad.cell_assembly_detection,
+                          data=conv.BinnedSpikeTrain(
+                              [neo.SpikeTrain([1, 2, 3]*pq.ms, t_stop=6*pq.ms),
+                               neo.SpikeTrain([3, 4, 5]*pq.ms,
+                                              t_stop=6*pq.ms)],
+                              binsize=1*pq.ms),
+                          maxlag=self.maxlag)
+
+
+def suite():
+    suite = unittest.makeSuite(CadTestCase, 'test')
+    return suite
+
+
+if __name__ == "__main__":
+    runner = unittest.TextTestRunner(verbosity=2)
+    runner.run(suite())

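Outside the test harness, the new module is driven in the same way: bin the spike trains with `elephant.conversion.BinnedSpikeTrain` and pass the result to `cell_assembly_detection` together with `maxlag`. A minimal sketch using only the arguments exercised in the test above; the two spike trains below are made-up illustration data (a repeated two-neuron pattern with a 1 ms lag):

    import numpy as np
    import quantities as pq
    import neo
    import elephant.conversion as conv
    import elephant.cell_assembly_detection as cad

    np.random.seed(0)
    # two neurons firing a repeated pattern with a 1 ms lag
    base = np.sort(np.random.uniform(0, 0.99, 100))
    sts = [neo.SpikeTrain(base * pq.s, t_start=0 * pq.s, t_stop=1 * pq.s),
           neo.SpikeTrain((base + 0.001) * pq.s,
                          t_start=0 * pq.s, t_stop=1 * pq.s)]

    binned = conv.BinnedSpikeTrain(sts, binsize=1 * pq.ms)
    patterns = cad.cell_assembly_detection(data=binned, maxlag=10)

    for patt in patterns:
        # each detected assembly reports its member neurons, lags (in bins)
        # and the bins at which it occurs
        print(patt['neurons'], patt['lags'], len(patt['times']))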
+ 190 - 0
code/elephant/elephant/test/test_change_point_detection.py

@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+import neo
+import numpy as np
+import quantities as pq
+
+import unittest
+import elephant.change_point_detection as mft
+from numpy.testing.utils import assert_array_almost_equal, assert_allclose
+                                     
+                                     
+#np.random.seed(13)
+
+class FilterTestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_array = [0.4, 0.5, 0.65, 0.7, 0.9, 1.15, 1.2, 1.9]
+        '''
+        spks_ri = [0.9, 1.15, 1.2]
+        spk_le = [0.4, 0.5, 0.65, 0.7]
+        '''
+        mu_ri = (0.25 + 0.05) / 2
+        mu_le = (0.1 + 0.15 + 0.05) / 3
+        sigma_ri = ((0.25 - 0.15) ** 2 + (0.05 - 0.15) ** 2) / 2
+        sigma_le = ((0.1 - 0.1) ** 2 + (0.15 - 0.1) ** 2 + (
+                0.05 - 0.1) ** 2) / 3
+        self.targ_t08_h025 = 0
+        self.targ_t08_h05 = (3 - 4) / np.sqrt(
+            (sigma_ri / mu_ri ** (3)) * 0.5 + (sigma_le / mu_le ** (3)) * 0.5)
+
+    # Window Large #
+    def test_filter_with_spiketrain_h05(self):
+        st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.0)
+        target = self.targ_t08_h05
+        res = mft._filter(0.8 * pq.s, 0.5 * pq.s, st)
+        assert_array_almost_equal(res, target, decimal=9)
+        self.assertRaises(ValueError, mft._filter, 0.8, 0.5 * pq.s, st)
+        self.assertRaises(ValueError, mft._filter, 0.8 * pq.s, 0.5, st)
+        self.assertRaises(ValueError, mft._filter, 0.8 * pq.s, 0.5 * pq.s,
+                          self.test_array)
+        
+    # Window Small #
+    def test_filter_with_spiketrain_h025(self):
+        st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.0)
+        target = self.targ_t08_h025
+        res = mft._filter(0.8 * pq.s, 0.25 * pq.s, st)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_filter_with_quantities_h025(self):
+        st = pq.Quantity(self.test_array, units='s')
+        target = self.targ_t08_h025
+        res = mft._filter(0.8 * pq.s, 0.25 * pq.s, st)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_filter_with_plain_array_h025(self):
+        st = self.test_array
+        target = self.targ_t08_h025
+        res = mft._filter(0.8 * pq.s, 0.25 * pq.s, st * pq.s)
+        assert_array_almost_equal(res, target, decimal=9)
+        
+    def test_isi_with_quantities_h05(self):
+        st = pq.Quantity(self.test_array, units='s')
+        target = self.targ_t08_h05
+        res = mft._filter(0.8 * pq.s, 0.5 * pq.s, st)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_isi_with_plain_array_h05(self):
+        st = self.test_array
+        target = self.targ_t08_h05
+        res = mft._filter(0.8 * pq.s, 0.5 * pq.s, st * pq.s)
+        assert_array_almost_equal(res, target, decimal=9)
+
+
+class FilterProcessTestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_array = [1.1, 1.2, 1.4, 1.6, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95]
+        x = (7 - 3) / np.sqrt(
+            (0.0025 / 0.15 ** 3) * 0.5 + (0.0003472 / 0.05833 ** 3) * 0.5)
+        self.targ_h05 = [[0.5, 1, 1.5],
+                         [(0 - 1.7) / np.sqrt(0.4), (0 - 1.7) / np.sqrt(0.4),
+                          (x - 1.7) / np.sqrt(0.4)]]
+
+    def test_filter_process_with_spiketrain_h05(self):
+        st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.1)
+        target = self.targ_h05
+        res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st, 2.01 * pq.s,
+                                  np.array([[0.5], [1.7], [0.4]]))
+        assert_array_almost_equal(res[1], target[1], decimal=3)
+        
+        self.assertRaises(ValueError, mft._filter_process, 0.5 , 0.5 * pq.s,
+                              st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]]))
+        self.assertRaises(ValueError, mft._filter_process, 0.5 * pq.s, 0.5,
+                              st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]]))
+        self.assertRaises(ValueError, mft._filter_process, 0.5 * pq.s,
+                          0.5 * pq.s, self.test_array, 2.01 * pq.s,
+                          np.array([[0.5], [1.7], [0.4]]))
+      
+    def test_filter_process_with_quantities_h05(self):
+        st = pq.Quantity(self.test_array, units='s')
+        target = self.targ_h05
+        res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st, 2.01 * pq.s,
+                                  np.array([[0.5], [1.7], [0.4]]))
+        assert_array_almost_equal(res[0], target[0], decimal=3)
+
+    def test_filter_process_with_plain_array_h05(self):
+        st = self.test_array
+        target = self.targ_h05
+        res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st * pq.s,
+                                  2.01 * pq.s, np.array([[0.5], [1.7], [0.4]]))
+        self.assertNotIsInstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=3)
+
+
+class MultipleFilterAlgorithmTestCase(unittest.TestCase):
+    def setUp(self):
+        self.test_array = [1.1, 1.2, 1.4, 1.6, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95]
+        self.targ_h05_dt05 = [1.5 * pq.s]
+        
+        # to speed up the test, the following `test_param` and `test_quantile`
+        # parameters have been calculated offline using the function:
+        # empirical_parameters([10, 25, 50, 75, 100, 125, 150]*pq.s, 700*pq.s,
+        #                      5, 10000)
+        # the user should do the same if the method has to be applied to
+        # several spike trains of the same length `T` and with the same set of
+        # windows.
+        self.test_param = np.array([[10., 25.,  50.,  75.,   100., 125., 150.],
+                            [3.167, 2.955,  2.721, 2.548, 2.412, 2.293, 2.180],
+                            [0.150, 0.185, 0.224, 0.249, 0.269, 0.288, 0.301]])
+        self.test_quantile = 2.75
+
+    def test_MultipleFilterAlgorithm_with_spiketrain_h05(self):
+        st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.1)
+        target = [self.targ_h05_dt05]
+        res = mft.multiple_filter_test([0.5] * pq.s, st, 2.1 * pq.s, 5, 100,
+                                       dt=0.1 * pq.s)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_MultipleFilterAlgorithm_with_quantities_h05(self):
+        st = pq.Quantity(self.test_array, units='s')
+        target = [self.targ_h05_dt05]
+        res = mft.multiple_filter_test([0.5] * pq.s, st, 2.1 * pq.s, 5, 100,
+                                       dt=0.5 * pq.s)
+        assert_array_almost_equal(res, target, decimal=9)
+
+    def test_MultipleFilterAlgorithm_with_plain_array_h05(self):
+        st = self.test_array
+        target = [self.targ_h05_dt05]
+        res = mft.multiple_filter_test([0.5] * pq.s, st * pq.s, 2.1 * pq.s, 5,
+                                       100, dt=0.5 * pq.s)
+        self.assertNotIsInstance(res, pq.Quantity)
+        assert_array_almost_equal(res, target, decimal=9)
+	 
+    def test_MultipleFilterAlgorithm_with_longdata(self):
+        
+        def gamma_train(k, teta, tmax):
+            x = np.random.gamma(k, teta, int(tmax * (k * teta) ** (-1) * 3))
+            s = np.cumsum(x)
+            idx = np.where(s < tmax)
+            s = s[idx]  # gamma process
+            return s
+	
+        def alternative_hypothesis(k1, teta1, c1, k2, teta2, c2, k3, teta3, c3,
+                                   k4, teta4, T):
+            s1 = gamma_train(k1, teta1, c1)
+            s2 = gamma_train(k2, teta2, c2) + c1
+            s3 = gamma_train(k3, teta3, c3) + c1 + c2
+            s4 = gamma_train(k4, teta4, T) + c1 + c2 + c3
+            return np.concatenate((s1, s2, s3, s4)), [s1[-1], s2[-1], s3[-1]]
+
+        st = self.h1 = alternative_hypothesis(1, 1 / 4., 150, 2, 1 / 26., 30,
+                                              1, 1 / 36., 320,
+                                              2, 1 / 33., 200)[0]
+
+        window_size = [10, 25, 50, 75, 100, 125, 150] * pq.s
+        self.target_points = [150, 180, 500] 
+        target = self.target_points
+                        
+        result = mft.multiple_filter_test(
+            window_size, st * pq.s, 700 * pq.s, 5, 10000,
+            test_quantile=self.test_quantile, test_param=self.test_param,
+            dt=1 * pq.s)
+        self.assertNotIsInstance(result, pq.Quantity)
+
+        result_concatenated = []
+        for i in result:
+            result_concatenated = np.hstack([result_concatenated, i])
+        result_concatenated = np.sort(result_concatenated)   
+        assert_allclose(result_concatenated[:3], target[:3], rtol=0,
+                        atol=5)
+        print('detected {0} cps: {1}'.format(len(result_concatenated),
+                                                           result_concatenated))
+                                                
+if __name__ == '__main__':
+    unittest.main()

+ 190 - 0
code/elephant/elephant/test/test_phase_analysis.py

@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+"""
+Unit tests for the phase analysis module.
+
+:copyright: Copyright 2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+from __future__ import division, print_function
+
+import unittest
+
+from neo import SpikeTrain, AnalogSignal
+import numpy as np
+import quantities as pq
+
+import elephant.phase_analysis
+
+from numpy.ma.testutils import assert_allclose
+
+
+class SpikeTriggeredPhaseTestCase(unittest.TestCase):
+
+    def setUp(self):
+        tlen0 = 100 * pq.s
+        f0 = 20. * pq.Hz
+        fs0 = 1 * pq.ms
+        t0 = np.arange(
+            0, tlen0.rescale(pq.s).magnitude,
+            fs0.rescale(pq.s).magnitude) * pq.s
+        self.anasig0 = AnalogSignal(
+            np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
+            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
+        self.st0 = SpikeTrain(
+            np.arange(50, tlen0.rescale(pq.ms).magnitude - 50, 50) * pq.ms,
+            t_start=0 * pq.ms, t_stop=tlen0)
+        self.st1 = SpikeTrain(
+            [100., 100.1, 100.2, 100.3, 100.9, 101.] * pq.ms,
+            t_start=0 * pq.ms, t_stop=tlen0)
+
+    def test_perfect_locking_one_spiketrain_one_signal(self):
+        phases, amps, times = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            self.st0,
+            interpolate=True)
+
+        assert_allclose(phases[0], - np.pi / 2.)
+        assert_allclose(amps[0], 1, atol=0.1)
+        assert_allclose(times[0].magnitude, self.st0.magnitude)
+        self.assertEqual(len(phases[0]), len(self.st0))
+        self.assertEqual(len(amps[0]), len(self.st0))
+        self.assertEqual(len(times[0]), len(self.st0))
+
+    def test_perfect_locking_many_spiketrains_many_signals(self):
+        phases, amps, times = elephant.phase_analysis.spike_triggered_phase(
+            [
+                elephant.signal_processing.hilbert(self.anasig0),
+                elephant.signal_processing.hilbert(self.anasig0)],
+            [self.st0, self.st0],
+            interpolate=True)
+
+        assert_allclose(phases[0], -np.pi / 2.)
+        assert_allclose(amps[0], 1, atol=0.1)
+        assert_allclose(times[0].magnitude, self.st0.magnitude)
+        self.assertEqual(len(phases[0]), len(self.st0))
+        self.assertEqual(len(amps[0]), len(self.st0))
+        self.assertEqual(len(times[0]), len(self.st0))
+
+    def test_perfect_locking_one_spiketrains_many_signals(self):
+        phases, amps, times = elephant.phase_analysis.spike_triggered_phase(
+            [
+                elephant.signal_processing.hilbert(self.anasig0),
+                elephant.signal_processing.hilbert(self.anasig0)],
+            [self.st0],
+            interpolate=True)
+
+        assert_allclose(phases[0], -np.pi / 2.)
+        assert_allclose(amps[0], 1, atol=0.1)
+        assert_allclose(times[0].magnitude, self.st0.magnitude)
+        self.assertEqual(len(phases[0]), len(self.st0))
+        self.assertEqual(len(amps[0]), len(self.st0))
+        self.assertEqual(len(times[0]), len(self.st0))
+
+    def test_perfect_locking_many_spiketrains_one_signal(self):
+        phases, amps, times = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            [self.st0, self.st0],
+            interpolate=True)
+
+        assert_allclose(phases[0], -np.pi / 2.)
+        assert_allclose(amps[0], 1, atol=0.1)
+        assert_allclose(times[0].magnitude, self.st0.magnitude)
+        self.assertEqual(len(phases[0]), len(self.st0))
+        self.assertEqual(len(amps[0]), len(self.st0))
+        self.assertEqual(len(times[0]), len(self.st0))
+
+    def test_interpolate(self):
+        phases_int, _, _ = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            self.st1,
+            interpolate=True)
+
+        self.assertLess(phases_int[0][0], phases_int[0][1])
+        self.assertLess(phases_int[0][1], phases_int[0][2])
+        self.assertLess(phases_int[0][2], phases_int[0][3])
+        self.assertLess(phases_int[0][3], phases_int[0][4])
+        self.assertLess(phases_int[0][4], phases_int[0][5])
+
+        phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            self.st1,
+            interpolate=False)
+
+        self.assertEqual(phases_noint[0][0], phases_noint[0][1])
+        self.assertEqual(phases_noint[0][1], phases_noint[0][2])
+        self.assertEqual(phases_noint[0][2], phases_noint[0][3])
+        self.assertEqual(phases_noint[0][3], phases_noint[0][4])
+        self.assertNotEqual(phases_noint[0][4], phases_noint[0][5])
+
+        # Verify that when interpolation is used and a spike sits exactly on a
+        # sample of the Hilbert transform, the result equals the
+        # non-interpolated phase of spikes lying slightly to the right of that
+        # sample
+        self.assertEqual(phases_noint[0][2], phases_int[0][0])
+        self.assertEqual(phases_noint[0][4], phases_int[0][0])
+
+    def test_inconsistent_numbers_spiketrains_hilbert(self):
+        self.assertRaises(
+            ValueError, elephant.phase_analysis.spike_triggered_phase,
+            [
+                elephant.signal_processing.hilbert(self.anasig0),
+                elephant.signal_processing.hilbert(self.anasig0)],
+            [self.st0, self.st0, self.st0], False)
+
+        self.assertRaises(
+            ValueError, elephant.phase_analysis.spike_triggered_phase,
+            [
+                elephant.signal_processing.hilbert(self.anasig0),
+                elephant.signal_processing.hilbert(self.anasig0)],
+            [self.st0, self.st0, self.st0], False)
+
+    def test_spike_earlier_than_hilbert(self):
+        # This is a spike clearly outside the bounds
+        st = SpikeTrain(
+            [-50, 50],
+            units='s', t_start=-100*pq.s, t_stop=100*pq.s)
+        phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            st,
+            interpolate=False)
+        self.assertEqual(len(phases_noint[0]), 1)
+
+        # This is a spike right on the border (start of the signal is at 0s,
+        # spike sits at t=0s). By definition of intervals in
+        # Elephant (left borders inclusive, right borders exclusive), this
+        # spike is to be considered.
+        st = SpikeTrain(
+            [0, 50],
+            units='s', t_start=-100*pq.s, t_stop=100*pq.s)
+        phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            st,
+            interpolate=False)
+        self.assertEqual(len(phases_noint[0]), 2)
+
+    def test_spike_later_than_hilbert(self):
+        # This is a spike clearly outside the bounds
+        st = SpikeTrain(
+            [1, 250],
+            units='s', t_start=-1*pq.s, t_stop=300*pq.s)
+        phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            st,
+            interpolate=False)
+        self.assertEqual(len(phases_noint[0]), 1)
+
+        # This is a spike right on the border (length of the signal is 100s,
+        # spike sits at t=100s). However, by definition of intervals in
+        # Elephant (left borders inclusive, right borders exclusive), this
+        # spike is not to be considered.
+        st = SpikeTrain(
+            [1, 100],
+            units='s', t_start=-1*pq.s, t_stop=200*pq.s)
+        phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(
+            elephant.signal_processing.hilbert(self.anasig0),
+            st,
+            interpolate=False)
+        self.assertEqual(len(phases_noint[0]), 1)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 246 - 0
code/elephant/elephant/test/test_spade.py

@@ -0,0 +1,246 @@
+"""
+Unit tests for the spade module.
+
+:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
+:license: Modified BSD, see LICENSE.txt for details.
+"""
+from __future__ import division
+import unittest
+
+import neo
+import numpy as np
+from numpy.testing.utils import assert_array_equal
+import quantities as pq
+import elephant.spade as spade
+import elephant.conversion as conv
+import elephant.spike_train_generation as stg
+
+try:
+    from elephant.spade_src import fim
+    HAVE_FIM = True
+except ImportError:
+    HAVE_FIM = False
+
+
+class SpadeTestCase(unittest.TestCase):
+    def setUp(self):
+        # Spade parameters
+        self.binsize = 1 * pq.ms
+        self.winlen = 10
+        self.n_subset = 10
+        self.n_surr = 10
+        self.alpha = 0.05
+        self.stability_thresh = [0.1, 0.1]
+        self.psr_param = [0, 0, 0]
+        self.min_occ = 4
+        self.min_spikes = 4
+        self.min_neu = 4
+        # Test data parameters
+        # CPP parameters
+        self.n_neu = 100
+        self.amplitude = [0] * self.n_neu + [1]
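+        # this amplitude distribution puts all probability mass on amplitude
+        # n_neu, i.e. every event of the compound Poisson process is
+        # synchronous across all 100 neurons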
+        self.cpp = stg.cpp(rate=3*pq.Hz, A=self.amplitude, t_stop=5*pq.s)
+        # Number of patterns' occurrences
+        self.n_occ1 = 10
+        self.n_occ2 = 12
+        self.n_occ3 = 15
+        # Patterns lags
+        self.lags1 = [2]
+        self.lags2 = [1, 2]
+        self.lags3 = [1, 2, 3, 4, 5]
+        # Length of the spiketrain
+        self.t_stop = 3000
+        # Patterns times
+        self.patt1_times = neo.SpikeTrain(
+            np.arange(
+                0, 1000, 1000//self.n_occ1) *
+            pq.ms, t_stop=self.t_stop*pq.ms)
+        self.patt2_times = neo.SpikeTrain(
+            np.arange(
+                1000, 2000, 1000 // self.n_occ2) *
+            pq.ms, t_stop=self.t_stop * pq.ms)
+        self.patt3_times = neo.SpikeTrain(
+            np.arange(
+                2000, 3000, 1000 // self.n_occ3) *
+            pq.ms, t_stop=self.t_stop * pq.ms)
+        # Patterns
+        self.patt1 = [self.patt1_times] + [neo.SpikeTrain(
+            self.patt1_times.view(pq.Quantity)+l * pq.ms,
+            t_stop=self.t_stop*pq.ms) for l in self.lags1]
+        self.patt2 = [self.patt2_times] + [neo.SpikeTrain(
+            self.patt2_times.view(pq.Quantity)+l * pq.ms,
+            t_stop=self.t_stop*pq.ms) for l in self.lags2]
+        self.patt3 = [self.patt3_times] + [neo.SpikeTrain(
+            self.patt3_times.view(pq.Quantity)+l * pq.ms,
+            t_stop=self.t_stop*pq.ms) for l in self.lags3]
+        # Data
+        self.msip = self.patt1 + self.patt2 + self.patt3
+        # Expected results
+        self.n_spk1 = len(self.lags1) + 1
+        self.n_spk2 = len(self.lags2) + 1
+        self.n_spk3 = len(self.lags3) + 1
+        self.elements1 = list(range(self.n_spk1))
+        self.elements2 = list(range(self.n_spk2))
+        self.elements3 = list(range(self.n_spk3))
+        self.elements_msip = [
+            self.elements1, list(range(self.n_spk1, self.n_spk1 + self.n_spk2)),
+                list(range(self.n_spk1 + self.n_spk2, self.n_spk1 +
+                      self.n_spk2 + self.n_spk3))]
+        self.occ1 = np.unique(conv.BinnedSpikeTrain(
+            self.patt1_times, self.binsize).spike_indices[0])
+        self.occ2 = np.unique(conv.BinnedSpikeTrain(
+            self.patt2_times, self.binsize).spike_indices[0])
+        self.occ3 = np.unique(conv.BinnedSpikeTrain(
+            self.patt3_times, self.binsize).spike_indices[0])
+        self.occ_msip = [
+            list(self.occ1), list(self.occ2), list(self.occ3)]
+        self.lags_msip = [self.lags1, self.lags2, self.lags3]
+
+    # Testing cpp
+    def test_spade_cpp(self):
+        output_cpp = spade.spade(self.cpp, self.binsize,
+                                  1,
+                                  n_subsets=self.n_subset,
+                                  stability_thresh=self.stability_thresh,
+                                  n_surr=self.n_surr, alpha=self.alpha,
+                                  psr_param=self.psr_param,
+                                  output_format='patterns')['patterns']
+        elements_cpp = []
+        lags_cpp = []
+        # collecting spade output
+        for out in output_cpp:
+            elements_cpp.append(sorted(out['neurons']))
+            lags_cpp.append(list(out['lags'].magnitude))
+        # check neurons in the patterns
+        assert_array_equal(elements_cpp, [range(self.n_neu)])
+        # check the lags
+        assert_array_equal(lags_cpp, [np.array([0]*(self.n_neu - 1))])
+
+    # Testing spectrum cpp
+    def test_spade_spectrum_cpp(self):
+        # Computing Spectrum
+        spectrum_cpp = spade.concepts_mining(self.cpp, self.binsize,
+                                             1, report='#')[0]
+        # Check spectrum
+        assert_array_equal(spectrum_cpp, [(len(self.cpp), len(self.cpp[0]), 1)])
+
+    # Testing with multiple patterns input
+    def test_spade_msip(self):
+        output_msip = spade.spade(self.msip, self.binsize,
+                                  self.winlen,
+                                  n_subsets=self.n_subset,
+                                  stability_thresh=self.stability_thresh,
+                                  n_surr=self.n_surr, alpha=self.alpha,
+                                  psr_param=self.psr_param,
+                                  output_format='patterns')['patterns']
+        elements_msip = []
+        occ_msip = []
+        lags_msip = []
+        # collecting spade output
+        for out in output_msip:
+            elements_msip.append(out['neurons'])
+            occ_msip.append(list(out['times'].magnitude))
+            lags_msip.append(list(out['lags'].magnitude))
+        elements_msip = sorted(elements_msip, key=lambda d: len(d))
+        occ_msip = sorted(occ_msip, key=lambda d: len(d))
+        lags_msip = sorted(lags_msip, key=lambda d: len(d))
+        # check neurons in the patterns
+        assert_array_equal(elements_msip, self.elements_msip)
+        # check the occurrences time of the patters
+        assert_array_equal(occ_msip, self.occ_msip)
+        # check the lags
+        assert_array_equal(lags_msip, self.lags_msip)
+
+    # test under different configuration of parameters than the default one
+    def test_parameters(self):
+        # test min_spikes parameter
+        output_msip_min_spikes = spade.spade(self.msip, self.binsize,
+                                        self.winlen,
+                                        n_subsets=self.n_subset,
+                                        n_surr=self.n_surr, alpha=self.alpha,
+                                        min_spikes=self.min_spikes,
+                                        psr_param=self.psr_param,
+                                        output_format='patterns')['patterns']
+        # collecting spade output
+        elements_msip_min_spikes = []
+        for out in output_msip_min_spikes:
+            elements_msip_min_spikes.append(out['neurons'])
+        elements_msip_min_spikes = sorted(
+            elements_msip_min_spikes, key=lambda d: len(d))
+        lags_msip_min_spikes = []
+        for out in output_msip_min_spikes:
+            lags_msip_min_spikes.append(list(out['lags'].magnitude))
+        lags_msip_min_spikes = sorted(
+            lags_msip_min_spikes, key=lambda d: len(d))
+        # check the lags
+        assert_array_equal(lags_msip_min_spikes, [
+            l for l in self.lags_msip if len(l) + 1 >= self.min_spikes])
+        # check the neurons in the patterns
+        assert_array_equal(elements_msip_min_spikes, [
+            el for el in self.elements_msip
+            if len(el) >= self.min_neu and len(el) >= self.min_spikes])
+
+        # test min_occ parameter
+        output_msip_min_occ = spade.spade(self.msip, self.binsize,
+                                        self.winlen,
+                                        n_subsets=self.n_subset,
+                                        n_surr=self.n_surr, alpha=self.alpha,
+                                        min_occ=self.min_occ,
+                                        psr_param=self.psr_param,
+                                        output_format='patterns')['patterns']
+        # collect spade output
+        occ_msip_min_occ = []
+        for out in output_msip_min_occ:
+            occ_msip_min_occ.append(list(out['times'].magnitude))
+        occ_msip_min_occ = sorted(occ_msip_min_occ, key=lambda d: len(d))
+        # test occurrences time
+        assert_array_equal(occ_msip_min_occ, [
+            occ for occ in self.occ_msip if len(occ) >= self.min_occ])
+
+    # test to compare the python and the C implementation of FIM
+    # skip this test if the C code is not available
+    @unittest.skipIf(not HAVE_FIM, 'Requires fim.so')
+    def test_fpgrowth_fca(self):
+        binary_matrix = conv.BinnedSpikeTrain(
+            self.patt1, self.binsize).to_bool_array()
+        context, transactions, rel_matrix = spade._build_context(
+            binary_matrix, self.winlen)
+        # mining the data with the C fim module (fpgrowth)
+        mining_results_fpg = spade._fpgrowth(
+            transactions,
+            rel_matrix=rel_matrix)
+        # mining the data with the python fast_fca implementation
+        mining_results_ffca = spade._fast_fca(context)
+
+        # testing that the outputs are identical
+        assert_array_equal(sorted(mining_results_ffca[0][0]), sorted(
+            mining_results_fpg[0][0]))
+        assert_array_equal(sorted(mining_results_ffca[0][1]), sorted(
+            mining_results_fpg[0][1]))
+
+    # test the errors raised
+    def test_spade_raise_error(self):
+        self.assertRaises(TypeError, spade.spade,
+                          [[1, 2, 3], [3, 4, 5]], 1 * pq.ms, 4)
+        self.assertRaises(AttributeError, spade.spade, [neo.SpikeTrain(
+            [1, 2, 3] * pq.s, t_stop=5 * pq.s), neo.SpikeTrain(
+            [3, 4, 5] * pq.s, t_stop=6 * pq.s)], 1 * pq.ms, 4)
+        self.assertRaises(AttributeError, spade.spade, [neo.SpikeTrain(
+            [1, 2, 3] * pq.s, t_stop=5 * pq.s), neo.SpikeTrain(
+            [3, 4, 5] * pq.s, t_stop=5 * pq.s)], 1 * pq.ms, 4, min_neu=-3)
+        self.assertRaises(AttributeError, spade.pvalue_spectrum, [
+            neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=5 * pq.s), neo.SpikeTrain(
+            [3, 4, 5] * pq.s, t_stop=5 * pq.s)], 1 * pq.ms, 4, 3 * pq.ms,
+            n_surr=-3)
+        self.assertRaises(AttributeError, spade.test_signature_significance, (
+            (2, 3, 0.2), (2, 4, 0.1)), 0.01, corr='try')
+        self.assertRaises(AttributeError, spade.approximate_stability, (),
+                          np.array([]), n_subsets=-3)
+
+
+def suite():
+    suite = unittest.makeSuite(SpadeTestCase, 'test')
+    return suite
+
+
+if __name__ == "__main__":
+    runner = unittest.TextTestRunner(verbosity=2)
+    runner.run(suite())
+    globals()

+ 2 - 0
code/elephant/requirements-docs.txt

@@ -0,0 +1,2 @@
+numpydoc>=0.5
+sphinx>=1.2.2

+ 2 - 0
code/elephant/requirements-extras.txt

@@ -0,0 +1,2 @@
+pandas>=0.14.1
+scikit-learn

+ 1 - 0
code/elephant/requirements-tests.txt

@@ -0,0 +1 @@
+nose>=1.3.3

+ 125 - 0
code/python-neo/.circleci/config.yml

@@ -0,0 +1,125 @@
+# Python CircleCI 2.0 configuration file
+#
+# Check https://circleci.com/docs/2.0/language-python/ for more details
+#
+version: 2
+workflows:
+  version: 2
+  test:
+    jobs:
+      - test-3.6
+      - test-2.7
+jobs:
+  test-3.6:
+    docker:
+      - image: circleci/python:3.6.1
+
+    environment:
+      - NEO_TEST_FILE_DIR: "/home/circleci/repo/files_for_testing_neo"
+
+    working_directory: ~/repo
+
+    steps:
+      - checkout
+
+      # Download and cache dependencies
+      - restore_cache:
+          keys:
+          - v1-dependencies-{{ checksum "requirements.txt" }}
+          # fallback to using the latest cache if no exact match is found
+          - v1-dependencies-
+
+      - restore_cache:
+          keys:
+            - test-files-f7905c85d1
+
+      - run:
+          name: install dependencies
+          command: |
+            python3 -m venv venv
+            . venv/bin/activate
+            pip install -r requirements.txt
+            pip install -r .circleci/requirements_testing.txt
+            pip install . 
+
+      - save_cache:
+          paths:
+            - ./venv
+          key: v1-dependencies-{{ checksum "requirements.txt" }}
+
+      - save_cache:
+          paths:
+            - ./files_for_testing_neo
+          key: test-files-f7905c85d1
+
+      # run tests!
+      - run:
+          name: run tests
+          command: |
+            . venv/bin/activate
+            nosetests -v --with-coverage --cover-package=neo
+
+      - run:
+          name: coveralls
+          command: |
+            . venv/bin/activate
+            coveralls || true  # a coveralls failure shouldn't cause a build failure
+
+      - store_artifacts:
+          path: test-reports
+          destination: test-reports
+
+  test-2.7:
+    docker:
+      - image: circleci/python:2.7-stretch
+      
+    environment:
+      - NEO_TEST_FILE_DIR: "/home/circleci/repo/files_for_testing_neo"
+
+    working_directory: ~/repo
+
+    steps:
+      - checkout
+
+      # Download and cache dependencies
+      - restore_cache:
+          keys:
+          - v1-py2-dependencies-{{ checksum "requirements.txt" }}
+          # fallback to using the latest cache if no exact match is found
+          - v1-py2-dependencies-
+
+      - restore_cache:
+          keys:
+            - test-files-f7905c85d1
+
+      - run:
+          name: install dependencies
+          command: |
+            virtualenv venv2
+            . venv2/bin/activate
+            pip install -r requirements.txt
+            pip install -r .circleci/requirements_testing.txt
+            pip install mock  # only needed for Python 2
+            pip install . 
+
+      - save_cache:
+          paths:
+            - ./venv2
+          key: v1-py2-dependencies-{{ checksum "requirements.txt" }}
+
+      - save_cache:
+          paths:
+            - ./files_for_testing_neo
+          key: test-files-f7905c85d1
+
+      # run tests!
+      - run:
+          name: run tests
+          command: |
+            . venv2/bin/activate
+            nosetests -v --with-coverage --cover-package=neo
+
+      - store_artifacts:
+          path: test-reports
+          destination: test-reports
+          

+ 12 - 0
code/python-neo/.circleci/requirements_testing.txt

@@ -0,0 +1,12 @@
+scipy>=0.12.0
+h5py
+igor
+klusta
+tqdm
+nixio>=1.5.0b2
+axographio>=0.3.1
+matplotlib
+ipython
+https://github.com/nsdf/nsdf/archive/0.1.tar.gz
+coverage
+coveralls

+ 6 - 0
code/python-neo/.pep8speaks.yml

@@ -0,0 +1,6 @@
+pycodestyle:
+    max-line-length: 99  # Default is 79 in PEP8
+    ignore:
+      - W503 # Change in PEP8, this warning is replaced by W504
+      - E127
+      - E128

+ 46 - 0
code/python-neo/CODE_OF_CONDUCT.md

@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at neo-maintainers@protonmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/

+ 1 - 0
code/python-neo/CONTRIBUTING.md

@@ -0,0 +1 @@
+See http://neo.readthedocs.io/en/latest/developers_guide.html

File diff suppressed because it is too large
+ 905 - 0
code/python-neo/doc/source/images/neo_ecosystem.svg


+ 205 - 0
code/python-neo/doc/source/rawio.rst

@@ -0,0 +1,205 @@
+*********
+Neo RawIO
+*********
+
+.. currentmodule:: neo.rawio
+
+
+.. _neo_rawio_API:
+
+
+For performance and memory consumption reasons a new layer has been added to Neo.
+
+In brief:
+    * **neo.io** is the user-oriented read/write layer. Reading consists of getting a tree
+      of Neo objects from a data source (file, url, or directory). 
+      When reading, all Neo objects are scaled to the correct physical units.
+      Writing consists of making a set of Neo objects persistent in a file format.
+    * **neo.rawio** is a low-level layer for reading data only. Reading consists of getting
+      NumPy buffers (often int16/int64) of signals/spikes/events.
+      Scaling to real values (microV, times, ...) is done in a second step.
+      Here the underlying objects must be consistent across Blocks and Segments for a given
+      data source.
+
+      
+The neo.rawio API has been added for developers.
+It is close to what a C API for reading data would look like, but implemented in Python/NumPy.
+
+
+Not all IOs are implemented in :mod:`neo.rawio` but all classes implemented in :mod:`neo.rawio` are
+also available in :mod:`neo.io`.
+
+
+Possible uses of the :mod:`neo.rawio` API are:
+    * reading chunks of int16 signals quickly and doing the scaling to physical units (uV)
+      on a GPU while zooming. This should improve the bandwidth from disk to RAM
+      and from RAM to GPU memory.
+    * loading only small chunks of data for heavy computations. For instance
+      the spike sorting module tridesclous_ does this.
+
+
+The :mod:`neo.rawio` API is less flexible than :mod:`neo.io` and has some limitations:
+  * read-only
+  * AnalogSignals must have the same characteristics across all Blocks and Segments:
+    ``sampling_rate``, ``shape[1]``, ``dtype``
+  * AnalogSignals should all have the same ``sampling_rate``, otherwise they won't be read
+    at the same time.
+  * Units must have a SpikeTrain in every Block and Segment, even if it is empty
+  * Epoch and Event are processed the same way (with ``durations=None`` for Event).
+
+    
+For an intuitive comparison of :mod:`neo.io` and :mod:`neo.rawio` see:
+  * :file:`example/read_file_neo_io.py`
+  * :file:`example/read_file_neo_rawio.py`
+
+  
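+In compressed form, and assuming the Plexon test file used in the examples below has
+already been downloaded, the contrast between the two layers looks roughly like this
+(a sketch distilled from those two example scripts)::
+
+    >>> import neo
+    >>> # neo.io: full Neo objects, already scaled to physical units
+    >>> reader = neo.io.PlexonIO(filename='File_plexon_3.plx')
+    >>> block = reader.read(lazy=False)[0]
+    >>> block.segments[0].analogsignals[0]  # an AnalogSignal with units attached
+
+    >>> # neo.rawio: raw NumPy buffers, scaling is an explicit second step
+    >>> from neo.rawio import PlexonRawIO
+    >>> reader = PlexonRawIO(filename='File_plexon_3.plx')
+    >>> reader.parse_header()
+    >>> raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
+    ...                                     i_start=0, i_stop=1024,
+    ...                                     channel_indexes=None)
+    >>> float_sigs = reader.rescale_signal_raw_to_float(raw, dtype='float64')
+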
+One intended benefit of the :mod:`neo.rawio` API is that a developer should be able
+to code a new RawIO class with little knowledge of the Neo tree of objects or of the
+:mod:`quantities` package. A rough sketch of such a class is shown below.
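+
+The following is only a rough, hypothetical skeleton (the class name, the example channel
+values and the helper names ``_signal_channel_dtype``, ``_unit_channel_dtype`` and
+``_event_channel_dtype`` are taken from the example RawIO shipped with neo and may differ
+in detail); many more ``_get_*`` methods are needed before such a class is actually usable::
+
+    import numpy as np
+    from neo.rawio.baserawio import (BaseRawIO, _signal_channel_dtype,
+                                     _unit_channel_dtype, _event_channel_dtype)
+
+    class MyFormatRawIO(BaseRawIO):
+        extensions = ['myext']
+        rawmode = 'one-file'
+
+        def __init__(self, filename=''):
+            BaseRawIO.__init__(self)
+            self.filename = filename
+
+        def _source_name(self):
+            return self.filename
+
+        def _parse_header(self):
+            # one Block with one Segment, a single int16 signal channel,
+            # no sorted units and no event channels
+            sig_channels = np.array(
+                [('ch0', 0, 10000., 'int16', 'uV', 0.1, 0., 0)],
+                dtype=_signal_channel_dtype)
+            self.header = {
+                'nb_block': 1,
+                'nb_segment': [1],
+                'signal_channels': sig_channels,
+                'unit_channels': np.array([], dtype=_unit_channel_dtype),
+                'event_channels': np.array([], dtype=_event_channel_dtype),
+            }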
+
+
+Basic usage
+===========
+
+
+First create a reader from a class::
+
+    >>> from neo.rawio import PlexonRawIO
+    >>> reader = PlexonRawIO(filename='File_plexon_3.plx')
+
+Then browse the internal header and display information::
+
+    >>> reader.parse_header()
+    >>> print(reader)
+    PlexonRawIO: File_plexon_3.plx
+    nb_block: 1
+    nb_segment:  [1]
+    signal_channels: [V1]
+    unit_channels: [Wspk1u, Wspk2u, Wspk4u, Wspk5u ... Wspk29u Wspk30u Wspk31u Wspk32u]
+    event_channels: []
+
+You get the number of blocks and segments per block. You have information
+about channels: **signal_channels**, **unit_channels**, **event_channels**.
+
+All this information is internally available in the *header* dict::
+
+    >>> for k, v in reader.header.items():
+    ...    print(k, v)
+    signal_channels [('V1', 0,  1000., 'int16', '',  2.44140625,  0., 0)]
+    event_channels []
+    nb_segment [1]
+    nb_block 1
+    unit_channels [('Wspk1u', 'ch1#0', '',  0.00146484,  0., 0,  30000.)
+    ('Wspk2u', 'ch2#0', '',  0.00146484,  0., 0,  30000.)
+    ...
+
+
+Read signal chunks of data and scale them::
+
+    >>> channel_indexes = None  #could be channel_indexes = [0]
+    >>> raw_sigs = reader.get_analogsignal_chunk(block_index=0, seg_index=0, 
+                        i_start=1024, i_stop=2048, channel_indexes=channel_indexes)
+    >>> float_sigs = reader.rescale_signal_raw_to_float(raw_sigs, dtype='float64')
+    >>> sampling_rate = reader.get_signal_sampling_rate()
+    >>> t_start = reader.get_signal_t_start(block_index=0, seg_index=0)
+    >>> units = reader.header['signal_channels'][0]['units']
+    >>> print(raw_sigs.shape, raw_sigs.dtype)
+    >>> print(float_sigs.shape, float_sigs.dtype)
+    >>> print(sampling_rate, t_start, units)
+    (1024, 1) int16
+    (1024, 1) float64
+    1000.0 0.0 V
+
+
+There are 3 ways to select a subset of channels: by index (0-based), by id or by name.
+Selecting by index is unambiguous (0 to n-1, inclusive). For some IOs, channel names
+(and sometimes channel ids) are not guaranteed to be unique; in such cases selecting
+by name or id raises an error.
+
+Example with BlackrockRawIO for the file FileSpec2.3001::
+
+    >>> raw_sigs = reader.get_analogsignal_chunk(channel_indexes=None)  # take all channels
+    >>> raw_sigs1 = reader.get_analogsignal_chunk(channel_indexes=[0, 2, 4])  # take channels 0, 2 and 4
+    >>> raw_sigs2 = reader.get_analogsignal_chunk(channel_ids=[1, 3, 5])  # the same, but by their ids (1-based)
+    >>> raw_sigs3 = reader.get_analogsignal_chunk(channel_names=['chan1', 'chan3', 'chan5'])  # the same, but by their names
+    >>> print(raw_sigs1.shape[1], raw_sigs2.shape[1], raw_sigs3.shape[1])
+    3, 3, 3
+
+
+
+Inspect the unit channels. Each unit channel gives one SpikeTrain per Segment.
+Note that for many formats a single physical channel can hold several units after spike
+sorting, so nb_unit can be larger than the number of physical or signal channels::
+
+    >>> nb_unit = reader.unit_channels_count()
+    >>> print('nb_unit', nb_unit)
+    nb_unit 30
+    >>> for unit_index in range(nb_unit):
+    ...     nb_spike = reader.spike_count(block_index=0, seg_index=0, unit_index=unit_index)
+    ...     print('unit_index', unit_index, 'nb_spike', nb_spike)
+    unit_index 0 nb_spike 701
+    unit_index 1 nb_spike 716
+    unit_index 2 nb_spike 69
+    unit_index 3 nb_spike 12
+    unit_index 4 nb_spike 95
+    unit_index 5 nb_spike 37
+    unit_index 6 nb_spike 25
+    unit_index 7 nb_spike 15
+    unit_index 8 nb_spike 33
+    ...
+
+    
+Get spike timestamps only between 0 and 10 seconds and convert them to spike times::
+
+    >>> spike_timestamps = reader.get_spike_timestamps(block_index=0, seg_index=0, unit_index=0,
+                        t_start=0., t_stop=10.)
+    >>> print(spike_timestamps.shape, spike_timestamps.dtype, spike_timestamps[:5])
+    (424,) int64 [  90  420  708 1020 1310]
+    >>> spike_times = reader.rescale_spike_timestamp(spike_timestamps, dtype='float64')
+    >>> print(spike_times.shape, spike_times.dtype, spike_times[:5])
+    (424,) float64 [ 0.003       0.014       0.0236      0.034       0.04366667]
+
+
+Get spike waveforms between 0 and 10 s::
+
+    >>> raw_waveforms = reader.get_spike_raw_waveforms(block_index=0, seg_index=0, unit_index=0,
+                        t_start=0., t_stop=10.)
+    >>> print(raw_waveforms.shape, raw_waveforms.dtype, raw_waveforms[0,0,:4])
+    (424, 1, 64) int16 [-449 -206   34   40]
+    >>> float_waveforms = reader.rescale_waveforms_to_float(raw_waveforms, dtype='float32', unit_index=0)
+    >>> print(float_waveforms.shape, float_waveforms.dtype, float_waveforms[0,0,:4])
+    (424, 1, 64) float32 [-0.65771484 -0.30175781  0.04980469  0.05859375]
+
+
+
+Count events per channel::
+
+    >>> reader = PlexonRawIO(filename='File_plexon_2.plx')
+    >>> reader.parse_header()
+    >>> nb_event_channel = reader.event_channels_count()
+    >>> print('nb_event_channel', nb_event_channel)
+    nb_event_channel 28
+    >>> for chan_index in range(nb_event_channel):
+    ...     nb_event = reader.event_count(block_index=0, seg_index=0, event_channel_index=chan_index)
+    ...     print('chan_index',chan_index, 'nb_event', nb_event)
+    chan_index 0 nb_event 1
+    chan_index 1 nb_event 0
+    chan_index 2 nb_event 0
+    chan_index 3 nb_event 0
+    ...
+
+   
+
+Read event timestamps and times for event_channel_index=0, without time limits (t_start=None, t_stop=None)::
+
+    >>> ev_timestamps, ev_durations, ev_labels = reader.get_event_timestamps(block_index=0, seg_index=0, event_channel_index=0,
+                        t_start=None, t_stop=None)
+    >>> print(ev_timestamps, ev_durations, ev_labels)
+    [1268] None ['0']
+    >>> ev_times = reader.rescale_event_timestamp(ev_timestamps, dtype='float64')
+    >>> print(ev_times)
+    [ 0.0317]
+
+
+
+
+.. _tridesclous: https://github.com/tridesclous/tridesclous

+ 42 - 0
code/python-neo/doc/source/releases/0.6.0.rst

@@ -0,0 +1,42 @@
+=======================
+Neo 0.6.0 release notes
+=======================
+
+XXth March 2018
+
+This is a draft.
+
+Major changes:
+  * Introduced :mod:`neo.rawio`: a low-level reader for various data formats
+  * Added continuous integration for all IOs using CircleCI
+    (previously only :mod:`neo.core` was tested, using Travis CI)
+  * Moved the test file repository to https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data,
+    which makes it easier for people to contribute new files for testing.
+
+Other important changes:
+  * Added :func:`time_index()` and :func:`splice()` methods to :class:`AnalogSignal`
+  * IO fixes and improvements: Blackrock, TDT, Axon, Spike2, Brainvision, Neuralynx
+  * Implemented `__deepcopy__` for all data classes
+  * New IO: BCI2000
+  * Lots of PEP8 fixes!
+  * Implemented `__getitem__` for :class:`Epoch`
+  * Removed "cascade" support from all IOs
+  * Removed lazy loading except for IOs based on rawio
+  * Marked lazy option as deprecated
+  * Added :func:`time_slice` in read_segment() for IOs based on rawio
+  * Made :attr:`SpikeTrain.times` return a :class:`Quantity` instead of a :class:`SpikeTrain`
+  * Raise a :class:`ValueError` if ``t_stop`` is earlier than ``t_start`` when creating an empty :class:`SpikeTrain`
+  * Changed filter behaviour to return all objects if no filter parameters are specified
+  * Fix pickling/unpickling of :class:`Events`
+
+Deprecated IO classes:
+    * :class:`KlustaKwikIO` (use :class:`KwikIO` instead)
+    * :class:`PyNNTextIO`, :class:`PyNNNumpyIO`
+
+(Full `list of closed issues`_)
+
+Thanks to Björn Müller, Andrew Davison, Achilleas Koutsou, Chadwick Boulay, Julia Sprenger,
+ Matthieu Senoville, Michael Denker and especially Samuel Garcia for their contributions to this release.
+
+
+.. _`list of closed issues`: https://github.com/NeuralEnsemble/python-neo/issues?q=is%3Aissue+milestone%3A0.6.0+is%3Aclosed

+ 41 - 0
code/python-neo/doc/source/releases/0.7.0.rst

@@ -0,0 +1,41 @@
+=======================
+Neo 0.7.0 release notes
+=======================
+
+26th November 2018
+
+
+Main added features:
+  * array annotations
+
+Other features:
+  * `Event.to_epoch()`
+  * Change the behaviour of `SpikeTrain.__add__` and `SpikeTrain.__sub__`
+  * bug fix for `Epoch.time_slice()`
+
+New IO classes:
+  * RawMCSRawIO (raw multi channel system file format)
+  * OpenEphys format
+  * Intanrawio (both RHD and RHS)
+  * AxographIO
+
+Many bug fixes and improvements in IO:
+  * AxonIO
+  * WinWCPIO
+  * NixIO
+  * ElphyIO
+  * Spike2IO
+  * NeoMatlab
+  * NeuralynxIO
+  * BlackrockIO (V2.3)
+  * NixIO (rewritten)
+
+Removed:
+  * PyNNIO
+
+(Full `list of closed issues`_)
+
+Thanks to Achilleas Koutsou, Andrew Davison, Björn Müller, Chadwick Boulay, erikli, Jeffrey Gill, Julia Sprenger, Lucas (lkoelman), 
+Mark Histed, Michael Denker, Mike Sintsov, Samuel Garcia, Scott W Harden and William Hart for their contributions to this release.
+
+.. _`list of closed issues`: https://github.com/NeuralEnsemble/python-neo/issues?q=is%3Aissue+milestone%3A0.7.0+is%3Aclosed

+ 47 - 0
code/python-neo/examples/read_files_neo_io.py

@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+"""
+This is an example for reading files with neo.io
+"""
+
+import urllib.request
+
+import neo
+
+url_repo = 'https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/'
+
+# Plexon files
+distantfile = url_repo + 'plexon/File_plexon_3.plx'
+localfile = './File_plexon_3.plx'
+urllib.request.urlretrieve(distantfile, localfile)
+
+# create a reader
+reader = neo.io.PlexonIO(filename='File_plexon_3.plx')
+# read the blocks
+blks = reader.read(lazy=False)
+print(blks)
+# access to segments
+for blk in blks:
+    for seg in blk.segments:
+        print(seg)
+        for asig in seg.analogsignals:
+            print(asig)
+        for st in seg.spiketrains:
+            print(st)
+
+# CED Spike2 files
+distantfile = url_repo + 'spike2/File_spike2_1.smr'
+localfile = './File_spike2_1.smr'
+urllib.request.urlretrieve(distantfile, localfile)
+
+# create a reader
+reader = neo.io.Spike2IO(filename='File_spike2_1.smr')
+# read the block
+bl = reader.read(lazy=False)[0]
+print(bl)
+# access to segments
+for seg in bl.segments:
+    print(seg)
+    for asig in seg.analogsignals:
+        print(asig)
+    for st in seg.spiketrains:
+        print(st)

+ 73 - 0
code/python-neo/examples/read_files_neo_rawio.py

@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+"""
+This is an example for reading files with neo.rawio
+compare with read_files_neo_io.py
+"""
+
+import urllib.request
+from neo.rawio import PlexonRawIO
+
+# Get Plexon files
+distantfile = 'https://portal.g-node.org/neo/plexon/File_plexon_3.plx'
+localfile = './File_plexon_3.plx'
+urllib.request.urlretrieve(distantfile, localfile)
+
+# create a reader
+reader = PlexonRawIO(filename='File_plexon_3.plx')
+reader.parse_header()
+print(reader)
+print(reader.header)
+
+# Read signal chunks
+channel_indexes = None  # could be channel_indexes = [0]
+raw_sigs = reader.get_analogsignal_chunk(block_index=0, seg_index=0, i_start=1024, i_stop=2048,
+                                         channel_indexes=channel_indexes)
+float_sigs = reader.rescale_signal_raw_to_float(raw_sigs, dtype='float64')
+sampling_rate = reader.get_signal_sampling_rate()
+t_start = reader.get_signal_t_start(block_index=0, seg_index=0)
+units = reader.header['signal_channels'][0]['units']
+print(raw_sigs.shape, raw_sigs.dtype)
+print(float_sigs.shape, float_sigs.dtype)
+print(sampling_rate, t_start, units)
+
+# Count unit and spike per units
+nb_unit = reader.unit_channels_count()
+print('nb_unit', nb_unit)
+for unit_index in range(nb_unit):
+    nb_spike = reader.spike_count(block_index=0, seg_index=0, unit_index=unit_index)
+    print('unit_index', unit_index, 'nb_spike', nb_spike)
+
+# Read spike times
+spike_timestamps = reader.get_spike_timestamps(block_index=0, seg_index=0, unit_index=0,
+                                               t_start=0., t_stop=10.)
+print(spike_timestamps.shape, spike_timestamps.dtype, spike_timestamps[:5])
+spike_times = reader.rescale_spike_timestamp(spike_timestamps, dtype='float64')
+print(spike_times.shape, spike_times.dtype, spike_times[:5])
+
+# Read spike waveforms
+raw_waveforms = reader.get_spike_raw_waveforms(block_index=0, seg_index=0, unit_index=0,
+                                               t_start=0., t_stop=10.)
+print(raw_waveforms.shape, raw_waveforms.dtype, raw_waveforms[0, 0, :4])
+float_waveforms = reader.rescale_waveforms_to_float(raw_waveforms, dtype='float32', unit_index=0)
+print(float_waveforms.shape, float_waveforms.dtype, float_waveforms[0, 0, :4])
+
+# Read event timestamps and times (take another file)
+distantfile = 'https://portal.g-node.org/neo/plexon/File_plexon_2.plx'
+localfile = './File_plexon_2.plx'
+urllib.request.urlretrieve(distantfile, localfile)
+
+# Count event per channel
+reader = PlexonRawIO(filename='File_plexon_2.plx')
+reader.parse_header()
+nb_event_channel = reader.event_channels_count()
+print('nb_event_channel', nb_event_channel)
+for chan_index in range(nb_event_channel):
+    nb_event = reader.event_count(block_index=0, seg_index=0, event_channel_index=chan_index)
+    print('chan_index', chan_index, 'nb_event', nb_event)
+
+ev_timestamps, ev_durations, ev_labels = reader.get_event_timestamps(block_index=0, seg_index=0,
+                                                                     event_channel_index=0,
+                                                                     t_start=None, t_stop=None)
+print(ev_timestamps, ev_durations, ev_labels)
+ev_times = reader.rescale_event_timestamp(ev_timestamps, dtype='float64')
+print(ev_times)

+ 375 - 0
code/python-neo/neo/core/dataobject.py

@@ -0,0 +1,375 @@
+# -*- coding: utf-8 -*-
+"""
+This module defines :class:`DataObject`, the abstract base class
+used by all :mod:`neo.core` classes that can contain data (i.e. are not container classes).
+It contains basic functionality that is shared among all those data objects.
+
+"""
+import copy
+import warnings
+
+import quantities as pq
+import numpy as np
+from neo.core.baseneo import BaseNeo, _check_annotations
+
+
+def _normalize_array_annotations(value, length):
+    """Check consistency of array annotations
+
+    Recursively check that value is either an array or list containing only "simple" types
+    (number, string, date/time) or is a dict of those.
+
+    Args:
+        :value: (np.ndarray, list or dict) value to be checked for consistency
+        :length: (int) required length of the array annotation
+
+    Returns:
+        np.ndarray The array_annotations from value in correct form
+
+    Raises:
+        ValueError: In case value is not accepted as array_annotation(s)
+
+    """
+
+    # First stage, resolve dict of annotations into single annotations
+    if isinstance(value, dict):
+        for key in value.keys():
+            if isinstance(value[key], dict):
+                raise ValueError("Nested dicts are not allowed as array annotations")
+            value[key] = _normalize_array_annotations(value[key], length)
+
+    elif value is None:
+        raise ValueError("Array annotations must not be None")
+    # If not array annotation, pass on to regular check and make it a list, that is checked again
+    # This covers array annotations with length 1
+    elif not isinstance(value, (list, np.ndarray)) or (
+            isinstance(value, pq.Quantity) and value.shape == ()):
+        _check_annotations(value)
+        value = _normalize_array_annotations(np.array([value]), length)
+
+    # If array annotation, check for correct length, only single dimension and allowed data
+    else:
+        # Get length that is required for array annotations, which is equal to the length
+        # of the object's data
+        own_length = length
+
+        # Escape check if empty array or list and just annotate an empty array (length 0)
+        # This enables the user to easily create dummy array annotations that will be filled
+        # with data later on
+        if len(value) == 0:
+            if not isinstance(value, np.ndarray):
+                value = np.ndarray((0,))
+            val_length = own_length
+        else:
+            # Note: len(o) also works for np.ndarray, it then uses the first dimension,
+            # which is exactly the desired behaviour here
+            val_length = len(value)
+
+        if not own_length == val_length:
+            raise ValueError(
+                "Incorrect length of array annotation: {} != {}".format(val_length, own_length))
+
+        # Local function used to check single elements of a list or an array
+        # They must not be lists or arrays and fit the usual annotation data types
+        def _check_single_elem(element):
+            # Nested array annotations not allowed currently
+            # If element is a list or a np.ndarray, it's not conform except if it's a quantity of
+            # length 1
+            if isinstance(element, list) or (isinstance(element, np.ndarray) and not (
+                    isinstance(element, pq.Quantity) and (
+                    element.shape == () or element.shape == (1,)))):
+                raise ValueError("Array annotations should only be 1-dimensional")
+            if isinstance(element, dict):
+                raise ValueError("Dictionaries are not supported as array annotations")
+
+            # Perform regular check for elements of array or list
+            _check_annotations(element)
+
+        # Arrays only need testing of single element to make sure the others are the same
+        if isinstance(value, np.ndarray):
+            # Type of first element is representative for all others
+            # Thus just performing a check on the first element is enough
+            # Even if it's a pq.Quantity, which can be scalar or array, this is still true
+            # Because a np.ndarray cannot contain scalars and sequences simultaneously
+
+            # If length of data is 0, then nothing needs to be checked
+            if len(value):
+                # Perform check on first element
+                _check_single_elem(value[0])
+
+            return value
+
+        # In case of list, it needs to be ensured that all data are of the same type
+        else:
+            # Conversion to numpy array makes all elements same type
+            # Converts elements to most general type
+
+            try:
+                value = np.array(value)
+            # Except when scalar and non-scalar values are mixed, this causes conversion to fail
+            except ValueError as e:
+                msg = str(e)
+                if "setting an array element with a sequence." in msg:
+                    raise ValueError("Scalar values and arrays/lists cannot be "
+                                     "combined into a single array annotation")
+                else:
+                    raise e
+
+            # If most specialized data type that possibly fits all elements is object,
+            # raise an Error with a telling error message, because this means the elements
+            # are not compatible
+            if value.dtype == object:
+                raise ValueError("Cannot convert list of incompatible types into a single"
+                                 " array annotation")
+
+            # Check the first element for correctness
+            # If its type is correct for annotations, all others are correct as well
+            # Note: Empty lists cannot reach this point
+            _check_single_elem(value[0])
+
+    return value
+
+
+class DataObject(BaseNeo, pq.Quantity):
+    '''
+    This is the base class from which all objects containing data inherit
+    It contains common functionality for all those objects and handles array_annotations.
+
+    Common functionality that is not included in BaseNeo includes:
+    - duplicating with new data
+    - rescaling the object
+    - copying the object
+    - returning it as pq.Quantity or np.ndarray
+    - handling of array_annotations
+
+    Array_annotations are a kind of annotation that contains metadata for every data point,
+    i.e. per timestamp (in SpikeTrain, Event and Epoch) or signal channel (in AnalogSignal
+    and IrregularlySampledSignal).
+    They can contain the same data types as regular annotations, but are always represented
+    as numpy arrays of the same length as the number of data points of the annotated neo object.
+
+    Args:
+        name (str, optional): Name of the Neo object
+        description (str, optional): Human readable string description of the Neo object
+        file_origin (str, optional): Origin of the data contained in this Neo object
+        array_annotations (dict, optional): Dictionary containing arrays / lists which annotate
+            individual data points of the Neo object.
+        kwargs: regular annotations stored in a separate annotation dictionary
+    '''
+
+    def __init__(self, name=None, description=None, file_origin=None, array_annotations=None,
+                 **annotations):
+        """
+        This method is called by each data object and initializes the newly created object by
+        adding array annotations and calling __init__ of the super class, where more annotations
+        and attributes are processed.
+        """
+
+        if not hasattr(self, 'array_annotations') or not self.array_annotations:
+            self.array_annotations = ArrayDict(self._get_arr_ann_length())
+        if array_annotations is not None:
+            self.array_annotate(**array_annotations)
+
+        BaseNeo.__init__(self, name=name, description=description, file_origin=file_origin,
+                         **annotations)
+
+    def array_annotate(self, **array_annotations):
+        """
+        Add array annotations (annotations for individual data points) as arrays to a Neo data
+        object.
+
+        Example:
+
+        >>> obj.array_annotate(code=['a', 'b', 'a'], category=[2, 1, 1])
+        >>> obj.array_annotations['code'][1]
+        'b'
+        """
+
+        self.array_annotations.update(array_annotations)
+
+    def array_annotations_at_index(self, index):
+        """
+        Return dictionary of array annotations at a given index or list of indices
+        :param index: int, list, numpy array: The index (indices) from which the annotations
+                      are extracted
+        :return: dictionary of values or numpy arrays containing all array annotations
+                 for given index/indices
+
+        Example:
+        >>> obj.array_annotate(code=['a', 'b', 'a'], category=[2, 1, 1])
+        >>> obj.array_annotations_at_index(1)
+        {code='b', category=1}
+        """
+
+        # Taking only a part of the array annotations
+        # Thus not using ArrayDict here, because checks for length are not needed
+        index_annotations = {}
+
+        # Use what is given as an index to determine the corresponding annotations,
+        # if not possible, numpy raises an Error
+        for ann in self.array_annotations.keys():
+            # NO deepcopy, because someone might want to alter the actual object using this
+            try:
+                index_annotations[ann] = self.array_annotations[ann][index]
+            except IndexError as e:
+                # IndexError caused by 'dummy' array annotations should not result in failure
+                # Taking a slice from nothing results in nothing
+                if len(self.array_annotations[ann]) == 0 and not self._get_arr_ann_length() == 0:
+                    index_annotations[ann] = self.array_annotations[ann]
+                else:
+                    raise e
+
+        return index_annotations
+
+    def _merge_array_annotations(self, other):
+        '''
+        Merges array annotations of 2 different objects.
+        The merge happens in such a way that the result fits the merged data
+        In general this means concatenating the arrays from the 2 objects.
+        If an annotation is only present in one of the objects, it will be omitted
+        :return Merged array_annotations
+        '''
+
+        merged_array_annotations = {}
+        omitted_keys_self = []
+        # Concatenating arrays for each key
+        for key in self.array_annotations:
+            try:
+                value = copy.deepcopy(self.array_annotations[key])
+                other_value = copy.deepcopy(other.array_annotations[key])
+                # Quantities need to be rescaled to common unit
+                if isinstance(value, pq.Quantity):
+                    try:
+                        other_value = other_value.rescale(value.units)
+                    except ValueError:
+                        raise ValueError("Could not merge array annotations "
+                                         "due to different units")
+                    merged_array_annotations[key] = np.append(value, other_value) * value.units
+                else:
+                    merged_array_annotations[key] = np.append(value, other_value)
+
+            except KeyError:
+                # Save the  omitted keys to be able to print them
+                omitted_keys_self.append(key)
+                continue
+        # Also save omitted keys from 'other'
+        omitted_keys_other = [key for key in other.array_annotations if
+                              key not in self.array_annotations]
+
+        # Warn if keys were omitted
+        if omitted_keys_other or omitted_keys_self:
+            warnings.warn("The following array annotations were omitted, because they were only "
+                          "present in one of the merged objects: {} from the one that was merged "
+                          "into and {} from the one that was merged into the other"
+                          "".format(omitted_keys_self, omitted_keys_other), UserWarning)
+
+        # Return the merged array_annotations
+        return merged_array_annotations
+
+    def rescale(self, units):
+        '''
+        Return a copy of the object converted to the specified
+        units
+        :return: Copy of self with specified units
+        '''
+        # Use simpler functionality, if nothing will be changed
+        dim = pq.quantity.validate_dimensionality(units)
+        if self.dimensionality == dim:
+            return self.copy()
+
+        # Rescale the object into a new object
+        obj = self.duplicate_with_new_data(signal=self.view(pq.Quantity).rescale(dim), units=units)
+
+        # Expected behavior is deepcopy, so deepcopying array_annotations
+        obj.array_annotations = copy.deepcopy(self.array_annotations)
+
+        obj.segment = self.segment
+
+        return obj
+
+    # Needed to implement this so array annotations are copied as well, ONLY WHEN copying 1:1
+    def copy(self, **kwargs):
+        '''
+        Returns a copy of the object
+        :return: Copy of self
+        '''
+
+        obj = super(DataObject, self).copy(**kwargs)
+        obj.array_annotations = self.array_annotations
+        return obj
+
+    def as_array(self, units=None):
+        """
+        Return the object's data as a plain NumPy array.
+
+        If `units` is specified, first rescale to those units.
+        """
+        if units:
+            return self.rescale(units).magnitude
+        else:
+            return self.magnitude
+
+    def as_quantity(self):
+        """
+        Return the object's data as a quantities array.
+        """
+        return self.view(pq.Quantity)
+
+    def _get_arr_ann_length(self):
+        """
+        Return the length of the object's data as required for array annotations
+        This is the last dimension of every object.
+        :return Required length of array annotations for this object
+        """
+        # Number of items is the last dimension of the data object
+        # This method should be overridden in case this changes
+        try:
+            length = self.shape[-1]
+        # Note: This is because __getitem__[int] returns a scalar Epoch/Event/SpikeTrain
+        # To be removed if __getitem__[int] is changed
+        except IndexError:
+            length = 1
+        return length
+
+    def duplicate_with_new_array(self, signal, units=None):
+        warnings.warn("Use of the `duplicate_with_new_array function is deprecated. "
+                      "Please use `duplicate_with_new_data` instead.",
+                      DeprecationWarning)
+        return self.duplicate_with_new_data(signal, units=units)
+
+
+class ArrayDict(dict):
+    """Dictionary subclass to handle array annotations
+
+       When setting `obj.array_annotations[key]=value`, checks for consistency
+       should not be bypassed.
+       This class overrides __setitem__ from dict to perform these checks every time.
+       The method used for these checks is given as an argument for __init__.
+    """
+
+    def __init__(self, length, check_function=_normalize_array_annotations, *args, **kwargs):
+        super(ArrayDict, self).__init__(*args, **kwargs)
+        self.check_function = check_function
+        self.length = length
+
+    def __setitem__(self, key, value):
+        # Directly call the defined function
+        # Need to wrap key and value in a dict in order to make sure
+        # that nested dicts are detected
+        value = self.check_function({key: value}, self.length)[key]
+        super(ArrayDict, self).__setitem__(key, value)
+
+    # Updating the dict also needs to perform checks, so rerouting this to __setitem__
+    def update(self, *args, **kwargs):
+        if args:
+            if len(args) > 1:
+                raise TypeError("update expected at most 1 arguments, "
+                                "got %d" % len(args))
+            other = dict(args[0])
+            for key in other:
+                self[key] = other[key]
+        for key in kwargs:
+            self[key] = kwargs[key]
+
+    def __reduce__(self):
+        return super(ArrayDict, self).__reduce__()
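
A minimal sketch of how this checked dictionary behaves, assuming a toy check function in place of _normalize_array_annotations; both item assignment and update() are funnelled through the same validation:

    import numpy as np

    def toy_check(arr_ann, length):
        # hypothetical stand-in for _normalize_array_annotations:
        # coerce every value to an array and verify its length
        checked = {}
        for key, value in arr_ann.items():
            value = np.asarray(value)
            if value.shape[-1] != length:
                raise ValueError("array annotation has wrong length")
            checked[key] = value
        return checked

    ann = ArrayDict(length=3, check_function=toy_check)
    ann['quality'] = [0.1, 0.9, 0.5]      # validated by __setitem__
    ann.update(channel=['a', 'b', 'c'])   # update() reroutes to __setitem__
    # ann['bad'] = [1, 2] would raise ValueError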

+ 152 - 0
code/python-neo/neo/io/axographio.py

@@ -0,0 +1,152 @@
+"""
+README
+===============================================================================
+This is an adapter to represent axographio objects as neo objects.
+
+axographio is a file i/o Python module that can read in axograph ".axgx" files.
+It is available under a BSD-3-Clause license and can be installed from pip.
+The following file types are supported:
+
+ - AXGX/AXGD (Axograph X file format)
+
+Based on stimfitio.py from neo.io
+
+11 JUL 2018, W. Hart, Swinburne University, Australia
+"""
+
+# needed for python 3 compatibility
+from __future__ import absolute_import
+
+from datetime import datetime
+import os
+import sys
+
+import numpy as np
+import quantities as pq
+
+from neo.io.baseio import BaseIO
+from neo.core import Block, Segment, AnalogSignal
+
+try:
+    import axographio
+except ImportError as err:
+    HAS_AXOGRAPHIO = False
+    AXOGRAPHIO_ERR = err
+else:
+    HAS_AXOGRAPHIO = True
+    AXOGRAPHIO_ERR = None
+
+
+class AxographIO(BaseIO):
+    """
+    Class for converting an Axographio object to a Neo object.
+    Provides a standardized representation of the data as defined by the neo
+    project; this is useful to explore the data with an increasing number of
+    electrophysiology software tools that rely on the Neo standard.
+
+    axographio is a file i/o Python module that can read in axograph ".axgx" files.
+    It is available under a BSD-3-Clause license and can be installed from pip.
+    The following file types are supported:
+
+    - AXGX/AXGD (Axograph X file format)
+
+    Example usage:
+        >>> import neo
+        >>> neo_obj = neo.io.AxographIO("file.axgx")
+        or
+        >>> import axographio
+        >>> axo_obj = axographio.read("file.axgx")
+        >>> neo_obj = neo.io.AxographIO(axo_obj)
+    """
+
+    is_readable = True
+    is_writable = False
+
+    supported_objects = [Block, Segment, AnalogSignal]
+    readable_objects = [Block]
+    writeable_objects = []
+
+    has_header = False
+    is_streameable = False
+
+    read_params = {Block: []}
+    write_params = None
+
+    name = 'AXOGRAPH'
+    extensions = ['axgx', 'axgd']
+
+    mode = 'file'
+
+    def __init__(self, filename=None):
+        """
+        Arguments:
+            filename : Either a filename or an axographio object
+        """
+        if not HAS_AXOGRAPHIO:
+            raise AXOGRAPHIO_ERR
+
+        BaseIO.__init__(self)
+
+        if hasattr(filename, 'lower'):
+            self.filename = filename
+            self.axo_obj = None
+        else:
+            self.axo_obj = filename
+            self.filename = None
+
+    def read_block(self, **kargs):
+        if self.filename is not None:
+            self.axo_obj = axographio.read(self.filename)
+
+        # Build up the block
+        blk = Block()
+
+        blk.rec_datetime = None
+        if self.filename is not None:
+            # modified time is not ideal but less prone to
+            # cross-platform issues than created time (ctime)
+            blk.file_datetime = datetime.fromtimestamp(os.path.getmtime(self.filename))
+
+            # store the filename if it is available
+            blk.file_origin = self.filename
+
+        # determine the channel names and counts
+        _, channel_ordering = np.unique(self.axo_obj.names[1:], return_index=True)
+        channel_names = np.array(self.axo_obj.names[1:])[np.sort(channel_ordering)]
+        channel_count = len(channel_names)
+
+        # determine the time signal and sample period
+        sample_period = self.axo_obj.data[0].step * pq.s
+        start_time = self.axo_obj.data[0].start * pq.s
+
+        # Attempt to read units from the channel names
+        channel_unit_names = [x.split()[-1].strip('()') for x in channel_names]
+        channel_units = []
+
+        for unit in channel_unit_names:
+            try:
+                channel_units.append(pq.Quantity(1, unit))
+            except LookupError:
+                channel_units.append(None)
+
+        # Strip units from channel names
+        channel_names = [' '.join(x.split()[:-1]) for x in channel_names]
+
+        # build up segments by grouping axograph columns
+        for seg_idx in range(1, len(self.axo_obj.data), channel_count):
+            seg = Segment(index=seg_idx)
+
+            # add in the channels
+            for chan_idx in range(0, channel_count):
+                signal = pq.Quantity(
+                    self.axo_obj.data[seg_idx + chan_idx], channel_units[chan_idx])
+                analog = AnalogSignal(signal,
+                                      sampling_period=sample_period, t_start=start_time,
+                                      name=channel_names[chan_idx], channel_index=chan_idx)
+                seg.analogsignals.append(analog)
+
+            blk.segments.append(seg)
+
+        blk.create_many_to_one_relationship()
+
+        return blk
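
The unit handling in read_block above relies on a naming convention; a small sketch, assuming channel names of the form 'Label (unit)' and that the quantities package is installed (unit strings that quantities cannot parse raise LookupError and fall back to None):

    import quantities as pq

    names = ['Membrane Voltage (mV)', 'Command Current (nA)']
    units = []
    for name in names:
        unit_name = name.split()[-1].strip('()')   # 'mV', 'nA'
        try:
            units.append(pq.Quantity(1, unit_name))
        except LookupError:
            units.append(None)                     # unknown unit string
    labels = [' '.join(name.split()[:-1]) for name in names]
    # labels -> ['Membrane Voltage', 'Command Current']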

+ 509 - 0
code/python-neo/neo/io/basefromrawio.py

@@ -0,0 +1,509 @@
+# -*- coding: utf-8 -*-
+"""
+BaseFromRaw
+======
+
+BaseFromRaw implement a bridge between the new neo.rawio API
+and the neo.io legacy that give neo.core object.
+The neo.rawio API is more restricted and limited and do not cover tricky
+cases with asymetrical tree of neo object.
+But if a format is done in neo.rawio the neo.io is done for free
+by inheritance of this class.
+
+
+"""
+# needed for python 3 compatibility
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+import warnings
+import collections
+import logging
+import numpy as np
+
+from neo import logging_handler
+from neo.core import (AnalogSignal, Block,
+                      Epoch, Event,
+                      IrregularlySampledSignal,
+                      ChannelIndex,
+                      Segment, SpikeTrain, Unit)
+from neo.io.baseio import BaseIO
+
+import quantities as pq
+
+
+class BaseFromRaw(BaseIO):
+    """
+    This implements a generic reader on top of a RawIO reader.
+
+    Arguments depend on `mode` (dir or file)
+
+    File case::
+
+        reader = BlackRockIO(filename='FileSpec2.3001.nev')
+
+    Dir case::
+
+        reader = NeuralynxIO(dirname='Cheetah_v5.7.4/original_data')
+
+    Other arguments are IO specific.
+
+    """
+    is_readable = True
+    is_writable = False
+
+    supported_objects = [Block, Segment, AnalogSignal,
+                         SpikeTrain, Unit, ChannelIndex, Event, Epoch]
+    readable_objects = [Block, Segment]
+    writeable_objects = []
+
+    support_lazy = True
+
+    name = 'BaseIO'
+    description = ''
+    extentions = []
+
+    mode = 'file'
+
+    _prefered_signal_group_mode = 'split-all'  # 'group-by-same-units'
+    _prefered_units_group_mode = 'split-all'  # 'all-in-one'
+
+    def __init__(self, *args, **kargs):
+        BaseIO.__init__(self, *args, **kargs)
+        self.parse_header()
+
+    def read_block(self, block_index=0, lazy=False, signal_group_mode=None,
+                   units_group_mode=None, load_waveforms=False):
+        """
+
+
+        :param block_index: int, default 0. If the file contains several blocks,
+            block_index selects which one to read.
+
+        :param lazy: False by default.
+
+        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO).
+            This controls how channels are grouped into AnalogSignal objects:
+            * 'split-all': each channel gives its own AnalogSignal
+            * 'group-by-same-units': all channels sharing the same quantity units are grouped
+              into a 2D AnalogSignal
+
+        :param units_group_mode: 'split-all' or 'all-in-one' (default depends on the IO).
+            This controls how Units are grouped into ChannelIndex objects:
+            * 'split-all': each neo.Unit is assigned to a new neo.ChannelIndex
+            * 'all-in-one': all neo.Unit objects are grouped in the same neo.ChannelIndex
+              (global spike sorting, for instance)
+
+        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms
+            is loaded or left as None.
+
+        """
+
+        if lazy:
+            warnings.warn(
+                "Lazy is deprecated and will be replaced by ProxyObject functionality.",
+                DeprecationWarning)
+
+        if signal_group_mode is None:
+            signal_group_mode = self._prefered_signal_group_mode
+
+        if units_group_mode is None:
+            units_group_mode = self._prefered_units_group_mode
+
+        # annotations
+        bl_annotations = dict(self.raw_annotations['blocks'][block_index])
+        bl_annotations.pop('segments')
+        bl_annotations = check_annotations(bl_annotations)
+
+        bl = Block(**bl_annotations)
+
+        # ChannelIndexes are split in 2 parts:
+        #  * some for AnalogSignals
+        #  * some for Units
+
+        # ChannelIndex for AnalogSignals
+        all_channels = self.header['signal_channels']
+        channel_indexes_list = self.get_group_channel_indexes()
+        for channel_index in channel_indexes_list:
+            for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
+                    channel_index, signal_group_mode=signal_group_mode).items():
+                chidx_annotations = {}
+                if signal_group_mode == "split-all":
+                    chidx_annotations = self.raw_annotations['signal_channels'][i]
+                elif signal_group_mode == "group-by-same-units":
+                    for key in list(self.raw_annotations['signal_channels'][i].keys()):
+                        chidx_annotations[key] = []
+                    for j in ind_abs:
+                        for key in list(self.raw_annotations['signal_channels'][i].keys()):
+                            chidx_annotations[key].append(self.raw_annotations[
+                                'signal_channels'][j][key])
+                if 'name' in list(chidx_annotations.keys()):
+                    chidx_annotations.pop('name')
+                chidx_annotations = check_annotations(chidx_annotations)
+                ch_names = all_channels[ind_abs]['name'].astype('S')
+                neo_channel_index = ChannelIndex(index=ind_within,
+                                                 channel_names=ch_names,
+                                                 channel_ids=all_channels[ind_abs]['id'],
+                                                 name='Channel group {}'.format(i),
+                                                 **chidx_annotations)
+
+                bl.channel_indexes.append(neo_channel_index)
+
+        # ChannelIndex and Unit
+        # 2 cases are possible in neo; different IOs have chosen one or the other:
+        #  * All units are grouped in the same ChannelIndex and indexes are all channels:
+        #    'all-in-one'
+        #  * Each unit is assigned to its own ChannelIndex: 'split-all'
+        # This is kept for compatibility
+        unit_channels = self.header['unit_channels']
+        if units_group_mode == 'all-in-one':
+            if unit_channels.size > 0:
+                channel_index = ChannelIndex(index=np.array([], dtype='i'),
+                                             name='ChannelIndex for all Unit')
+                bl.channel_indexes.append(channel_index)
+            for c in range(unit_channels.size):
+                unit_annotations = self.raw_annotations['unit_channels'][c]
+                unit_annotations = check_annotations(unit_annotations)
+                unit = Unit(**unit_annotations)
+                channel_index.units.append(unit)
+
+        elif units_group_mode == 'split-all':
+            for c in range(len(unit_channels)):
+                unit_annotations = self.raw_annotations['unit_channels'][c]
+                unit_annotations = check_annotations(unit_annotations)
+                unit = Unit(**unit_annotations)
+                channel_index = ChannelIndex(index=np.array([], dtype='i'),
+                                             name='ChannelIndex for Unit')
+                channel_index.units.append(unit)
+                bl.channel_indexes.append(channel_index)
+
+        # Read all segments
+        for seg_index in range(self.segment_count(block_index)):
+            seg = self.read_segment(block_index=block_index, seg_index=seg_index,
+                                    lazy=lazy, signal_group_mode=signal_group_mode,
+                                    load_waveforms=load_waveforms)
+            bl.segments.append(seg)
+
+        # create link to other containers ChannelIndex and Units
+        for seg in bl.segments:
+            for c, anasig in enumerate(seg.analogsignals):
+                bl.channel_indexes[c].analogsignals.append(anasig)
+
+            nsig = len(seg.analogsignals)
+            for c, sptr in enumerate(seg.spiketrains):
+                if units_group_mode == 'all-in-one':
+                    bl.channel_indexes[nsig].units[c].spiketrains.append(sptr)
+                elif units_group_mode == 'split-all':
+                    bl.channel_indexes[nsig + c].units[0].spiketrains.append(sptr)
+
+        bl.create_many_to_one_relationship()
+
+        return bl
+
+    def read_segment(self, block_index=0, seg_index=0, lazy=False,
+                     signal_group_mode=None, load_waveforms=False, time_slice=None):
+        """
+        :param block_index: int, default 0. If the file contains several blocks,
+            block_index selects which one to read.
+
+        :param seg_index: int, default 0. Index of the segment to read.
+
+        :param lazy: False by default.
+
+        :param signal_group_mode: 'split-all' or 'group-by-same-units' (default depends on the IO).
+            This controls how channels are grouped into AnalogSignal objects:
+            * 'split-all': each channel gives its own AnalogSignal
+            * 'group-by-same-units': all channels sharing the same quantity units are grouped
+              into a 2D AnalogSignal
+
+        :param load_waveforms: False by default. Controls whether SpikeTrain.waveforms
+            is loaded or left as None.
+
+        :param time_slice: None by default, meaning no limit.
+            A time slice is a tuple (t_start, t_stop); both are quantities.
+            AnalogSignal, SpikeTrain, Event and Epoch objects are loaded only within the slice.
+        """
+
+        if lazy:
+            warnings.warn(
+                "Lazy is deprecated and will be replaced by ProxyObject functionality.",
+                DeprecationWarning)
+
+        if signal_group_mode is None:
+            signal_group_mode = self._prefered_signal_group_mode
+
+        # annotations
+        seg_annotations = dict(self.raw_annotations['blocks'][block_index]['segments'][seg_index])
+        for k in ('signals', 'units', 'events'):
+            seg_annotations.pop(k)
+        seg_annotations = check_annotations(seg_annotations)
+
+        seg = Segment(index=seg_index, **seg_annotations)
+
+        seg_t_start = self.segment_t_start(block_index, seg_index) * pq.s
+        seg_t_stop = self.segment_t_stop(block_index, seg_index) * pq.s
+
+        # get only a slice of objects limited by t_start and t_stop: time_slice = (t_start, t_stop)
+        if time_slice is None:
+            t_start, t_stop = None, None
+            t_start_, t_stop_ = None, None
+        else:
+            assert not lazy, 'time slice only works when not lazy'
+            t_start, t_stop = time_slice
+
+            t_start = ensure_second(t_start)
+            t_stop = ensure_second(t_stop)
+
+            # checks limits
+            if t_start < seg_t_start:
+                t_start = seg_t_start
+            if t_stop > seg_t_stop:
+                t_stop = seg_t_stop
+
+            # in float format in second (for rawio clip)
+            t_start_, t_stop_ = float(t_start.magnitude), float(t_stop.magnitude)
+
+            # new spiketrain limits
+            seg_t_start = t_start
+            seg_t_stop = t_stop
+
+        # AnalogSignal
+        signal_channels = self.header['signal_channels']
+
+        if signal_channels.size > 0:
+            channel_indexes_list = self.get_group_channel_indexes()
+            for channel_indexes in channel_indexes_list:
+                sr = self.get_signal_sampling_rate(channel_indexes) * pq.Hz
+                sig_t_start = self.get_signal_t_start(
+                    block_index, seg_index, channel_indexes) * pq.s
+
+                sig_size = self.get_signal_size(block_index=block_index, seg_index=seg_index,
+                                                channel_indexes=channel_indexes)
+                if not lazy:
+                    # in case of time_slice get: get i_start, i_stop, new sig_t_start
+                    if t_stop is not None:
+                        i_stop = int((t_stop - sig_t_start).magnitude * sr.magnitude)
+                        if i_stop > sig_size:
+                            i_stop = sig_size
+                    else:
+                        i_stop = None
+                    if t_start is not None:
+                        i_start = int((t_start - sig_t_start).magnitude * sr.magnitude)
+                        if i_start < 0:
+                            i_start = 0
+                        sig_t_start += (i_start / sr).rescale('s')
+                    else:
+                        i_start = None
+
+                    raw_signal = self.get_analogsignal_chunk(block_index=block_index,
+                                                             seg_index=seg_index, i_start=i_start,
+                                                             i_stop=i_stop,
+                                                             channel_indexes=channel_indexes)
+                    float_signal = self.rescale_signal_raw_to_float(
+                        raw_signal,
+                        dtype='float32',
+                        channel_indexes=channel_indexes)
+
+                for i, (ind_within, ind_abs) in self._make_signal_channel_subgroups(
+                        channel_indexes,
+                        signal_group_mode=signal_group_mode).items():
+                    units = np.unique(signal_channels[ind_abs]['units'])
+                    assert len(units) == 1
+                    units = ensure_signal_units(units[0])
+
+                    if signal_group_mode == 'split-all':
+                        # in that case per-channel annotations are OK
+                        chan_index = ind_abs[0]
+                        d = self.raw_annotations['blocks'][block_index]['segments'][seg_index][
+                            'signals'][chan_index]
+                        annotations = dict(d)
+                        if 'name' not in annotations:
+                            annotations['name'] = signal_channels['name'][chan_index]
+                    else:
+                        # when channels are grouped by the same units,
+                        # annotations have channel_names and channel_ids arrays;
+                        # this will be moved to array annotations soon
+                        annotations = {}
+                        annotations['name'] = 'Channel bundle ({}) '.format(
+                            ','.join(signal_channels[ind_abs]['name']))
+                        annotations['channel_names'] = signal_channels[ind_abs]['name']
+                        annotations['channel_ids'] = signal_channels[ind_abs]['id']
+                    annotations = check_annotations(annotations)
+                    if lazy:
+                        anasig = AnalogSignal(np.array([]), units=units, copy=False,
+                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
+                        anasig.lazy_shape = (sig_size, len(ind_within))
+                    else:
+                        anasig = AnalogSignal(float_signal[:, ind_within], units=units, copy=False,
+                                              sampling_rate=sr, t_start=sig_t_start, **annotations)
+                    seg.analogsignals.append(anasig)
+
+        # SpikeTrain and waveforms (optional)
+        unit_channels = self.header['unit_channels']
+        for unit_index in range(len(unit_channels)):
+            if not lazy and load_waveforms:
+                raw_waveforms = self.get_spike_raw_waveforms(block_index=block_index,
+                                                             seg_index=seg_index,
+                                                             unit_index=unit_index,
+                                                             t_start=t_start_, t_stop=t_stop_)
+                float_waveforms = self.rescale_waveforms_to_float(raw_waveforms, dtype='float32',
+                                                                  unit_index=unit_index)
+                wf_units = ensure_signal_units(unit_channels['wf_units'][unit_index])
+                waveforms = pq.Quantity(float_waveforms, units=wf_units,
+                                        dtype='float32', copy=False)
+                wf_sampling_rate = unit_channels['wf_sampling_rate'][unit_index]
+                wf_left_sweep = unit_channels['wf_left_sweep'][unit_index]
+                if wf_left_sweep > 0:
+                    wf_left_sweep = float(wf_left_sweep) / wf_sampling_rate * pq.s
+                else:
+                    wf_left_sweep = None
+                wf_sampling_rate = wf_sampling_rate * pq.Hz
+            else:
+                waveforms = None
+                wf_left_sweep = None
+                wf_sampling_rate = None
+
+            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][
+                unit_index]
+            annotations = dict(d)
+            if 'name' not in annotations:
+                annotations['name'] = unit_channels['name'][unit_index]
+            annotations = check_annotations(annotations)
+
+            if not lazy:
+                spike_timestamp = self.get_spike_timestamps(block_index=block_index,
+                                                            seg_index=seg_index,
+                                                            unit_index=unit_index,
+                                                            t_start=t_start_, t_stop=t_stop_)
+                spike_times = self.rescale_spike_timestamp(spike_timestamp, 'float64')
+                sptr = SpikeTrain(spike_times, units='s', copy=False,
+                                  t_start=seg_t_start, t_stop=seg_t_stop,
+                                  waveforms=waveforms, left_sweep=wf_left_sweep,
+                                  sampling_rate=wf_sampling_rate, **annotations)
+            else:
+                nb = self.spike_count(block_index=block_index, seg_index=seg_index,
+                                      unit_index=unit_index)
+                sptr = SpikeTrain(np.array([]), units='s', copy=False, t_start=seg_t_start,
+                                  t_stop=seg_t_stop, **annotations)
+                sptr.lazy_shape = (nb,)
+
+            seg.spiketrains.append(sptr)
+
+        # Events/Epoch
+        event_channels = self.header['event_channels']
+        for chan_ind in range(len(event_channels)):
+            if not lazy:
+                ev_timestamp, ev_raw_durations, ev_labels = self.get_event_timestamps(
+                    block_index=block_index,
+                    seg_index=seg_index, event_channel_index=chan_ind,
+                    t_start=t_start_, t_stop=t_stop_)
+                ev_times = self.rescale_event_timestamp(ev_timestamp, 'float64') * pq.s
+                if ev_raw_durations is None:
+                    ev_durations = None
+                else:
+                    ev_durations = self.rescale_epoch_duration(ev_raw_durations, 'float64') * pq.s
+                ev_labels = ev_labels.astype('S')
+            else:
+                nb = self.event_count(block_index=block_index, seg_index=seg_index,
+                                      event_channel_index=chan_ind)
+                lazy_shape = (nb,)
+                ev_times = np.array([]) * pq.s
+                ev_labels = np.array([], dtype='S')
+                ev_durations = np.array([]) * pq.s
+
+            d = self.raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][
+                chan_ind]
+            annotations = dict(d)
+            if 'name' not in annotations:
+                annotations['name'] = event_channels['name'][chan_ind]
+
+            annotations = check_annotations(annotations)
+
+            if event_channels['type'][chan_ind] == b'event':
+                e = Event(times=ev_times, labels=ev_labels, units='s', copy=False, **annotations)
+                e.segment = seg
+                seg.events.append(e)
+            elif event_channels['type'][chan_ind] == b'epoch':
+                e = Epoch(times=ev_times, durations=ev_durations, labels=ev_labels,
+                          units='s', copy=False, **annotations)
+                e.segment = seg
+                seg.epochs.append(e)
+
+            if lazy:
+                e.lazy_shape = lazy_shape
+
+        seg.create_many_to_one_relationship()
+        return seg
+
+    def _make_signal_channel_subgroups(self, channel_indexes,
+                                       signal_group_mode='group-by-same-units'):
+        """
+        For some RawIOs the channels are already split into groups.
+        But in any case, channels need to be split again into sub-groups
+        because they do not all share the same units.
+
+        They can also be split one by one to match the previous behaviour of
+        some IOs in older versions of neo (<=0.5).
+
+        This method aggregates signal channels with the same units or splits them all.
+        """
+        all_channels = self.header['signal_channels']
+        if channel_indexes is None:
+            channel_indexes = np.arange(all_channels.size, dtype=int)
+        channels = all_channels[channel_indexes]
+
+        groups = collections.OrderedDict()
+        if signal_group_mode == 'group-by-same-units':
+            all_units = np.unique(channels['units'])
+
+            for i, unit in enumerate(all_units):
+                ind_within, = np.nonzero(channels['units'] == unit)
+                ind_abs = channel_indexes[ind_within]
+                groups[i] = (ind_within, ind_abs)
+
+        elif signal_group_mode == 'split-all':
+            for i, chan_index in enumerate(channel_indexes):
+                ind_within = [i]
+                ind_abs = channel_indexes[ind_within]
+                groups[i] = (ind_within, ind_abs)
+        else:
+            raise NotImplementedError
+        return groups
+
+
+unit_convert = {'Volts': 'V', 'volts': 'V', 'Volt': 'V',
+                'volt': 'V', ' Volt': 'V', 'microV': 'V'}
+
+
+def ensure_signal_units(units):
+    # test units
+    units = units.replace(' ', '')
+    if units in unit_convert:
+        units = unit_convert[units]
+    try:
+        units = pq.Quantity(1, units)
+    except Exception:
+        logging.warning('Units "{}" cannot be converted to a quantity. Using dimensionless '
+                        'instead'.format(units))
+        units = ''
+    return units
+
+
+def check_annotations(annotations):
+    # force type to str for some keys
+    # imposed for tests
+    for k in ('name', 'description', 'file_origin'):
+        if k in annotations:
+            annotations[k] = str(annotations[k])
+
+    if 'coordinates' in annotations:
+        # some rawios expose coordinates in annotations, but this is not standardized
+        # ((x, y, z) or polar); at the moment it is more reasonable to remove them
+        annotations.pop('coordinates')
+
+    return annotations
+
+
+def ensure_second(v):
+    if isinstance(v, float):
+        return v * pq.s
+    elif isinstance(v, pq.Quantity):
+        return v.rescale('s')
+    elif isinstance(v, int):
+        return float(v) * pq.s

+ 13 - 0
code/python-neo/neo/io/bci2000io.py

@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.bci2000rawio import BCI2000RawIO
+
+
+class BCI2000IO(BCI2000RawIO, BaseFromRaw):
+    """Class for reading data from a BCI2000 .dat file, either version 1.0 or 1.1"""
+    _prefered_signal_group_mode = 'split-all'
+
+    def __init__(self, filename):
+        BCI2000RawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)
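
This thin wrapper pattern (repeated in the other IO modules below) is plain multiple inheritance: the RawIO parent does the low-level parsing and BaseFromRaw exposes read_block/read_segment on top of it. A usage sketch, assuming a BCI2000 .dat file at the hypothetical path 'example.dat':

    from neo.io.bci2000io import BCI2000IO

    reader = BCI2000IO(filename='example.dat')           # hypothetical file path
    block = reader.read_block(signal_group_mode='split-all')
    for seg in block.segments:
        print(seg.index, len(seg.analogsignals))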

File diff suppressed because it is too large
+ 2567 - 0
code/python-neo/neo/io/blackrockio_v4.py


+ 13 - 0
code/python-neo/neo/io/intanio.py

@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.intanrawio import IntanRawIO
+
+
+class IntanIO(IntanRawIO, BaseFromRaw):
+    __doc__ = IntanRawIO.__doc__
+    _prefered_signal_group_mode = 'group-by-same-units'
+
+    def __init__(self, filename):
+        IntanRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)

File diff suppressed because it is too large
+ 2409 - 0
code/python-neo/neo/io/neuralynxio_v1.py


+ 23 - 0
code/python-neo/neo/io/nixio_fr.py

@@ -0,0 +1,23 @@
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.nixrawio import NIXRawIO
+
+# This class is subject to limitations when there are multiple asymmetric blocks
+
+
+class NixIO(NIXRawIO, BaseFromRaw):
+
+    name = 'NIX IO'
+
+    _prefered_signal_group_mode = 'group-by-same-units'
+    _prefered_units_group_mode = 'split-all'
+
+    def __init__(self, filename):
+        NIXRawIO.__init__(self, filename)
+        BaseFromRaw.__init__(self, filename)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.header = None
+        self.file.close()
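
Since __enter__ and __exit__ are defined, the reader can be used as a context manager so the underlying NIX file is closed automatically; a sketch, assuming a NIX file at the hypothetical path 'data.nix':

    from neo.io.nixio_fr import NixIO

    with NixIO('data.nix') as reader:                    # hypothetical file path
        block = reader.read_block(lazy=False)
    # on exit the header is cleared and reader.file is closed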

+ 13 - 0
code/python-neo/neo/io/openephysio.py

@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.openephysrawio import OpenEphysRawIO
+
+
+class OpenEphysIO(OpenEphysRawIO, BaseFromRaw):
+    _prefered_signal_group_mode = 'group-by-same-units'
+    mode = 'dir'
+
+    def __init__(self, dirname):
+        OpenEphysRawIO.__init__(self, dirname=dirname)
+        BaseFromRaw.__init__(self, dirname)

+ 12 - 0
code/python-neo/neo/io/rawmcsio.py

@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+
+from neo.io.basefromrawio import BaseFromRaw
+from neo.rawio.rawmcsrawio import RawMCSRawIO
+
+
+class RawMCSIO(RawMCSRawIO, BaseFromRaw):
+    _prefered_signal_group_mode = 'group-by-same-units'
+
+    def __init__(self, filename):
+        RawMCSRawIO.__init__(self, filename=filename)
+        BaseFromRaw.__init__(self, filename)

+ 69 - 0
code/python-neo/neo/rawio/__init__.py

@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+"""
+:mod:`neo.rawio` provides classes for reading with low level API
+electrophysiological data files.
+
+
+Classes:
+
+
+.. autoclass:: neo.rawio.BlackrockRawIO
+
+"""
+
+from neo.rawio.axonrawio import AxonRawIO
+from neo.rawio.blackrockrawio import BlackrockRawIO
+from neo.rawio.brainvisionrawio import BrainVisionRawIO
+from neo.rawio.elanrawio import ElanRawIO
+from neo.rawio.intanrawio import IntanRawIO
+from neo.rawio.micromedrawio import MicromedRawIO
+from neo.rawio.neuralynxrawio import NeuralynxRawIO
+from neo.rawio.neuroexplorerrawio import NeuroExplorerRawIO
+from neo.rawio.neuroscoperawio import NeuroScopeRawIO
+from neo.rawio.nixrawio import NIXRawIO
+from neo.rawio.plexonrawio import PlexonRawIO
+from neo.rawio.rawbinarysignalrawio import RawBinarySignalRawIO
+from neo.rawio.rawmcsrawio import RawMCSRawIO
+from neo.rawio.spike2rawio import Spike2RawIO
+from neo.rawio.tdtrawio import TdtRawIO
+from neo.rawio.winedrrawio import WinEdrRawIO
+from neo.rawio.winwcprawio import WinWcpRawIO
+
+rawiolist = [
+    AxonRawIO,
+    BlackrockRawIO,
+    BrainVisionRawIO,
+    ElanRawIO,
+    IntanRawIO,
+    MicromedRawIO,
+    NeuralynxRawIO,
+    NeuroExplorerRawIO,
+    NeuroScopeRawIO,
+    NIXRawIO,
+    PlexonRawIO,
+    RawBinarySignalRawIO,
+    RawMCSRawIO,
+    Spike2RawIO,
+    TdtRawIO,
+    WinEdrRawIO,
+    WinWcpRawIO,
+]
+
+import os
+
+
+def get_rawio_class(filename_or_dirname):
+    """
+    Return a neo.rawio class guessed from the file extension.
+    """
+    _, ext = os.path.splitext(filename_or_dirname)
+    ext = ext[1:]
+    possibles = []
+    for rawio in rawiolist:
+        if any(ext.lower() == ext2.lower() for ext2 in rawio.extensions):
+            possibles.append(rawio)
+
+    if len(possibles) == 1:
+        return possibles[0]
+    else:
+        return None
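
A quick sketch of using the extension-based guess; note that the function deliberately returns None when the extension is unknown or matched by more than one rawio class:

    from neo.rawio import get_rawio_class

    cls = get_rawio_class('recording.abf')               # hypothetical filename
    if cls is not None:
        reader = cls(filename='recording.abf')
        reader.parse_header()
    else:
        print('extension not recognised, or ambiguous across IOs')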

+ 895 - 0
code/python-neo/neo/rawio/axonrawio.py

@@ -0,0 +1,895 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from pCLAMP and AxoScope
+files (.abf version 1 and 2), developed by Molecular Devices / Axon Instruments.
+
+- abf = Axon binary file
+- atf is a text-based format from Axon that can be
+  read by AsciiIO (but this format is less efficient)
+
+
+This code is a port of abfload and abf2load
+written in Matlab (BSD-2-Clause licence) by :
+ - Copyright (c) 2009, Forrest Collman, fcollman@princeton.edu
+ - Copyright (c) 2004, Harald Hentschke
+and available here:
+http://www.mathworks.com/matlabcentral/fileexchange/22114-abf2load
+
+Information on abf 1 and 2 formats is available here:
+http://www.moleculardevices.com/pages/software/developer_info.html
+
+This file supports the old (ABF1) and new (ABF2) format.
+ABF1 (clampfit <=9) and ABF2 (clampfit >10)
+
+All recording modes are supported:
+    - event-driven variable-length mode 1 -> returns several Segments per Block
+    - event-driven fixed-length mode 2 or 5 -> returns several Segments
+    - gap-free mode 3 -> returns one (or several) Segment(s) in the Block
+
+Supported : Read
+
+Author: Samuel Garcia, JS Nowacki
+
+Note: j.s.nowacki@gmail.com has a C++ library with SWIG bindings which also
+reads abf files - would be good to cross-check
+
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import struct
+import datetime
+import os
+from io import open, BufferedReader
+
+
+class AxonRawIO(BaseRawIO):
+    extensions = ['abf']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _parse_header(self):
+        info = self._axon_info = parse_axon_soup(self.filename)
+
+        version = info['fFileVersionNumber']
+
+        # file format
+        if info['nDataFormat'] == 0:
+            sig_dtype = np.dtype('i2')
+        elif info['nDataFormat'] == 1:
+            sig_dtype = np.dtype('f4')
+
+        if version < 2.:
+            nbchannel = info['nADCNumChannels']
+            head_offset = info['lDataSectionPtr'] * BLOCKSIZE + info[
+                'nNumPointsIgnored'] * sig_dtype.itemsize
+            totalsize = info['lActualAcqLength']
+        elif version >= 2.:
+            nbchannel = info['sections']['ADCSection']['llNumEntries']
+            head_offset = info['sections']['DataSection'][
+                'uBlockIndex'] * BLOCKSIZE
+            totalsize = info['sections']['DataSection']['llNumEntries']
+
+        self._raw_data = np.memmap(self.filename, dtype=sig_dtype, mode='r',
+                                   shape=(totalsize,), offset=head_offset)
+
+        # 3 possible modes
+        if version < 2.:
+            mode = info['nOperationMode']
+        elif version >= 2.:
+            mode = info['protocol']['nOperationMode']
+
+        assert mode in [1, 2, 3, 5], 'Mode {} is not supported'.format(mode)
+        # event-driven variable-length mode (mode 1)
+        # event-driven fixed-length mode (mode 2 or 5)
+        # gap free mode (mode 3) can be in several episodes
+
+        # read sweep pos
+        if version < 2.:
+            nbepisod = info['lSynchArraySize']
+            offset_episode = info['lSynchArrayPtr'] * BLOCKSIZE
+        elif version >= 2.:
+            nbepisod = info['sections']['SynchArraySection'][
+                'llNumEntries']
+            offset_episode = info['sections']['SynchArraySection'][
+                'uBlockIndex'] * BLOCKSIZE
+        if nbepisod > 0:
+            episode_array = np.memmap(
+                self.filename, [('offset', 'i4'), ('len', 'i4')], 'r',
+                shape=nbepisod, offset=offset_episode)
+        else:
+            episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
+            episode_array[0]['len'] = self._raw_data.size
+            episode_array[0]['offset'] = 0
+
+        # sampling_rate
+        if version < 2.:
+            self._sampling_rate = 1. / (info['fADCSampleInterval'] * nbchannel * 1.e-6)
+        elif version >= 2.:
+            self._sampling_rate = 1.e6 / info['protocol']['fADCSequenceInterval']
+
+        # one sweep = one segment
+        nb_segment = episode_array.size
+
+        # Get raw data by segment
+        self._raw_signals = {}
+        self._t_starts = {}
+        pos = 0
+        for seg_index in range(nb_segment):
+            length = episode_array[seg_index]['len']
+
+            if version < 2.:
+                fSynchTimeUnit = info['fSynchTimeUnit']
+            elif version >= 2.:
+                fSynchTimeUnit = info['protocol']['fSynchTimeUnit']
+
+            if (fSynchTimeUnit != 0) and (mode == 1):
+                length /= fSynchTimeUnit
+
+            self._raw_signals[seg_index] = self._raw_data[pos:pos + length].reshape(-1, nbchannel)
+            pos += length
+
+            t_start = float(episode_array[seg_index]['offset'])
+            if (fSynchTimeUnit == 0):
+                t_start = t_start / self._sampling_rate
+            else:
+                t_start = t_start * fSynchTimeUnit * 1e-6
+            self._t_starts[seg_index] = t_start
+
+        # Create channel header
+        if version < 2.:
+            channel_ids = [chan_num for chan_num in
+                           info['nADCSamplingSeq'] if chan_num >= 0]
+        else:
+            channel_ids = list(range(nbchannel))
+
+        sig_channels = []
+        adc_nums = []
+        for chan_index, chan_id in enumerate(channel_ids):
+            if version < 2.:
+                name = info['sADCChannelName'][chan_id].replace(b' ', b'')
+                units = info['sADCUnits'][chan_id].replace(b'\xb5', b'u'). \
+                    replace(b' ', b'').decode('utf-8')  # \xb5 is µ
+                adc_num = info['nADCPtoLChannelMap'][chan_id]
+            elif version >= 2.:
+                ADCInfo = info['listADCInfo'][chan_id]
+                name = ADCInfo['ADCChNames'].replace(b' ', b'')
+                units = ADCInfo['ADCChUnits'].replace(b'\xb5', b'u'). \
+                    replace(b' ', b'').decode('utf-8')
+                adc_num = ADCInfo['nADCNum']
+            adc_nums.append(adc_num)
+
+            if info['nDataFormat'] == 0:
+                # int16 gain/offset
+                if version < 2.:
+                    gain = info['fADCRange']
+                    gain /= info['fInstrumentScaleFactor'][chan_id]
+                    gain /= info['fSignalGain'][chan_id]
+                    gain /= info['fADCProgrammableGain'][chan_id]
+                    gain /= info['lADCResolution']
+                    if info['nTelegraphEnable'][chan_id] == 0:
+                        pass
+                    elif info['nTelegraphEnable'][chan_id] == 1:
+                        gain /= info['fTelegraphAdditGain'][chan_id]
+                    else:
+                        self.logger.warning('ignoring buggy nTelegraphEnable')
+                    offset = info['fInstrumentOffset'][chan_id]
+                    offset -= info['fSignalOffset'][chan_id]
+                elif version >= 2.:
+                    gain = info['protocol']['fADCRange']
+                    gain /= info['listADCInfo'][chan_id]['fInstrumentScaleFactor']
+                    gain /= info['listADCInfo'][chan_id]['fSignalGain']
+                    gain /= info['listADCInfo'][chan_id]['fADCProgrammableGain']
+                    gain /= info['protocol']['lADCResolution']
+                    if info['listADCInfo'][chan_id]['nTelegraphEnable']:
+                        gain /= info['listADCInfo'][chan_id]['fTelegraphAdditGain']
+                    offset = info['listADCInfo'][chan_id]['fInstrumentOffset']
+                    offset -= info['listADCInfo'][chan_id]['fSignalOffset']
+            else:
+                gain, offset = 1., 0.
+            group_id = 0
+            sig_channels.append((name, chan_id, self._sampling_rate,
+                                 sig_dtype, units, gain, offset, group_id))
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # only one events channel : tag
+        if mode in [3, 5]:  # TODO check if tags exist in other modes
+            # In ABF, timestamps are not attached to any particular segment,
+            # so each segment accesses all events
+            timestamps = []
+            labels = []
+            comments = []
+            for i, tag in enumerate(info['listTag']):
+                timestamps.append(tag['lTagTime'])
+                labels.append(str(tag['nTagType']))
+                comments.append(clean_string(tag['sComment']))
+            self._raw_ev_timestamps = np.array(timestamps)
+            self._ev_labels = np.array(labels, dtype='U')
+            self._ev_comments = np.array(comments, dtype='U')
+
+        event_channels = []
+        event_channels.append(('Tag', '', 'event'))
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill in the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [nb_segment]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # insert some annotation at some place
+        self._generate_minimal_annotations()
+        bl_annotations = self.raw_annotations['blocks'][0]
+
+        bl_annotations['rec_datetime'] = info['rec_datetime']
+        bl_annotations['abf_version'] = version
+
+        for seg_index in range(nb_segment):
+            seg_annotations = bl_annotations['segments'][seg_index]
+            seg_annotations['abf_version'] = version
+
+            for c in range(sig_channels.size):
+                anasig_an = seg_annotations['signals'][c]
+                anasig_an['nADCNum'] = adc_nums[c]
+
+            for c in range(event_channels.size):
+                ev_ann = seg_annotations['events'][c]
+                ev_ann['comments'] = self._ev_comments
+
+    def _source_name(self):
+        return self.filename
+
+    def _segment_t_start(self, block_index, seg_index):
+        return self._t_starts[seg_index]
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._t_starts[seg_index] + \
+            self._raw_signals[seg_index].shape[0] / self._sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        shape = self._raw_signals[seg_index].shape
+        return shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return self._t_starts[seg_index]
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        raw_signals = self._raw_signals[seg_index][slice(i_start, i_stop), channel_indexes]
+        return raw_signals
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        return self._raw_ev_timestamps.size
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        # In ABF, timestamps are not attached to any particular segment,
+        # so each segment accesses all events
+        timestamp = self._raw_ev_timestamps
+        labels = self._ev_labels
+        durations = None
+
+        if t_start is not None:
+            keep = timestamp >= int(t_start * self._sampling_rate)
+            timestamp = timestamp[keep]
+            labels = labels[keep]
+
+        if t_stop is not None:
+            keep = timestamp <= int(t_stop * self._sampling_rate)
+            timestamp = timestamp[keep]
+            labels = labels[keep]
+
+        return timestamp, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype) / self._sampling_rate
+        return event_times
+
+    def read_raw_protocol(self):
+        """
+        Read the protocol waveform of the file, if present;
+        function works with ABF2 only. Protocols can be reconstructed
+        from the ABF1 header.
+
+        Returns: list of segments (one for every episode)
+                 with a list of analog signals (one for every DAC).
+
+        Author:  JS Nowacki
+        """
+        info = self._axon_info
+
+        if info['fFileVersionNumber'] < 2.:
+            raise IOError("Protocol section is only present in ABF2 files.")
+
+        nADC = info['sections']['ADCSection'][
+            'llNumEntries']  # Number of ADC channels
+        nDAC = info['sections']['DACSection'][
+            'llNumEntries']  # Number of DAC channels
+        nSam = int(info['protocol'][
+            'lNumSamplesPerEpisode'] / nADC)  # Number of samples per episode
+        nEpi = info['lActualEpisodes']  # Actual number of episodes
+
+        # Make a list of segments with analog signals with just holding levels
+        # List of segments relates to number of episodes, as for recorded data
+        sigs_by_segments = []
+        for epiNum in range(nEpi):
+            # One analog signal for each DAC in segment (episode)
+            signals = []
+            for DACNum in range(nDAC):
+                sig = np.ones(nSam) * info['listDACInfo'][DACNum]['fDACHoldingLevel']
+                # If there are epoch infos for this DAC
+                if DACNum in info['dictEpochInfoPerDAC']:
+                    # Save last sample index
+                    i_last = int(nSam * 15625 / 10 ** 6)
+                    # TODO guess for first holding
+                    # Go over EpochInfoPerDAC and change the analog signal
+                    # according to the epochs
+                    epochInfo = info['dictEpochInfoPerDAC'][DACNum]
+                    for epochNum, epoch in epochInfo.items():
+                        i_begin = i_last
+                        i_end = i_last + epoch['lEpochInitDuration'] + \
+                            epoch['lEpochDurationInc'] * epiNum
+                        dif = i_end - i_begin
+                        sig[i_begin:i_end] = np.ones((dif)) * \
+                            (epoch['fEpochInitLevel'] + epoch['fEpochLevelInc'] * epiNum)
+                        i_last += epoch['lEpochInitDuration'] + \
+                            epoch['lEpochDurationInc'] * epiNum
+                signals.append(sig)
+            sigs_by_segments.append(signals)
+
+        sig_names = []
+        sig_units = []
+        for DACNum in range(nDAC):
+            name = info['listDACInfo'][DACNum]['DACChNames'].decode("utf-8")
+            units = info['listDACInfo'][DACNum]['DACChUnits']. \
+                replace(b'\xb5', b'u').decode('utf-8')  # \xb5 is µ
+            sig_names.append(name)
+            sig_units.append(units)
+
+        return sigs_by_segments, sig_names, sig_units
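
A usage sketch for the protocol reconstruction above, assuming an ABF2 file recorded in episodic mode at the hypothetical path 'episodic.abf' (ABF1 files raise IOError here):

    reader = AxonRawIO(filename='episodic.abf')          # hypothetical ABF2 file
    reader.parse_header()
    sigs_by_segments, sig_names, sig_units = reader.read_raw_protocol()
    # one entry per episode, each holding one reconstructed waveform per DAC channel
    print(len(sigs_by_segments), sig_names, sig_units)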
+
+
+def parse_axon_soup(filename):
+    """
+    read the header of the file
+
+    The strategy here differs from the original script under Matlab.
+    In the original script for ABF2, it completes the header with
+    information that is located in other structures.
+
+    In ABF2 this function returns an info dict with the sub-dicts:
+        sections             (ABF2)
+        protocol             (ABF2)
+        listTags             (ABF1&2)
+        listADCInfo          (ABF2)
+        listDACInfo          (ABF2)
+        dictEpochInfoPerDAC  (ABF2)
+    that contains more information.
+    """
+    with open(filename, 'rb') as fid:
+        f = StructFile(fid)
+
+        # version
+        f_file_signature = f.read(4)
+        if f_file_signature == b'ABF ':
+            header_description = headerDescriptionV1
+        elif f_file_signature == b'ABF2':
+            header_description = headerDescriptionV2
+        else:
+            return None
+
+        # construct dict
+        header = {}
+        for key, offset, fmt in header_description:
+            val = f.read_f(fmt, offset=offset)
+            if len(val) == 1:
+                header[key] = val[0]
+            else:
+                header[key] = np.array(val)
+
+        # correction of version number and starttime
+        if f_file_signature == b'ABF ':
+            header['lFileStartTime'] += header[
+                'nFileStartMillisecs'] * .001
+        elif f_file_signature == b'ABF2':
+            n = header['fFileVersionNumber']
+            header['fFileVersionNumber'] = n[3] + 0.1 * n[2] + \
+                0.01 * n[1] + 0.001 * n[0]
+            header['lFileStartTime'] = header['uFileStartTimeMS'] * .001
+
+        if header['fFileVersionNumber'] < 2.:
+            # tags
+            listTag = []
+            for i in range(header['lNumTagEntries']):
+                f.seek(header['lTagSectionPtr'] + i * 64)
+                tag = {}
+                for key, fmt in TagInfoDescription:
+                    val = f.read_f(fmt)
+                    if len(val) == 1:
+                        tag[key] = val[0]
+                    else:
+                        tag[key] = np.array(val)
+                listTag.append(tag)
+            header['listTag'] = listTag
+            # protocol name formatting
+            header['sProtocolPath'] = clean_string(header['sProtocolPath'])
+            header['sProtocolPath'] = header['sProtocolPath']. \
+                replace(b'\\', b'/')
+
+        elif header['fFileVersionNumber'] >= 2.:
+            # in abf2 some info are in other place
+
+            # sections
+            sections = {}
+            for s, sectionName in enumerate(sectionNames):
+                uBlockIndex, uBytes, llNumEntries = \
+                    f.read_f('IIl', offset=76 + s * 16)
+                sections[sectionName] = {}
+                sections[sectionName]['uBlockIndex'] = uBlockIndex
+                sections[sectionName]['uBytes'] = uBytes
+                sections[sectionName]['llNumEntries'] = llNumEntries
+            header['sections'] = sections
+
+            # strings sections
+            # hack for reading channel names and units
+            # this section is not well documented, so the code is
+            # not very robust. The idea is to remove the first
+            # part by finding one of the following KEYs;
+            # unfortunately the later part contains the file name,
+            # which can by accident also contain one of these keys...
+            f.seek(sections['StringsSection']['uBlockIndex'] * BLOCKSIZE)
+            big_string = f.read(sections['StringsSection']['uBytes'])
+            goodstart = -1
+            for key in [b'AXENGN', b'clampex', b'Clampex',
+                        b'CLAMPEX', b'axoscope', b'AxoScope', b'Clampfit']:
+                # goodstart = big_string.lower().find(key)
+                goodstart = big_string.find(b'\x00' + key)
+                if goodstart != -1:
+                    break
+            assert goodstart != -1, \
+                'This file does not contain clampex, axoscope or clampfit in the header'
+            big_string = big_string[goodstart + 1:]
+            strings = big_string.split(b'\x00')
+
+            # ADC sections
+            header['listADCInfo'] = []
+            for i in range(sections['ADCSection']['llNumEntries']):
+                # read ADCInfo
+                f.seek(sections['ADCSection']['uBlockIndex']
+                       * BLOCKSIZE + sections['ADCSection']['uBytes'] * i)
+                ADCInfo = {}
+                for key, fmt in ADCInfoDescription:
+                    val = f.read_f(fmt)
+                    if len(val) == 1:
+                        ADCInfo[key] = val[0]
+                    else:
+                        ADCInfo[key] = np.array(val)
+                ADCInfo['ADCChNames'] = strings[ADCInfo['lADCChannelNameIndex'] - 1]
+                ADCInfo['ADCChUnits'] = strings[ADCInfo['lADCUnitsIndex'] - 1]
+                header['listADCInfo'].append(ADCInfo)
+
+            # protocol sections
+            protocol = {}
+            f.seek(sections['ProtocolSection']['uBlockIndex'] * BLOCKSIZE)
+            for key, fmt in protocolInfoDescription:
+                val = f.read_f(fmt)
+                if len(val) == 1:
+                    protocol[key] = val[0]
+                else:
+                    protocol[key] = np.array(val)
+            header['protocol'] = protocol
+            header['sProtocolPath'] = strings[header['uProtocolPathIndex'] - 1]
+
+            # tags
+            listTag = []
+            for i in range(sections['TagSection']['llNumEntries']):
+                f.seek(sections['TagSection']['uBlockIndex']
+                       * BLOCKSIZE + sections['TagSection']['uBytes'] * i)
+                tag = {}
+                for key, fmt in TagInfoDescription:
+                    val = f.read_f(fmt)
+                    if len(val) == 1:
+                        tag[key] = val[0]
+                    else:
+                        tag[key] = np.array(val)
+                listTag.append(tag)
+
+            header['listTag'] = listTag
+
+            # DAC sections
+            header['listDACInfo'] = []
+            for i in range(sections['DACSection']['llNumEntries']):
+                # read DACInfo
+                f.seek(sections['DACSection']['uBlockIndex']
+                       * BLOCKSIZE + sections['DACSection']['uBytes'] * i)
+                DACInfo = {}
+                for key, fmt in DACInfoDescription:
+                    val = f.read_f(fmt)
+                    if len(val) == 1:
+                        DACInfo[key] = val[0]
+                    else:
+                        DACInfo[key] = np.array(val)
+                DACInfo['DACChNames'] = strings[DACInfo['lDACChannelNameIndex']
+                                                - 1]
+                DACInfo['DACChUnits'] = strings[
+                    DACInfo['lDACChannelUnitsIndex'] - 1]
+
+                header['listDACInfo'].append(DACInfo)
+
+            # EpochPerDAC  sections
+            # header['dictEpochInfoPerDAC'] is dict of dicts:
+            #  - the first index is the DAC number
+            #  - the second index is the epoch number
+            # It has to be done like that because data may not exist
+            # and may not be in sorted order
+            header['dictEpochInfoPerDAC'] = {}
+            for i in range(sections['EpochPerDACSection']['llNumEntries']):
+                #  read DACInfo
+                f.seek(sections['EpochPerDACSection']['uBlockIndex']
+                       * BLOCKSIZE + sections['EpochPerDACSection']['uBytes'] * i)
+                EpochInfoPerDAC = {}
+                for key, fmt in EpochInfoPerDACDescription:
+                    val = f.read_f(fmt)
+                    if len(val) == 1:
+                        EpochInfoPerDAC[key] = val[0]
+                    else:
+                        EpochInfoPerDAC[key] = np.array(val)
+
+                DACNum = EpochInfoPerDAC['nDACNum']
+                EpochNum = EpochInfoPerDAC['nEpochNum']
+                # If this DAC number has not been seen yet,
+                # create an empty dict to populate
+                if DACNum not in header['dictEpochInfoPerDAC']:
+                    header['dictEpochInfoPerDAC'][DACNum] = {}
+
+                header['dictEpochInfoPerDAC'][DACNum][EpochNum] = \
+                    EpochInfoPerDAC
+
+            # Epoch sections
+            header['EpochInfo'] = []
+            for i in range(sections['EpochSection']['llNumEntries']):
+                # read EpochInfo
+                f.seek(sections['EpochSection']['uBlockIndex']
+                       * BLOCKSIZE + sections['EpochSection']['uBytes'] * i)
+                EpochInfo = {}
+                for key, fmt in EpochInfoDescription:
+                    val = f.read_f(fmt)
+                    if len(val) == 1:
+                        EpochInfo[key] = val[0]
+                    else:
+                        EpochInfo[key] = np.array(val)
+                header['EpochInfo'].append(EpochInfo)
+
+        # date and time
+        if header['fFileVersionNumber'] < 2.:
+            YY = 1900
+            MM = 1
+            DD = 1
+            hh = int(header['lFileStartTime'] / 3600.)
+            mm = int((header['lFileStartTime'] - hh * 3600) / 60)
+            ss = header['lFileStartTime'] - hh * 3600 - mm * 60
+            ms = int(np.mod(ss, 1) * 1e6)
+            ss = int(ss)
+        elif header['fFileVersionNumber'] >= 2.:
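+            # e.g. uFileStartDate == 20180623 decodes to YY=2018, MM=6, DD=23, and
+            # uFileStartTimeMS is the start time in ms since midnight (illustrative value).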
+            YY = int(header['uFileStartDate'] / 10000)
+            MM = int((header['uFileStartDate'] - YY * 10000) / 100)
+            DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
+            hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
+            mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
+            ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
+            ms = int(np.mod(ss, 1) * 1e6)
+            ss = int(ss)
+        header['rec_datetime'] = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)
+
+    return header
+
+
+class StructFile(BufferedReader):
+    def read_f(self, fmt, offset=None):
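+        # fmt is a struct format string (e.g. 'h' for an int16 or '16f' for 16 floats);
+        # struct.unpack always returns a tuple, even for a single value.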
+        if offset is not None:
+            self.seek(offset)
+        return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
+
+
+def clean_string(s):
+    s = s.rstrip(b'\x00')
+    s = s.rstrip(b' ')
+    return s
+
+
+BLOCKSIZE = 512
+
+headerDescriptionV1 = [
+    ('fFileSignature', 0, '4s'),
+    ('fFileVersionNumber', 4, 'f'),
+    ('nOperationMode', 8, 'h'),
+    ('lActualAcqLength', 10, 'i'),
+    ('nNumPointsIgnored', 14, 'h'),
+    ('lActualEpisodes', 16, 'i'),
+    ('lFileStartTime', 24, 'i'),
+    ('lDataSectionPtr', 40, 'i'),
+    ('lTagSectionPtr', 44, 'i'),
+    ('lNumTagEntries', 48, 'i'),
+    ('lSynchArrayPtr', 92, 'i'),
+    ('lSynchArraySize', 96, 'i'),
+    ('nDataFormat', 100, 'h'),
+    ('nADCNumChannels', 120, 'h'),
+    ('fADCSampleInterval', 122, 'f'),
+    ('fSynchTimeUnit', 130, 'f'),
+    ('lNumSamplesPerEpisode', 138, 'i'),
+    ('lPreTriggerSamples', 142, 'i'),
+    ('lEpisodesPerRun', 146, 'i'),
+    ('fADCRange', 244, 'f'),
+    ('lADCResolution', 252, 'i'),
+    ('nFileStartMillisecs', 366, 'h'),
+    ('nADCPtoLChannelMap', 378, '16h'),
+    ('nADCSamplingSeq', 410, '16h'),
+    ('sADCChannelName', 442, '10s' * 16),
+    ('sADCUnits', 602, '8s' * 16),
+    ('fADCProgrammableGain', 730, '16f'),
+    ('fInstrumentScaleFactor', 922, '16f'),
+    ('fInstrumentOffset', 986, '16f'),
+    ('fSignalGain', 1050, '16f'),
+    ('fSignalOffset', 1114, '16f'),
+
+    ('nDigitalEnable', 1436, 'h'),
+    ('nActiveDACChannel', 1440, 'h'),
+    ('nDigitalHolding', 1584, 'h'),
+    ('nDigitalInterEpisode', 1586, 'h'),
+    ('nDigitalValue', 2588, '10h'),
+    ('lDACFilePtr', 2048, '2i'),
+    ('lDACFileNumEpisodes', 2056, '2i'),
+    ('fDACCalibrationFactor', 2074, '4f'),
+    ('fDACCalibrationOffset', 2090, '4f'),
+    ('nWaveformEnable', 2296, '2h'),
+    ('nWaveformSource', 2300, '2h'),
+    ('nInterEpisodeLevel', 2304, '2h'),
+    ('nEpochType', 2308, '20h'),
+    ('fEpochInitLevel', 2348, '20f'),
+    ('fEpochLevelInc', 2428, '20f'),
+    ('lEpochInitDuration', 2508, '20i'),
+    ('lEpochDurationInc', 2588, '20i'),
+
+    ('nTelegraphEnable', 4512, '16h'),
+    ('fTelegraphAdditGain', 4576, '16f'),
+    ('sProtocolPath', 4898, '384s'),
+]
+
+headerDescriptionV2 = [
+    ('fFileSignature', 0, '4s'),
+    ('fFileVersionNumber', 4, '4b'),
+    ('uFileInfoSize', 8, 'I'),
+    ('lActualEpisodes', 12, 'I'),
+    ('uFileStartDate', 16, 'I'),
+    ('uFileStartTimeMS', 20, 'I'),
+    ('uStopwatchTime', 24, 'I'),
+    ('nFileType', 28, 'H'),
+    ('nDataFormat', 30, 'H'),
+    ('nSimultaneousScan', 32, 'H'),
+    ('nCRCEnable', 34, 'H'),
+    ('uFileCRC', 36, 'I'),
+    ('FileGUID', 40, 'I'),
+    ('uCreatorVersion', 56, 'I'),
+    ('uCreatorNameIndex', 60, 'I'),
+    ('uModifierVersion', 64, 'I'),
+    ('uModifierNameIndex', 68, 'I'),
+    ('uProtocolPathIndex', 72, 'I'),
+]
+
+sectionNames = [
+    'ProtocolSection',
+    'ADCSection',
+    'DACSection',
+    'EpochSection',
+    'ADCPerDACSection',
+    'EpochPerDACSection',
+    'UserListSection',
+    'StatsRegionSection',
+    'MathSection',
+    'StringsSection',
+    'DataSection',
+    'TagSection',
+    'ScopeSection',
+    'DeltaSection',
+    'VoiceTagSection',
+    'SynchArraySection',
+    'AnnotationSection',
+    'StatsSection',
+]
+
+protocolInfoDescription = [
+    ('nOperationMode', 'h'),
+    ('fADCSequenceInterval', 'f'),
+    ('bEnableFileCompression', 'b'),
+    ('sUnused1', '3s'),
+    ('uFileCompressionRatio', 'I'),
+    ('fSynchTimeUnit', 'f'),
+    ('fSecondsPerRun', 'f'),
+    ('lNumSamplesPerEpisode', 'i'),
+    ('lPreTriggerSamples', 'i'),
+    ('lEpisodesPerRun', 'i'),
+    ('lRunsPerTrial', 'i'),
+    ('lNumberOfTrials', 'i'),
+    ('nAveragingMode', 'h'),
+    ('nUndoRunCount', 'h'),
+    ('nFirstEpisodeInRun', 'h'),
+    ('fTriggerThreshold', 'f'),
+    ('nTriggerSource', 'h'),
+    ('nTriggerAction', 'h'),
+    ('nTriggerPolarity', 'h'),
+    ('fScopeOutputInterval', 'f'),
+    ('fEpisodeStartToStart', 'f'),
+    ('fRunStartToStart', 'f'),
+    ('lAverageCount', 'i'),
+    ('fTrialStartToStart', 'f'),
+    ('nAutoTriggerStrategy', 'h'),
+    ('fFirstRunDelayS', 'f'),
+    ('nChannelStatsStrategy', 'h'),
+    ('lSamplesPerTrace', 'i'),
+    ('lStartDisplayNum', 'i'),
+    ('lFinishDisplayNum', 'i'),
+    ('nShowPNRawData', 'h'),
+    ('fStatisticsPeriod', 'f'),
+    ('lStatisticsMeasurements', 'i'),
+    ('nStatisticsSaveStrategy', 'h'),
+    ('fADCRange', 'f'),
+    ('fDACRange', 'f'),
+    ('lADCResolution', 'i'),
+    ('lDACResolution', 'i'),
+    ('nExperimentType', 'h'),
+    ('nManualInfoStrategy', 'h'),
+    ('nCommentsEnable', 'h'),
+    ('lFileCommentIndex', 'i'),
+    ('nAutoAnalyseEnable', 'h'),
+    ('nSignalType', 'h'),
+    ('nDigitalEnable', 'h'),
+    ('nActiveDACChannel', 'h'),
+    ('nDigitalHolding', 'h'),
+    ('nDigitalInterEpisode', 'h'),
+    ('nDigitalDACChannel', 'h'),
+    ('nDigitalTrainActiveLogic', 'h'),
+    ('nStatsEnable', 'h'),
+    ('nStatisticsClearStrategy', 'h'),
+    ('nLevelHysteresis', 'h'),
+    ('lTimeHysteresis', 'i'),
+    ('nAllowExternalTags', 'h'),
+    ('nAverageAlgorithm', 'h'),
+    ('fAverageWeighting', 'f'),
+    ('nUndoPromptStrategy', 'h'),
+    ('nTrialTriggerSource', 'h'),
+    ('nStatisticsDisplayStrategy', 'h'),
+    ('nExternalTagType', 'h'),
+    ('nScopeTriggerOut', 'h'),
+    ('nLTPType', 'h'),
+    ('nAlternateDACOutputState', 'h'),
+    ('nAlternateDigitalOutputState', 'h'),
+    ('fCellID', '3f'),
+    ('nDigitizerADCs', 'h'),
+    ('nDigitizerDACs', 'h'),
+    ('nDigitizerTotalDigitalOuts', 'h'),
+    ('nDigitizerSynchDigitalOuts', 'h'),
+    ('nDigitizerType', 'h'),
+]
+
+ADCInfoDescription = [
+    ('nADCNum', 'h'),
+    ('nTelegraphEnable', 'h'),
+    ('nTelegraphInstrument', 'h'),
+    ('fTelegraphAdditGain', 'f'),
+    ('fTelegraphFilter', 'f'),
+    ('fTelegraphMembraneCap', 'f'),
+    ('nTelegraphMode', 'h'),
+    ('fTelegraphAccessResistance', 'f'),
+    ('nADCPtoLChannelMap', 'h'),
+    ('nADCSamplingSeq', 'h'),
+    ('fADCProgrammableGain', 'f'),
+    ('fADCDisplayAmplification', 'f'),
+    ('fADCDisplayOffset', 'f'),
+    ('fInstrumentScaleFactor', 'f'),
+    ('fInstrumentOffset', 'f'),
+    ('fSignalGain', 'f'),
+    ('fSignalOffset', 'f'),
+    ('fSignalLowpassFilter', 'f'),
+    ('fSignalHighpassFilter', 'f'),
+    ('nLowpassFilterType', 'b'),
+    ('nHighpassFilterType', 'b'),
+    ('fPostProcessLowpassFilter', 'f'),
+    ('nPostProcessLowpassFilterType', 'c'),
+    ('bEnabledDuringPN', 'b'),
+    ('nStatsChannelPolarity', 'h'),
+    ('lADCChannelNameIndex', 'i'),
+    ('lADCUnitsIndex', 'i'),
+]
+
+TagInfoDescription = [
+    ('lTagTime', 'i'),
+    ('sComment', '56s'),
+    ('nTagType', 'h'),
+    ('nVoiceTagNumber_or_AnnotationIndex', 'h'),
+]
+
+DACInfoDescription = [
+    ('nDACNum', 'h'),
+    ('nTelegraphDACScaleFactorEnable', 'h'),
+    ('fInstrumentHoldingLevel', 'f'),
+    ('fDACScaleFactor', 'f'),
+    ('fDACHoldingLevel', 'f'),
+    ('fDACCalibrationFactor', 'f'),
+    ('fDACCalibrationOffset', 'f'),
+    ('lDACChannelNameIndex', 'i'),
+    ('lDACChannelUnitsIndex', 'i'),
+    ('lDACFilePtr', 'i'),
+    ('lDACFileNumEpisodes', 'i'),
+    ('nWaveformEnable', 'h'),
+    ('nWaveformSource', 'h'),
+    ('nInterEpisodeLevel', 'h'),
+    ('fDACFileScale', 'f'),
+    ('fDACFileOffset', 'f'),
+    ('lDACFileEpisodeNum', 'i'),
+    ('nDACFileADCNum', 'h'),
+    ('nConditEnable', 'h'),
+    ('lConditNumPulses', 'i'),
+    ('fBaselineDuration', 'f'),
+    ('fBaselineLevel', 'f'),
+    ('fStepDuration', 'f'),
+    ('fStepLevel', 'f'),
+    ('fPostTrainPeriod', 'f'),
+    ('fPostTrainLevel', 'f'),
+    ('nMembTestEnable', 'h'),
+    ('nLeakSubtractType', 'h'),
+    ('nPNPolarity', 'h'),
+    ('fPNHoldingLevel', 'f'),
+    ('nPNNumADCChannels', 'h'),
+    ('nPNPosition', 'h'),
+    ('nPNNumPulses', 'h'),
+    ('fPNSettlingTime', 'f'),
+    ('fPNInterpulse', 'f'),
+    ('nLTPUsageOfDAC', 'h'),
+    ('nLTPPresynapticPulses', 'h'),
+    ('lDACFilePathIndex', 'i'),
+    ('fMembTestPreSettlingTimeMS', 'f'),
+    ('fMembTestPostSettlingTimeMS', 'f'),
+    ('nLeakSubtractADCIndex', 'h'),
+    ('sUnused', '124s'),
+]
+
+EpochInfoPerDACDescription = [
+    ('nEpochNum', 'h'),
+    ('nDACNum', 'h'),
+    ('nEpochType', 'h'),
+    ('fEpochInitLevel', 'f'),
+    ('fEpochLevelInc', 'f'),
+    ('lEpochInitDuration', 'i'),
+    ('lEpochDurationInc', 'i'),
+    ('lEpochPulsePeriod', 'i'),
+    ('lEpochPulseWidth', 'i'),
+    ('sUnused', '18s'),
+]
+
+EpochInfoDescription = [
+    ('nEpochNum', 'h'),
+    ('nDigitalValue', 'h'),
+    ('nDigitalTrainValue', 'h'),
+    ('nAlternateDigitalValue', 'h'),
+    ('nAlternateDigitalTrainValue', 'h'),
+    ('bEpochCompression', 'b'),
+    ('sUnused', '21s'),
+]

+ 694 - 0
code/python-neo/neo/rawio/baserawio.py

@@ -0,0 +1,694 @@
+# -*- coding: utf-8 -*-
+"""
+baserawio
+======
+
+Classes
+-------
+
+BaseRawIO
+abstract class which should be overridden to write a RawIO.
+
+RawIO is a new API in neo that is supposed to acces as fast as possible
+raw data. All IO with theses carractéristics should/could be rewritten:
+  * internally use of memmap (or hdf5)
+  * reading header is quite cheap (not read all the file)
+  * neo tree object is symetric and logical: same channel/units/event
+    along all block and segments.
+
+
+So this handle **only** one simplified but very frequent case of dataset:
+    * Only one channel set  for AnalogSignal (aka ChannelIndex) stable along Segment
+    * Only one channel set  for SpikeTrain (aka Unit) stable along Segment
+    * AnalogSignal have all the same sampling_rate acroos all Segment
+    * t_start/t_stop are the same for many object (SpikeTrain, Event) inside a Segment
+    * AnalogSignal should all have the same sampling_rate otherwise the won't be read
+      a the same time. So signal_group_mode=='split-all' in BaseFromRaw
+
+
+An helper class `neo.io.basefromrawio.BaseFromRaw` should transform a RawIO to
+neo legacy IO from free.
+
+With this API the IO have an attributes `header` with necessary keys.
+See ExampleRawIO as example.
+
+
+BaseRawIO implement a possible presistent cache system that can be used
+by some IOs to avoid very long parse_header(). The idea is that some variable
+or vector can be store somewhere (near the fiel, /tmp, any path)
+
+
+"""
+
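+# A minimal usage sketch (illustrative only; 'myfile.fake' is a hypothetical file name,
+# and it assumes a concrete subclass such as ExampleRawIO is importable from neo.rawio):
+#
+#     from neo.rawio import ExampleRawIO
+#     reader = ExampleRawIO(filename='myfile.fake')
+#     reader.parse_header()
+#     raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
+#                                         i_start=0, i_stop=1024,
+#                                         channel_indexes=None)
+#     sigs = reader.rescale_signal_raw_to_float(raw, dtype='float32')
+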
+# from __future__ import unicode_literals, print_function, division, absolute_import
+from __future__ import print_function, division, absolute_import
+
+import logging
+import numpy as np
+import os
+import sys
+
+from neo import logging_handler
+
+try:
+    import joblib
+
+    HAVE_JOBLIB = True
+except ImportError:
+    HAVE_JOBLIB = False
+
+possible_raw_modes = ['one-file', 'multi-file', 'one-dir', ]  # 'multi-dir', 'url', 'other'
+
+error_header = 'Header is not read yet, do parse_header() first'
+
+_signal_channel_dtype = [
+    ('name', 'U64'),
+    ('id', 'int64'),
+    ('sampling_rate', 'float64'),
+    ('dtype', 'U16'),
+    ('units', 'U64'),
+    ('gain', 'float64'),
+    ('offset', 'float64'),
+    ('group_id', 'int64'),
+]
+
+_common_sig_characteristics = ['sampling_rate', 'dtype', 'group_id']
+
+_unit_channel_dtype = [
+    ('name', 'U64'),
+    ('id', 'U64'),
+    # for waveform
+    ('wf_units', 'U64'),
+    ('wf_gain', 'float64'),
+    ('wf_offset', 'float64'),
+    ('wf_left_sweep', 'int64'),
+    ('wf_sampling_rate', 'float64'),
+]
+
+_event_channel_dtype = [
+    ('name', 'U64'),
+    ('id', 'U64'),
+    ('type', 'S5'),  # epoch or event
+]
+
+
+class BaseRawIO(object):
+    """
+    Generic base class from which raw-data IOs are derived.
+
+    """
+
+    name = 'BaseIO'
+    description = ''
+    extensions = []
+
+    rawmode = None  # one key in possible_raw_modes
+
+    def __init__(self, use_cache=False, cache_path='same_as_resource', **kargs):
+        """
+
+        When rawmode=='one-file', kargs MUST contain 'filename', the file name.
+        When rawmode=='multi-file', kargs MUST contain 'filename', one of the file names.
+        When rawmode=='one-dir', kargs MUST contain 'dirname', the directory name.
+
+
+        """
+        # create a logger for the IO class
+        fullname = self.__class__.__module__ + '.' + self.__class__.__name__
+        self.logger = logging.getLogger(fullname)
+        # create a logger for 'neo' and add a handler to it if it doesn't
+        # have one already.
+        # (it will also not add one if the root logger has a handler)
+        corename = self.__class__.__module__.split('.')[0]
+        corelogger = logging.getLogger(corename)
+        rootlogger = logging.getLogger()
+        if not corelogger.handlers and not rootlogger.handlers:
+            corelogger.addHandler(logging_handler)
+
+        self.use_cache = use_cache
+        if use_cache:
+            assert HAVE_JOBLIB, 'You need to install joblib for cache'
+            self.setup_cache(cache_path)
+        else:
+            self._cache = None
+
+        self.header = None
+
+    def parse_header(self):
+        """
+        This must parse the file header and fill self.header
+        so that everything afterwards is fast.
+
+        self.header must contain:
+        self.header['nb_block']
+        self.header['nb_segment']
+        self.header['signal_channels']
+        self.header['unit_channels']
+        self.header['event_channels']
+        """
+        self._parse_header()
+        self._group_signal_channel_characteristics()
+
+    def source_name(self):
+        """Return fancy name of file source"""
+        return self._source_name()
+
+    def __repr__(self):
+        txt = '{}: {}\n'.format(self.__class__.__name__, self.source_name())
+        if self.header is not None:
+            nb_block = self.block_count()
+            txt += 'nb_block: {}\n'.format(nb_block)
+            nb_seg = [self.segment_count(i) for i in range(nb_block)]
+            txt += 'nb_segment:  {}\n'.format(nb_seg)
+
+            for k in ('signal_channels', 'unit_channels', 'event_channels'):
+                ch = self.header[k]
+                if len(ch) > 8:
+                    chantxt = "[{} ... {}]".format(', '.join(e for e in ch['name'][:4]),
+                                                   ' '.join(e for e in ch['name'][-4:]))
+                else:
+                    chantxt = "[{}]".format(', '.join(e for e in ch['name']))
+                txt += '{}: {}\n'.format(k, chantxt)
+
+        return txt
+
+    def _generate_minimal_annotations(self):
+        """
+        Helper function that generates a nested dict
+        of all annotations.
+        Must be called once these are available:
+          * block_count()
+          * segment_count()
+          * signal_channels_count()
+          * unit_channels_count()
+          * event_channels_count()
+
+        Usage:
+        raw_annotations['blocks'][block_index] = { 'nickname' : 'super block', 'segments' : ...}
+        raw_annotations['blocks'][block_index]['segments'][seg_index]['signals'][channel_index] = {'nickname': 'super channel'}
+        raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][unit_index] = {'nickname': 'super neuron'}
+        raw_annotations['blocks'][block_index]['segments'][seg_index]['events'][ev_chan] = {'nickname': 'super trigger'}
+
+        These annotations will be used directly by the neo.io API on the corresponding objects.
+
+        Standard annotations like name/id/file_origin are already generated here.
+        """
+        signal_channels = self.header['signal_channels']
+        unit_channels = self.header['unit_channels']
+        event_channels = self.header['event_channels']
+
+        a = {'blocks': [], 'signal_channels': [], 'unit_channels': [], 'event_channels': []}
+        for block_index in range(self.block_count()):
+            d = {'segments': []}
+            d['file_origin'] = self.source_name()
+            a['blocks'].append(d)
+            for seg_index in range(self.segment_count(block_index)):
+                d = {'signals': [], 'units': [], 'events': []}
+                d['file_origin'] = self.source_name()
+                a['blocks'][block_index]['segments'].append(d)
+
+                for c in range(signal_channels.size):
+                    # use for AnalogSignal.annotations
+                    d = {}
+                    d['name'] = signal_channels['name'][c]
+                    d['channel_id'] = signal_channels['id'][c]
+                    a['blocks'][block_index]['segments'][seg_index]['signals'].append(d)
+
+                for c in range(unit_channels.size):
+                    # use for SpikeTrain.annotations
+                    d = {}
+                    d['name'] = unit_channels['name'][c]
+                    d['id'] = unit_channels['id'][c]
+                    a['blocks'][block_index]['segments'][seg_index]['units'].append(d)
+
+                for c in range(event_channels.size):
+                    # use for Event.annotations
+                    d = {}
+                    d['name'] = event_channels['name'][c]
+                    d['id'] = event_channels['id'][c]
+                    d['file_origin'] = self._source_name()
+                    a['blocks'][block_index]['segments'][seg_index]['events'].append(d)
+
+        for c in range(signal_channels.size):
+            # use for ChannelIndex.annotations
+            d = {}
+            d['name'] = signal_channels['name'][c]
+            d['channel_id'] = signal_channels['id'][c]
+            d['file_origin'] = self._source_name()
+            a['signal_channels'].append(d)
+
+        for c in range(unit_channels.size):
+            # use for Unit.annotations
+            d = {}
+            d['name'] = unit_channels['name'][c]
+            d['id'] = unit_channels['id'][c]
+            d['file_origin'] = self._source_name()
+            a['unit_channels'].append(d)
+
+        for c in range(event_channels.size):
+            # not used in neo.io at the moment, but could be useful one day
+            d = {}
+            d['name'] = event_channels['name'][c]
+            d['id'] = event_channels['id'][c]
+            d['file_origin'] = self._source_name()
+            a['event_channels'].append(d)
+
+        self.raw_annotations = a
+
+    def _raw_annotate(self, obj_name, chan_index=0, block_index=0, seg_index=0, **kargs):
+        """
+        Annotate an object in the nested list/dict annotation tree.
+        """
+        bl_annotations = self.raw_annotations['blocks'][block_index]
+        seg_annotations = bl_annotations['segments'][seg_index]
+        if obj_name == 'blocks':
+            bl_annotations.update(kargs)
+        elif obj_name == 'segments':
+            seg_annotations.update(kargs)
+        elif obj_name in ['signals', 'events', 'units']:
+            obj_annotations = seg_annotations[obj_name][chan_index]
+            obj_annotations.update(kargs)
+        elif obj_name in ['signal_channels', 'unit_channels', 'event_channels']:
+            obj_annotations = self.raw_annotations[obj_name][chan_index]
+            obj_annotations.update(kargs)
+
+    def _repr_annotations(self):
+        txt = 'Raw annotations\n'
+        for block_index in range(self.block_count()):
+            bl_a = self.raw_annotations['blocks'][block_index]
+            txt += '*Block {}\n'.format(block_index)
+            for k, v in bl_a.items():
+                if k in ('segments',):
+                    continue
+                txt += '  -{}: {}\n'.format(k, v)
+            for seg_index in range(self.segment_count(block_index)):
+                seg_a = bl_a['segments'][seg_index]
+                txt += '  *Segment {}\n'.format(seg_index)
+                for k, v in seg_a.items():
+                    if k in ('signals', 'units', 'events',):
+                        continue
+                    txt += '    -{}: {}\n'.format(k, v)
+
+                for child in ('signals', 'units', 'events'):
+                    n = self.header[child[:-1] + '_channels'].shape[0]
+                    for c in range(n):
+                        neo_name = {'signals': 'AnalogSignal',
+                                    'units': 'SpikeTrain', 'events': 'Event/Epoch'}[child]
+                        txt += '    *{} {}\n'.format(neo_name, c)
+                        child_a = seg_a[child][c]
+                        for k, v in child_a.items():
+                            txt += '      -{}: {}\n'.format(k, v)
+
+        return txt
+
+    def print_annotations(self):
+        """Print formated raw_annotations"""
+        print(self._repr_annotations())
+
+    def block_count(self):
+        """return number of blocks"""
+        return self.header['nb_block']
+
+    def segment_count(self, block_index):
+        """return number of segment for a given block"""
+        return self.header['nb_segment'][block_index]
+
+    def signal_channels_count(self):
+        """Return the number of signal channel.
+        Same allong all block and Segment.
+        """
+        return len(self.header['signal_channels'])
+
+    def unit_channels_count(self):
+        """Return the number of unit (aka spike) channel.
+        Same allong all block and Segment.
+        """
+        return len(self.header['unit_channels'])
+
+    def event_channels_count(self):
+        """Return the number of event/epoch channel.
+        Same allong all block and Segment.
+        """
+        return len(self.header['event_channels'])
+
+    def segment_t_start(self, block_index, seg_index):
+        """Global t_start of a Segment in s. shared by all objects except
+        for AnalogSignal.
+        """
+        return self._segment_t_start(block_index, seg_index)
+
+    def segment_t_stop(self, block_index, seg_index):
+        """Global t_start of a Segment in s. shared by all objects except
+        for AnalogSignal.
+        """
+        return self._segment_t_stop(block_index, seg_index)
+
+    ###
+    # signal and channel zone
+
+    def _group_signal_channel_characteristics(self):
+        """
+        Useful for a few IOs (TdtRawIO, NeuroExplorerRawIO, ...).
+
+        Group signal channels that share the same characteristics:
+          * sampling_rate (global across blocks and segments)
+          * group_id (explicit channel group)
+
+        If all channels have the same characteristics then
+        `get_analogsignal_chunk` can be called without restriction.
+        If not, then **channel_indexes** must be specified
+        in `get_analogsignal_chunk`, and only channels with the same
+        characteristics can be read at the same time.
+
+        This is useful for IOs that internally expose several
+        families of signal channels.
+
+        For many RawIOs all channels have the same
+        sampling_rate/size/t_start. In that case the internal flag
+        **self._several_channel_groups** is set to False, so
+        `get_analogsignal_chunk(..)` does not suffer any performance penalty.
+
+        Note that at the neo.io level this has an impact on
+        `signal_group_mode`: 'split-all' works in any situation,
+        but grouping channels into the same AnalogSignal
+        with 'group-by-XXX' of course depends on common characteristics.
+
+        """
+
+        characteristics = self.header['signal_channels'][_common_sig_characteristics]
+        unique_characteristics = np.unique(characteristics)
+        if len(unique_characteristics) == 1:
+            self._several_channel_groups = False
+        else:
+            self._several_channel_groups = True
+
+    def _check_common_characteristics(self, channel_indexes):
+        """
+        Useful for a few IOs (TdtRawIO, NeuroExplorerRawIO, ...).
+
+        Check that a set of signal channel_indexes shares common
+        characteristics (**sampling_rate/t_start/size**).
+        Useful only when the RawIO exposes different channel groups,
+        for instance with different sampling rates.
+        """
+        # ~ print('_check_common_characteristics', channel_indexes)
+
+        assert channel_indexes is not None, \
+            'You must specify channel_indexes'
+        characteristics = self.header['signal_channels'][_common_sig_characteristics]
+        # ~ print(characteristics[channel_indexes])
+        assert np.unique(characteristics[channel_indexes]).size == 1, \
+            'This channel set has different characteristics'
+
+    def get_group_channel_indexes(self):
+        """
+        Useful for a few IOs (TdtRawIO, NeuroExplorerRawIO, ...).
+
+        Return a list of channel_indexes groups that share the same characteristics.
+        """
+        if self._several_channel_groups:
+            characteristics = self.header['signal_channels'][_common_sig_characteristics]
+            unique_characteristics = np.unique(characteristics)
+            channel_indexes_list = []
+            for e in unique_characteristics:
+                channel_indexes, = np.nonzero(characteristics == e)
+                channel_indexes_list.append(channel_indexes)
+            return channel_indexes_list
+        else:
+            return [None]
+
+    def channel_name_to_index(self, channel_names):
+        """
+        Transform channel_names to channel_indexes.
+        Based on self.header['signal_channels']
+        """
+        ch = self.header['signal_channels']
+        channel_indexes, = np.nonzero(np.in1d(ch['name'], channel_names))
+        assert len(channel_indexes) == len(channel_names), 'not match'
+        return channel_indexes
+
+    def channel_id_to_index(self, channel_ids):
+        """
+        Transform channel_ids to channel_indexes.
+        Based on self.header['signal_channels']
+        """
+        ch = self.header['signal_channels']
+        channel_indexes, = np.nonzero(np.in1d(ch['id'], channel_ids))
+        assert len(channel_indexes) == len(channel_ids), 'not match'
+        return channel_indexes
+
+    def _get_channel_indexes(self, channel_indexes, channel_names, channel_ids):
+        """
+        select channel_indexes from channel_indexes/channel_names/channel_ids
+        depending which is not None
+        """
+        if channel_indexes is None and channel_names is not None:
+            channel_indexes = self.channel_name_to_index(channel_names)
+
+        if channel_indexes is None and channel_ids is not None:
+            channel_indexes = self.channel_id_to_index(channel_ids)
+
+        return channel_indexes
+
+    def get_signal_size(self, block_index, seg_index, channel_indexes=None):
+        if self._several_channel_groups:
+            self._check_common_characteristics(channel_indexes)
+        return self._get_signal_size(block_index, seg_index, channel_indexes)
+
+    def get_signal_t_start(self, block_index, seg_index, channel_indexes=None):
+        if self._several_channel_groups:
+            self._check_common_characteristics(channel_indexes)
+        return self._get_signal_t_start(block_index, seg_index, channel_indexes)
+
+    def get_signal_sampling_rate(self, channel_indexes=None):
+        if self._several_channel_groups:
+            self._check_common_characteristics(channel_indexes)
+            chan_index0 = channel_indexes[0]
+        else:
+            chan_index0 = 0
+        sr = self.header['signal_channels'][chan_index0]['sampling_rate']
+        return float(sr)
+
+    def get_analogsignal_chunk(self, block_index=0, seg_index=0, i_start=None, i_stop=None,
+                               channel_indexes=None, channel_names=None, channel_ids=None):
+        """
+        Return a chunk of raw signal (not rescaled to physical units;
+        use rescale_signal_raw_to_float for that).
+        """
+        channel_indexes = self._get_channel_indexes(channel_indexes, channel_names, channel_ids)
+        if self._several_channel_groups:
+            self._check_common_characteristics(channel_indexes)
+
+        raw_chunk = self._get_analogsignal_chunk(
+            block_index, seg_index, i_start, i_stop, channel_indexes)
+
+        return raw_chunk
+
+    def rescale_signal_raw_to_float(self, raw_signal, dtype='float32',
+                                    channel_indexes=None, channel_names=None, channel_ids=None):
+
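+        # Per-channel conversion to physical units: float_signal = raw_signal * gain + offset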
+        channel_indexes = self._get_channel_indexes(channel_indexes, channel_names, channel_ids)
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+
+        channels = self.header['signal_channels'][channel_indexes]
+
+        float_signal = raw_signal.astype(dtype)
+
+        if np.any(channels['gain'] != 1.):
+            float_signal *= channels['gain']
+
+        if np.any(channels['offset'] != 0.):
+            float_signal += channels['offset']
+
+        return float_signal
+
+    # spiketrain and unit zone
+    def spike_count(self, block_index=0, seg_index=0, unit_index=0):
+        return self._spike_count(block_index, seg_index, unit_index)
+
+    def get_spike_timestamps(self, block_index=0, seg_index=0, unit_index=0,
+                             t_start=None, t_stop=None):
+        """
+        The timestamps are kept as close as possible to the native format
+        (sometimes float/int32/int64, sometimes an index on the signal, but not always).
+        The conversion to seconds or to index_on_signal is done outside of this method.
+
+        t_start/t_stop are limits in seconds.
+
+        """
+        timestamp = self._get_spike_timestamps(block_index, seg_index, unit_index, t_start, t_stop)
+        return timestamp
+
+    def rescale_spike_timestamp(self, spike_timestamps, dtype='float64'):
+        """
+        Rescale spike timestamps to seconds
+        """
+        return self._rescale_spike_timestamp(spike_timestamps, dtype)
+
+    # spiketrain waveform zone
+    def get_spike_raw_waveforms(self, block_index=0, seg_index=0, unit_index=0,
+                                t_start=None, t_stop=None):
+        wf = self._get_spike_raw_waveforms(block_index, seg_index, unit_index, t_start, t_stop)
+        return wf
+
+    def rescale_waveforms_to_float(self, raw_waveforms, dtype='float32', unit_index=0):
+        wf_gain = self.header['unit_channels']['wf_gain'][unit_index]
+        wf_offset = self.header['unit_channels']['wf_offset'][unit_index]
+
+        float_waveforms = raw_waveforms.astype(dtype)
+
+        if wf_gain != 1.:
+            float_waveforms *= wf_gain
+        if wf_offset != 0.:
+            float_waveforms += wf_offset
+
+        return float_waveforms
+
+    # event and epoch zone
+    def event_count(self, block_index=0, seg_index=0, event_channel_index=0):
+        return self._event_count(block_index, seg_index, event_channel_index)
+
+    def get_event_timestamps(self, block_index=0, seg_index=0, event_channel_index=0,
+                             t_start=None, t_stop=None):
+        """
+        The timestamps are kept as close as possible to the native format
+        (sometimes float/int32/int64, sometimes an index on the signal, but not always).
+        The conversion to seconds or to index_on_signal is done outside of this method.
+
+        t_start/t_stop are limits in seconds.
+
+        Returns:
+            timestamp
+            durations
+            labels
+
+        """
+        timestamp, durations, labels = self._get_event_timestamps(
+            block_index, seg_index, event_channel_index, t_start, t_stop)
+        return timestamp, durations, labels
+
+    def rescale_event_timestamp(self, event_timestamps, dtype='float64'):
+        """
+        Rescale event timestamps to s
+        """
+        return self._rescale_event_timestamp(event_timestamps, dtype)
+
+    def rescale_epoch_duration(self, raw_duration, dtype='float64'):
+        """
+        Rescale epoch raw duration to s
+        """
+        return self._rescale_epoch_duration(raw_duration, dtype)
+
+    def setup_cache(self, cache_path, **init_kargs):
+        if self.rawmode in ('one-file', 'multi-file'):
+            ressource_name = self.filename
+        elif self.rawmode == 'one-dir':
+            ressource_name = self.dirname
+        else:
+            raise (NotImplementedError)
+
+        if cache_path == 'home':
+            if sys.platform.startswith('win'):
+                dirname = os.path.join(os.environ['APPDATA'], 'neo_rawio_cache')
+            elif sys.platform.startswith('darwin'):
+                dirname = '~/Library/Application Support/neo_rawio_cache'
+            else:
+                dirname = os.path.expanduser('~/.config/neo_rawio_cache')
+            dirname = os.path.join(dirname, self.__class__.__name__)
+
+            if not os.path.exists(dirname):
+                os.makedirs(dirname)
+        elif cache_path == 'same_as_resource':
+            dirname = os.path.dirname(ressource_name)
+        else:
+            assert os.path.exists(cache_path), \
+                'cache_path does not exist; use "home" or "same_as_resource" to set it automatically'
+            # use the provided existing directory directly as the cache location
+            dirname = cache_path
+
+        # the hash of the resource (dir or file) is computed from its name + modification time
+        # TODO: do something more sophisticated when rawmode='one-dir', using all file names and mtimes
+        d = dict(ressource_name=ressource_name, mtime=os.path.getmtime(ressource_name))
+        hash = joblib.hash(d, hash_name='md5')
+
+        # the cache file name combines the real name and the hash
+        name = '{}_{}'.format(os.path.basename(ressource_name), hash)
+        self.cache_filename = os.path.join(dirname, name)
+
+        if os.path.exists(self.cache_filename):
+            self.logger.warning('Use existing cache file {}'.format(self.cache_filename))
+            self._cache = joblib.load(self.cache_filename)
+        else:
+            self.logger.warning('Create cache file {}'.format(self.cache_filename))
+            self._cache = {}
+            self.dump_cache()
+
+    def add_in_cache(self, **kargs):
+        assert self.use_cache
+        self._cache.update(kargs)
+        self.dump_cache()
+
+    def dump_cache(self):
+        assert self.use_cache
+        joblib.dump(self._cache, self.cache_filename)
+
+    ##################
+
+    # Functions to be implemented by each IO below this point
+
+    def _parse_header(self):
+        raise (NotImplementedError)
+        # must call
+        # self._generate_minimal_annotations()
+
+    def _source_name(self):
+        raise (NotImplementedError)
+
+    def _segment_t_start(self, block_index, seg_index):
+        raise (NotImplementedError)
+
+    def _segment_t_stop(self, block_index, seg_index):
+        raise (NotImplementedError)
+
+    ###
+    # signal and channel zone
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        raise (NotImplementedError)
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        raise (NotImplementedError)
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        raise (NotImplementedError)
+
+    ###
+    # spiketrain and unit zone
+    def _spike_count(self, block_index, seg_index, unit_index):
+        raise (NotImplementedError)
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        raise (NotImplementedError)
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        raise (NotImplementedError)
+
+    ###
+    # spike waveforms zone
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        raise (NotImplementedError)
+
+    ###
+    # event and epoch zone
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        raise (NotImplementedError)
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        raise (NotImplementedError)
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        raise (NotImplementedError)
+
+    def _rescale_epoch_duration(self, raw_duration, dtype):
+        raise (NotImplementedError)

+ 375 - 0
code/python-neo/neo/rawio/bci2000rawio.py

@@ -0,0 +1,375 @@
+# -*- coding: utf-8 -*-
+"""
+BCI2000RawIO is a class to read BCI2000 .dat files.
+https://www.bci2000.org/mediawiki/index.php/Technical_Reference:BCI2000_File_Format
+"""
+from __future__ import print_function, division, absolute_import  # unicode_literals
+
+from .baserawio import BaseRawIO, _signal_channel_dtype, _unit_channel_dtype, _event_channel_dtype
+
+import numpy as np
+import io
+import os
+import re
+import time
+
+try:
+    from urllib.parse import unquote
+except ImportError:
+    from urllib import url2pathname as unquote
+
+
+class BCI2000RawIO(BaseRawIO):
+    """
+    Class for reading data from a BCI2000 .dat file, either version 1.0 or 1.1
+    """
+    extensions = ['dat']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+        self._my_events = None
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+        file_info, state_defs, param_defs = parse_bci2000_header(self.filename)
+
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+
+        sig_channels = []
+        for chan_ix in range(file_info['SourceCh']):
+            ch_name = param_defs['ChannelNames']['value'][chan_ix] \
+                if 'ChannelNames' in param_defs else 'ch' + str(chan_ix)
+            chan_id = chan_ix + 1
+            sr = param_defs['SamplingRate']['value']  # Hz
+            dtype = file_info['DataFormat']
+            units = 'uV'
+            gain = param_defs['SourceChGain']['value'][chan_ix]
+            offset = param_defs['SourceChOffset']['value'][chan_ix]
+            group_id = 0
+            sig_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, group_id))
+        self.header['signal_channels'] = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        self.header['unit_channels'] = np.array([], dtype=_unit_channel_dtype)
+
+        # creating event channel for each state variable
+        event_channels = []
+        for st_ix, st_tup in enumerate(state_defs):
+            event_channels.append((st_tup[0], 'ev_' + str(st_ix), 'event'))
+        self.header['event_channels'] = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # Add annotations.
+
+        # Generates basic annotations in nested dict self.raw_annotations
+        self._generate_minimal_annotations()
+
+        self.raw_annotations['blocks'][0].update({
+            'file_info': file_info,
+            'param_defs': param_defs
+        })
+        for ev_ix, ev_dict in enumerate(self.raw_annotations['event_channels']):
+            ev_dict.update({
+                'length': state_defs[ev_ix][1],
+                'startVal': state_defs[ev_ix][2],
+                'bytePos': state_defs[ev_ix][3],
+                'bitPos': state_defs[ev_ix][4]
+            })
+
+        time_formats = ['%a %b %d %H:%M:%S %Y', '%Y-%m-%dT%H:%M:%S']
+        try:
+            self._global_time = time.mktime(time.strptime(param_defs['StorageTime']['value'],
+                                                          time_formats[0]))
+        except ValueError:
+            self._global_time = time.mktime(time.strptime(param_defs['StorageTime']['value'],
+                                                          time_formats[1]))
+
+        # Save variables to make it easier to load the binary data.
+        self._read_info = {
+            'header_len': file_info['HeaderLen'],
+            'n_chans': file_info['SourceCh'],
+            'sample_dtype': {
+                'int16': np.int16,
+                'int32': np.int32,
+                'float32': np.float32}.get(file_info['DataFormat']),
+            'state_vec_len': file_info['StatevectorLen'],
+            'sampling_rate': param_defs['SamplingRate']['value']
+        }
+        # Calculate the dtype for a single timestamp of data. This contains the data + statevector
+        self._read_info['line_dtype'] = [
+            ('raw_vector', self._read_info['sample_dtype'], self._read_info['n_chans']),
+            ('state_vector', np.uint8, self._read_info['state_vec_len'])]
+        self._read_info['n_samps'] = int((os.stat(self.filename).st_size - file_info['HeaderLen'])
+                                         / np.dtype(self._read_info['line_dtype']).itemsize)
+
+        # memmap is fast so we can get the data ready for reading now.
+        self._memmap = np.memmap(self.filename, dtype=self._read_info['line_dtype'],
+                                 offset=self._read_info['header_len'], mode='r')
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        return self._read_info['n_samps'] / self._read_info['sampling_rate']
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes=None):
+        return self._read_info['n_samps']
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = self._read_info['n_samps']
+        assert (0 <= i_start <= self._read_info['n_samps']), "i_start outside data range"
+        assert (0 <= i_stop <= self._read_info['n_samps']), "i_stop outside data range"
+        if channel_indexes is None:
+            channel_indexes = np.arange(self.header['signal_channels'].size)
+        return self._memmap['raw_vector'][i_start:i_stop, channel_indexes]
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        return 0
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        return None
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        return None
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        return None
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        return self._event_arrays_list[event_channel_index][0].shape[0]
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        # Return 3 numpy arrays: timestamp, durations, labels
+        # durations must be None for 'event'
+        # labels must have dtype 'U'
+        ts, dur, labels = self._event_arrays_list[event_channel_index]
+        # seg_t_start = self._segment_t_start(block_index, seg_index)
+        keep = np.ones(ts.shape, dtype=bool)
+        if t_start is not None:
+            keep = np.logical_and(keep, ts >= t_start)
+        if t_stop is not None:
+            keep = np.logical_and(keep, ts <= t_stop)
+        return ts[keep], dur[keep], labels[keep]
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = (event_timestamps / float(self._read_info['sampling_rate'])).astype(dtype)
+        return event_times
+
+    def _rescale_epoch_duration(self, raw_duration, dtype):
+        durations = (raw_duration / float(self._read_info['sampling_rate'])).astype(dtype)
+        return durations
+
+    @property
+    def _event_arrays_list(self):
+        if self._my_events is None:
+            self._my_events = []
+            for s_ix, sd in enumerate(self.raw_annotations['event_channels']):
+                ev_times = durs = vals = np.array([])
+                # Skip these big but mostly useless (?) states.
+                if sd['name'] not in ['SourceTime', 'StimulusTime']:
+                    # Determine which bytes of self._memmap['state_vector'] are needed.
+                    nbytes = int(np.ceil((sd['bitPos'] + sd['length']) / 8))
+                    byte_slice = slice(sd['bytePos'], sd['bytePos'] + nbytes)
+                    # Then determine how to mask those bytes to get only the needed bits.
+                    bit_mask = np.array([255] * nbytes, dtype=np.uint8)
+                    bit_mask[0] &= 255 & (255 << sd['bitPos'])  # Fix the mask for the first byte
+                    extra_bits = 8 - (sd['bitPos'] + sd['length']) % 8
+                    bit_mask[-1] &= 255 & (255 >> extra_bits)  # Fix the mask for the last byte
+                    # When converting to an int, we need to know which integer type it will become
+                    n_max_bytes = 1 << (nbytes - 1).bit_length()
+                    view_type = {1: np.int8, 2: np.int16,
+                        4: np.int32, 8: np.int64}.get(n_max_bytes)
+                    # Slice and mask the data
+                    masked_byte_array = self._memmap['state_vector'][:, byte_slice] & bit_mask
+                    # Convert byte array to a vector of ints:
+                    # pad to give even columns then view as larger int type
+                    state_vec = np.pad(masked_byte_array,
+                                       (0, n_max_bytes - nbytes),
+                                       'constant').view(dtype=view_type)
+                    state_vec = np.right_shift(state_vec, sd['bitPos'])[:, 0]
+
+                    # In the state vector, find 'events' whenever the state changes
+                    st_ch_ix = np.where(np.hstack((0, np.diff(state_vec))) != 0)[0]  # event inds
+                    if len(st_ch_ix) > 0:
+                        ev_times = st_ch_ix
+                        durs = np.asarray([None] * len(st_ch_ix))
+                        # np.hstack((np.diff(st_ch_ix), len(state_vec) - st_ch_ix[-1]))
+                        vals = np.char.mod('%d', state_vec[st_ch_ix])  # event val, string'd
+
+                self._my_events.append([ev_times, durs, vals])
+
+        return self._my_events
+
+
+def parse_bci2000_header(filename):
+    # typically we want parameter values in Hz, seconds, or microvolts.
+    scales_dict = {
+        'hz': 1, 'khz': 1000, 'mhz': 1000000,
+        'uv': 1, 'muv': 1, 'mv': 1000, 'v': 1000000,
+        's': 1, 'us': 0.000001, 'mus': 0.000001, 'ms': 0.001, 'min': 60,
+        'sec': 1, 'usec': 0.000001, 'musec': 0.000001, 'msec': 0.001
+    }
+
+    def rescale_value(param_val, data_type):
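+        # e.g. rescale_value('500ms', 'float') -> (0.5, 'ms'), since 'ms' scales by 0.001
+        # (illustrative value).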
+        unit_str = ''
+        if param_val.lower().startswith('0x'):
+            param_val = int(param_val, 16)
+        elif data_type in ['int', 'float']:
+            matches = re.match(r'(-*\d+)(\w*)', param_val)
+            if matches is not None:  # Can be None for % in def, min, max vals
+                param_val, unit_str = matches.group(1), matches.group(2)
+                param_val = int(param_val) if data_type == 'int' else float(param_val)
+                if len(unit_str) > 0:
+                    param_val *= scales_dict.get(unit_str.lower(), 1)
+        else:
+            param_val = unquote(param_val)
+        return param_val, unit_str
+
+    def parse_dimensions(param_list):
+        num_els = param_list.pop(0)
+        # Sometimes the number of elements isn't given,
+        # but the list of element labels is wrapped with {}
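+        # e.g. parse_dimensions(['3', 'a', 'b', 'c', ...]) -> (3, ['0', '1', '2']),
+        # while parse_dimensions(['{', 'Left', 'Right', '}', ...]) -> (2, ['Left', 'Right'])
+        # (illustrative values).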
+        if num_els == '{':
+            num_els = param_list.index('}')
+            el_labels = [unquote(param_list.pop(0)) for x in range(num_els)]
+            param_list.pop(0)  # Remove the '}'
+        else:
+            num_els = int(num_els)
+            el_labels = [str(ix) for ix in range(num_els)]
+        return num_els, el_labels
+
+    with io.open(filename, 'rb') as fid:
+
+        # Parse the file header (plain text)
+
+        # The first line contains basic information which we store in a dictionary.
+        temp = fid.readline().decode('utf8').split()
+        keys = [k.rstrip('=') for k in temp[::2]]
+        vals = temp[1::2]
+        # Insert default version and format
+        file_info = {'BCI2000V': 1.0, 'DataFormat': 'int16'}
+        file_info.update(**dict(zip(keys, vals)))
+        # From string to float/int
+        file_info['BCI2000V'] = float(file_info['BCI2000V'])
+        for k in ['HeaderLen', 'SourceCh', 'StatevectorLen']:
+            if k in file_info:
+                file_info[k] = int(file_info[k])
+
+        # The next lines contain state vector definitions.
+        temp = fid.readline().decode('utf8').strip()
+        assert temp == '[ State Vector Definition ]', \
+            "State definitions not found in header %s" % filename
+        state_defs = []
+        state_def_dtype = [('name', 'a64'),
+                           ('length', int),
+                           ('startVal', int),
+                           ('bytePos', int),
+                           ('bitPos', int)]
+        while True:
+            temp = fid.readline().decode('utf8').strip()
+            if len(temp) == 0 or temp[0] == '[':
+                # Presence of '[' signifies new section.
+                break
+            temp = temp.split()
+            state_defs.append((temp[0], int(temp[1]), int(temp[2]), int(temp[3]), int(temp[4])))
+        state_defs = np.array(state_defs, dtype=state_def_dtype)
+
+        # The next lines contain parameter definitions.
+        # There are many, and their formatting can be complicated.
+        assert temp == '[ Parameter Definition ]', \
+            "Parameter definitions not found in header %s" % filename
+        param_defs = {}
+        while True:
+            temp = fid.readline().decode('utf8')
+            if fid.tell() >= file_info['HeaderLen']:
+                # End of header.
+                break
+            if len(temp.strip()) == 0:
+                continue  # Skip empty lines
+            # Everything after the '//' is a comment.
+            temp = temp.strip().split('//', 1)
+            param_def = {'comment': temp[1].strip() if len(temp) > 1 else ''}
+            # Parse the parameter definition. Generally it is sec:cat:name dtype name param_value+
+            temp = temp[0].split()
+            param_def.update(
+                {'section_category_name': [unquote(x) for x in temp.pop(0).split(':')]})
+            dtype = temp.pop(0)
+            param_name = unquote(temp.pop(0).rstrip('='))
+            # Parse the rest. Parse method depends on the dtype
+            param_value, units = None, None
+            if dtype in ('int', 'float'):
+                param_value = temp.pop(0)
+                if param_value == 'auto':
+                    param_value = np.nan
+                    units = ''
+                else:
+                    param_value, units = rescale_value(param_value, dtype)
+            elif dtype in ('string', 'variant'):
+                param_value = unquote(temp.pop(0))
+            elif dtype.endswith('list'):  # e.g., intlist, stringlist, floatlist, list
+                dtype = dtype[:-4]
+                # The list parameter values will begin with either
+                # an int to specify the number of elements
+                # or a list of labels surrounded by { }.
+                num_elements, element_labels = parse_dimensions(temp)  # This will pop off info.
+                param_def.update({'element_labels': element_labels})
+                pv_un = [rescale_value(pv, dtype) for pv in temp[:num_elements]]
+                if len(pv_un) > 0:
+                    param_value, units = zip(*pv_un)
+                else:
+                    param_value, units = np.nan, ''
+                temp = temp[num_elements:]
+                # Sometimes an element list will be a list of ints even though
+                # the element_type is '' (str)...
+                # This usually happens for known parameters, such as SourceChOffset,
+                # that can be dealt with explicitly later.
+            elif dtype.endswith('matrix'):
+                dtype = dtype[:-6]
+                # The parameter values will be preceded by two dimension descriptors,
+                # first rows then columns. Each dimension might be described by an
+                # int or a list of labels surrounded by {}
+                n_rows, row_labels = parse_dimensions(temp)
+                n_cols, col_labels = parse_dimensions(temp)
+                param_def.update({'row_labels': row_labels, 'col_labels': col_labels})
+
+                param_value = []
+                units = []
+                for row_ix in range(n_rows):
+                    cols = []
+                    for col_ix in range(n_cols):
+                        col_val, _units = rescale_value(temp[row_ix * n_cols + col_ix], dtype)
+                        cols.append(col_val)
+                        units.append(_units)
+                    param_value.append(cols)
+                temp = temp[n_rows * n_cols:]
+
+            param_def.update({
+                'value': param_value,
+                'units': units,
+                'dtype': dtype
+            })
+
+            # At the end of the parameter definition, we might get
+            # default, min, max values for the parameter.
+            temp.reverse()
+            if len(temp):
+                param_def.update({'max_val': rescale_value(temp.pop(0), dtype)})
+            if len(temp):
+                param_def.update({'min_val': rescale_value(temp.pop(0), dtype)})
+            if len(temp):
+                param_def.update({'default_val': rescale_value(temp.pop(0), dtype)})
+
+            param_defs.update({param_name: param_def})
+            # End parameter block
+    # Outdent to close file
+    return file_info, state_defs, param_defs

File diff suppressed because it is too large
+ 1925 - 0
code/python-neo/neo/rawio/blackrockrawio.py


+ 199 - 0
code/python-neo/neo/rawio/brainvisionrawio.py

@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from BrainVision product.
+
+This code was originally written by L. Pezard (2010), then modified by B. Burle and
+S. More.
+
+Author: Samuel Garcia
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import datetime
+import os
+import re
+import io
+
+
+class BrainVisionRawIO(BaseRawIO):
+    """
+    Class for reading BrainVision recordings (a .vhdr header file pointing to data and marker files).
+    """
+    extensions = ['vhdr']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _parse_header(self):
+        # Read header file (vhdr)
+        vhdr_header = read_brainvsion_soup(self.filename)
+
+        bname = os.path.basename(self.filename)
+        marker_filename = self.filename.replace(bname, vhdr_header['Common Infos']['MarkerFile'])
+        binary_filename = self.filename.replace(bname, vhdr_header['Common Infos']['DataFile'])
+
+        assert vhdr_header['Common Infos'][
+            'DataFormat'] == 'BINARY', NotImplementedError
+        assert vhdr_header['Common Infos'][
+            'DataOrientation'] == 'MULTIPLEXED', NotImplementedError
+
+        nb_channel = int(vhdr_header['Common Infos']['NumberOfChannels'])
+        sr = 1.e6 / float(vhdr_header['Common Infos']['SamplingInterval'])
+        self._sampling_rate = sr
+
+        fmt = vhdr_header['Binary Infos']['BinaryFormat']
+        fmts = {'INT_16': np.int16, 'INT_32': np.int32, 'IEEE_FLOAT_32': np.float32, }
+
+        assert fmt in fmts, NotImplementedError
+        sig_dtype = fmts[fmt]
+
+        # raw signals memmap
+        sigs = np.memmap(binary_filename, dtype=sig_dtype, mode='r', offset=0)
+        if sigs.size % nb_channel != 0:
+            # drop trailing samples that do not fill a complete frame
+            sigs = sigs[:-(sigs.size % nb_channel)]
+        self._raw_signals = sigs.reshape(-1, nb_channel)
+
+        sig_channels = []
+        for c in range(nb_channel):
+            name, ref, res, units = vhdr_header['Channel Infos'][
+                'Ch%d' % (c + 1,)].split(',')
+            units = units.replace('µ', 'u')
+            chan_id = c + 1
+            if sig_dtype == np.int16 or sig_dtype == np.int32:
+                gain = float(res)
+            else:
+                gain = 1
+            offset = 0
+            group_id = 0
+            sig_channels.append((name, chan_id, self._sampling_rate, sig_dtype,
+                                 units, gain, offset, group_id))
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # read all markers in memory
+
+        all_info = read_brainvsion_soup(marker_filename)['Marker Infos']
+        ev_types = []
+        ev_timestamps = []
+        ev_labels = []
+        for i in range(len(all_info)):
+            ev_type, ev_label, pos, size, channel = all_info[
+                'Mk%d' % (i + 1,)].split(',')[:5]
+            ev_types.append(ev_type)
+            ev_timestamps.append(int(pos))
+            ev_labels.append(ev_label)
+        ev_types = np.array(ev_types)
+        ev_timestamps = np.array(ev_timestamps)
+        ev_labels = np.array(ev_labels, dtype='U')
+
+        # group them by types
+        self._raw_events = []
+        event_channels = []
+        for c, ev_type in enumerate(np.unique(ev_types)):
+            ind = (ev_types == ev_type)
+            event_channels.append((ev_type, '', 'event'))
+
+            self._raw_events.append((ev_timestamps[ind], ev_labels[ind]))
+
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # fill into header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        self._generate_minimal_annotations()
+        for c in range(sig_channels.size):
+            coords = vhdr_header['Coordinates']['Ch{}'.format(c + 1)]
+            coords = [float(v) for v in coords.split(',')]
+            if coords[0] > 0.:
+                # if radius is 0 we do not have coordinates.
+                self.raw_annotations['signal_channels'][c]['coordinates'] = coords
+
+    def _source_name(self):
+        return self.filename
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals.shape[0] / self._sampling_rate
+        return t_stop
+
+    ###
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._raw_signals.shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
+        return raw_signals
+
+    ###
+    def _spike_count(self, block_index, seg_index, unit_index):
+        return 0
+
+    ###
+    # event and epoch zone
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        all_timestamps, all_label = self._raw_events[event_channel_index]
+        return all_timestamps.size
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        timestamps, labels = self._raw_events[event_channel_index]
+
+        if t_start is not None:
+            keep = timestamps >= int(t_start * self._sampling_rate)
+            timestamps = timestamps[keep]
+            labels = labels[keep]
+
+        if t_stop is not None:
+            keep = timestamps <= int(t_stop * self._sampling_rate)
+            timestamps = timestamps[keep]
+            labels = labels[keep]
+
+        durations = None
+
+        return timestamps, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype) / self._sampling_rate
+        return event_times
+
+
+def read_brainvsion_soup(filename):
+    with io.open(filename, 'r', encoding='utf8') as f:
+        section = None
+        all_info = {}
+        for line in f:
+            line = line.strip('\n').strip('\r')
+            if line.startswith('['):
+                section = re.findall(r'\[([\S ]+)\]', line)[0]
+                all_info[section] = {}
+                continue
+            if line.startswith(';'):
+                continue
+            if '=' in line and len(line.split('=')) == 2:
+                k, v = line.split('=')
+                all_info[section][k] = v
+
+    return all_info
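As with the other RawIO classes added in this changeset, BrainVisionRawIO is used through the generic BaseRawIO interface (the same calls as in the ExampleRawIO docstring later in this diff). A hedged usage sketch; the filename is a placeholder and the class is assumed to be exported by the neo.rawio package updated in this changeset:

    from neo.rawio import BrainVisionRawIO

    reader = BrainVisionRawIO(filename='recording.vhdr')  # placeholder path
    reader.parse_header()

    # raw samples (int16/int32/float32, depending on BinaryFormat) for all channels
    raw_chunk = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                              i_start=0, i_stop=1000,
                                              channel_indexes=None)
    # apply the per-channel gain/offset from the header to get physical units
    float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, dtype='float64')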

+ 233 - 0
code/python-neo/neo/rawio/elanrawio.py

@@ -0,0 +1,233 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from Elan.
+
+Elan is software for studying time-frequency maps of EEG data.
+
+Elan is developed in Lyon, France, at INSERM U821
+
+https://elan.lyon.inserm.fr
+
+An Elan dataset is made up of 3 files:
+ - .eeg          raw data file
+ - .eeg.ent      header file
+ - .eeg.pos      event file
+
+Author: Samuel Garcia
+
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import datetime
+import os
+import re
+import io
+
+
+class ElanRawIO(BaseRawIO):
+    extensions = ['eeg']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _parse_header(self):
+
+        with io.open(self.filename + '.ent', mode='rt', encoding='ascii', newline=None) as f:
+
+            # version
+            version = f.readline()[:-1]
+            assert version in ['V2', 'V3'], 'Read only V2 or V3 .eeg.ent files. %s given' % version
+
+            # info
+            info1 = f.readline()[:-1]
+            info2 = f.readline()[:-1]
+
+            # strange: 2 lines for the datetime
+            # line1
+            l = f.readline()
+            r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
+            r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
+            r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
+            YY, MM, DD, hh, mm, ss = (None,) * 6
+            if len(r1) != 0:
+                DD, MM, YY, hh, mm, ss = r1[0]
+            elif len(r2) != 0:
+                hh, mm, ss = r2[0]
+            elif len(r3) != 0:
+                DD, MM, YY = r3[0]
+
+            # line2
+            l = f.readline()
+            r1 = re.findall(r'(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', l)
+            r2 = re.findall(r'(\d+):(\d+):(\d+)', l)
+            r3 = re.findall(r'(\d+)-(\d+)-(\d+)', l)
+            if len(r1) != 0:
+                DD, MM, YY, hh, mm, ss = r1[0]
+            elif len(r2) != 0:
+                hh, mm, ss = r2[0]
+            elif len(r3) != 0:
+                DD, MM, YY = r3[0]
+            try:
+                fulldatetime = datetime.datetime(int(YY), int(MM), int(DD),
+                                                 int(hh), int(mm), int(ss))
+            except:
+                fulldatetime = None
+
+            l = f.readline()
+            l = f.readline()
+            l = f.readline()
+
+            # sampling interval (converted below to sampling rate)
+            l = f.readline()
+            self._sampling_rate = 1. / float(l)
+
+            # nb channel
+            l = f.readline()
+            nb_channel = int(l) - 2
+
+            channel_infos = [{} for c in range(nb_channel + 2)]
+            # channel label
+            for c in range(nb_channel + 2):
+                channel_infos[c]['label'] = f.readline()[:-1]
+            # channel kind
+            for c in range(nb_channel + 2):
+                channel_infos[c]['kind'] = f.readline()[:-1]
+            # channel unit
+            for c in range(nb_channel + 2):
+                channel_infos[c]['units'] = f.readline()[:-1]
+            # range for gain and offset
+            for c in range(nb_channel + 2):
+                channel_infos[c]['min_physic'] = float(f.readline()[:-1])
+            for c in range(nb_channel + 2):
+                channel_infos[c]['max_physic'] = float(f.readline()[:-1])
+            for c in range(nb_channel + 2):
+                channel_infos[c]['min_logic'] = float(f.readline()[:-1])
+            for c in range(nb_channel + 2):
+                channel_infos[c]['max_logic'] = float(f.readline()[:-1])
+
+            # info filter
+            info_filter = []
+            for c in range(nb_channel + 2):
+                channel_infos[c]['info_filter'] = f.readline()[:-1]
+
+        n = int(round(np.log(channel_infos[0]['max_logic'] -
+                             channel_infos[0]['min_logic']) / np.log(2)) / 8)
+        sig_dtype = np.dtype('>i' + str(n))
+
+        sig_channels = []
+        for c, chan_info in enumerate(channel_infos[:-2]):
+            chan_name = chan_info['label']
+            chan_id = c
+
+            gain = (chan_info['max_physic'] - chan_info['min_physic']) / \
+                   (chan_info['max_logic'] - chan_info['min_logic'])
+            offset = - chan_info['min_logic'] * gain + chan_info['min_physic']
+            group_id = 0
+            sig_channels.append((chan_name, chan_id, self._sampling_rate, sig_dtype,
+                                 chan_info['units'], gain, offset, group_id))
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # raw data
+        self._raw_signals = np.memmap(self.filename, dtype=sig_dtype, mode='r',
+                                      offset=0).reshape(-1, nb_channel + 2)
+        self._raw_signals = self._raw_signals[:, :-2]
+
+        # triggers
+        with io.open(self.filename + '.pos', mode='rt', encoding='ascii', newline=None) as f:
+            self._raw_event_timestamps = []
+            self._event_labels = []
+            self._reject_codes = []
+            for l in f.readlines():
+                r = re.findall(r' *(\d+) *(\d+) *(\d+) *', l)
+                self._raw_event_timestamps.append(int(r[0][0]))
+                self._event_labels.append(str(r[0][1]))
+                self._reject_codes.append(str(r[0][2]))
+
+        self._raw_event_timestamps = np.array(self._raw_event_timestamps, dtype='int64')
+        self._event_labels = np.array(self._event_labels, dtype='U')
+        self._reject_codes = np.array(self._reject_codes, dtype='U')
+
+        event_channels = []
+        event_channels.append(('Trigger', '', 'event'))
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill into header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # insert some annotations at the appropriate places
+        self._generate_minimal_annotations()
+        extra_info = dict(rec_datetime=fulldatetime, elan_version=version,
+                          info1=info1, info2=info2)
+        for obj_name in ('blocks', 'segments'):
+            self._raw_annotate(obj_name, **extra_info)
+        for c in range(nb_channel):
+            d = channel_infos[c]
+            self._raw_annotate('signals', chan_index=c, info_filter=d['info_filter'])
+            self._raw_annotate('signals', chan_index=c, kind=d['kind'])
+        self._raw_annotate('events', chan_index=0, reject_codes=self._reject_codes)
+
+    def _source_name(self):
+        return self.filename
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals.shape[0] / self._sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes=None):
+        return self._raw_signals.shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes=None):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
+        return raw_signals
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        return 0
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        return self._raw_event_timestamps.size
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        timestamp = self._raw_event_timestamps
+        labels = self._event_labels
+        durations = None
+
+        if t_start is not None:
+            keep = timestamp >= int(t_start * self._sampling_rate)
+            timestamp = timestamp[keep]
+            labels = labels[keep]
+
+        if t_stop is not None:
+            keep = timestamp <= int(t_stop * self._sampling_rate)
+            timestamp = timestamp[keep]
+            labels = labels[keep]
+
+        return timestamp, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype) / self._sampling_rate
+        return event_times
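The gain and offset computed from the min/max physic and logic values above follow the usual rawio linear scaling, real = raw * gain + offset (see the ExampleRawIO comments later in this diff). A quick check with made-up header values:

    # hypothetical header values for one channel
    min_physic, max_physic = -3276.8, 3276.7   # physical range, e.g. in uV
    min_logic, max_logic = -32768.0, 32767.0   # int16 logical range

    gain = (max_physic - min_physic) / (max_logic - min_logic)
    offset = -min_logic * gain + min_physic

    # the extreme logical values map back to the physical bounds
    print(min_logic * gain + offset)  # -3276.8 (up to float rounding)
    print(max_logic * gain + offset)  # 3276.7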

+ 369 - 0
code/python-neo/neo/rawio/examplerawio.py

@@ -0,0 +1,369 @@
+# -*- coding: utf-8 -*-
+"""
+ExampleRawIO is a fake example class.
+It is to be used as a template when coding a new RawIO.
+
+
+Rules for creating a new class:
+  1. Step 1: Create the main class
+    * Create a file in **neo/rawio/** whose name ends with "rawio.py"
+    * Create the class that inherits BaseRawIO
+    * copy/paste all methods that need to be implemented.
+      See the end of neo.rawio.baserawio.BaseRawIO
+    * code hard! The main difficulty **is _parse_header()**.
+      In short, you have to create a mandatory dict that
+      contains the channel information::
+
+            self.header = {}
+            self.header['nb_block'] = 2
+            self.header['nb_segment'] = [2, 3]
+            self.header['signal_channels'] = sig_channels
+            self.header['unit_channels'] = unit_channels
+            self.header['event_channels'] = event_channels
+
+  2. Step 2: RawIO test:
+    * create a file in neo/rawio/tests with the same name, prefixed with "test_"
+    * copy/paste neo/rawio/tests/test_examplerawio.py and do the same
+
+  3. Step 3: Create the neo.io class with the wrapper
+    * Create a file in neo/io/ whose name ends with "io.py"
+    * Create a class that inherits both your RawIO class and the BaseFromRaw class
+      (a sketch of such a wrapper is given after this file's diff)
+    * copy/paste from neo/io/exampleio.py
+
+  4. Step 4: IO test
+    * create a file in neo/test/iotest with the same name, prefixed with "test_"
+    * copy/paste from neo/test/iotest/test_exampleio.py
+
+
+
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+
+class ExampleRawIO(BaseRawIO):
+    """
+    Class for "reading" fake data from an imaginary file.
+
+    For the user, it gives access to raw data (signals, events, spikes) as they
+    are in the (fake) file, as int16 and int64.
+
+    For a developer, it is just an example showing guidelines for someone who wants
+    to develop a new IO module.
+
+    Two rules for developers:
+      * Respect the Neo RawIO API (:ref:`_neo_rawio_API`)
+      * Follow :ref:`_io_guiline`
+
+    This fake IO:
+        * has 2 blocks
+        * the blocks have 2 and 3 segments
+        * has 16 signal_channels with sample_rate = 10000
+        * has 3 unit_channels
+        * has 2 event channels: one has *type=event*, the other has
+          *type=epoch*
+
+
+    Usage:
+        >>> import neo.rawio
+        >>> reader = neo.rawio.ExampleRawIO(filename='itisafake.nof')
+        >>> reader.parse_header()
+        >>> print(reader)
+        >>> raw_chunk = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
+                            i_start=0, i_stop=1024,  channel_names=channel_names)
+        >>> float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, dtype='float64',
+                            channel_indexes=[0, 3, 6])
+        >>> spike_timestamp = reader.spike_timestamps(unit_index=0, t_start=None, t_stop=None)
+        >>> spike_times = reader.rescale_spike_timestamp(spike_timestamp, 'float64')
+        >>> ev_timestamps, _, ev_labels = reader.event_timestamps(event_channel_index=0)
+
+    """
+    extensions = ['fake']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        # note that this filename is used in self._source_name
+        self.filename = filename
+
+    def _source_name(self):
+        # this function is used by __repr__
+        # for general cases self.filename is good
+        # But for URL you could mask some part of the URL to keep
+        # the main part.
+        return self.filename
+
+    def _parse_header(self):
+        # This is the central method of a RawIO
+        # we need to collect from the original format all the
+        # information needed for later fast access
+        # to any place in the file
+        # In short, _parse_header() can be slow but
+        # _get_analogsignal_chunk() needs to be as fast as possible
+
+        # create signals channels information
+        # This is mandatory!!!!
+        # gain/offset/units are really important because
+        # the scaling to real value will be done with that
+        # at the end real_signal = (raw_signal* gain + offset) * pq.Quantity(units)
+        sig_channels = []
+        for c in range(16):
+            ch_name = 'ch{}'.format(c)
+            # our channel id is c+1 just for fun
+            # Note that chan_id should be related to the
+            # original channel id in the file format
+            # so that the end user is not lost when reading datasets
+            chan_id = c + 1
+            sr = 10000.  # Hz
+            dtype = 'int16'
+            units = 'uV'
+            gain = 1000. / 2 ** 16
+            offset = 0.
+            # group_id is only for special cases, for instance when channels have
+            # different sampling rates. See TdtIO for that.
+            # Here this is the general case: all channels have the same characteristics
+            group_id = 0
+            sig_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, group_id))
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # creating units channels
+        # This is mandatory!!!!
+        # Note that if there is no waveform at all in the file
+        # then wf_units/wf_gain/wf_offset/wf_left_sweep/wf_sampling_rate
+        # can be set to any value because _spike_raw_waveforms
+        # will return None
+        unit_channels = []
+        for c in range(3):
+            unit_name = 'unit{}'.format(c)
+            unit_id = '#{}'.format(c)
+            wf_units = 'uV'
+            wf_gain = 1000. / 2 ** 16
+            wf_offset = 0.
+            wf_left_sweep = 20
+            wf_sampling_rate = 10000.
+            unit_channels.append((unit_name, unit_id, wf_units, wf_gain,
+                                  wf_offset, wf_left_sweep, wf_sampling_rate))
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # creating event/epoch channel
+        # This is mandatory!!!!
+        # In RawIO, epochs and events are dealt with in the same way.
+        event_channels = []
+        event_channels.append(('Some events', 'ev_0', 'event'))
+        event_channels.append(('Some epochs', 'ep_1', 'epoch'))
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # fill into header dict
+        # This is mandatory!!!!!
+        self.header = {}
+        self.header['nb_block'] = 2
+        self.header['nb_segment'] = [2, 3]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # insert some annotations at the appropriate places
+        # at the neo.io level, IOs are free to add annotations
+        # to any object. To keep this functionality with the wrapper
+        # BaseFromRaw you can add annotations in a nested dict.
+        self._generate_minimal_annotations()
+        # If you are a lazy dev you can stop here.
+        for block_index in range(2):
+            bl_ann = self.raw_annotations['blocks'][block_index]
+            bl_ann['name'] = 'Block #{}'.format(block_index)
+            bl_ann['block_extra_info'] = 'This is the block {}'.format(block_index)
+            for seg_index in range([2, 3][block_index]):
+                seg_ann = bl_ann['segments'][seg_index]
+                seg_ann['name'] = 'Seg #{} Block #{}'.format(
+                    seg_index, block_index)
+                seg_ann['seg_extra_info'] = 'This is the seg {} of block {}'.format(
+                    seg_index, block_index)
+                for c in range(16):
+                    anasig_an = seg_ann['signals'][c]
+                    anasig_an['info'] = 'This is a good signals'
+                for c in range(3):
+                    spiketrain_an = seg_ann['units'][c]
+                    spiketrain_an['quality'] = 'Good!!'
+                for c in range(2):
+                    event_an = seg_ann['events'][c]
+                    if c == 0:
+                        event_an['nickname'] = 'Miss Event 0'
+                    elif c == 1:
+                        event_an['nickname'] = 'MrEpoch 1'
+
+    def _segment_t_start(self, block_index, seg_index):
+        # this must return a float scaled in seconds
+        # this t_start will be shared by all objects in the segment
+        # except AnalogSignal
+        all_starts = [[0., 15.], [0., 20., 60.]]
+        return all_starts[block_index][seg_index]
+
+    def _segment_t_stop(self, block_index, seg_index):
+        # this must return a float scaled in seconds
+        all_stops = [[10., 25.], [10., 30., 70.]]
+        return all_stops[block_index][seg_index]
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes=None):
+        # we are lucky: signals in all segments have the same shape!! (10.0 seconds)
+        # it is not always the case
+        # this must return an int = the number of samples
+
+        # Note that channel_indexes can be ignored in most cases,
+        # except when there are several sampling rates.
+        return 100000
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        # This gives the t_start of the signals.
+        # Very often this is equal to _segment_t_start, but not
+        # always.
+        # this must return a float scaled in seconds
+
+        # Note that channel_indexes can be ignored in most cases,
+        # except when there are several sampling rates.
+
+        # Here this is the same.
+        # this is not always the case
+        return self._segment_t_start(block_index, seg_index)
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        # this must return a signal chunk limited with
+        # i_start/i_stop (can be None)
+        # channel_indexes can be None (=all channel) or a list or numpy.array
+        # This must return a numpy array 2D (even with one channel).
+        # This must return the original dtype. No conversion here.
+        # This must be as fast as possible.
+        # Everything that can be done in _parse_header() must not be done here.
+
+        # Here we are lucky: our signal is always zeros!!
+        # it is not always the case
+        # internally signals are int16
+        # conversion to real units is done with self.header['signal_channels']
+
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = 100000
+
+        assert i_start >= 0, "I don't like your jokes"
+        assert i_stop <= 100000, "I don't like your jokes"
+
+        if channel_indexes is None:
+            nb_chan = 16
+        else:
+            nb_chan = len(channel_indexes)
+        raw_signals = np.zeros((i_stop - i_start, nb_chan), dtype='int16')
+        return raw_signals
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        # Must return the number of spikes for a given (block_index, seg_index, unit_index)
+        # we are lucky: our units all have the same number of spikes!!
+        # it is not always the case
+        nb_spikes = 20
+        return nb_spikes
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        # In our IO, timestamps are internally coded as 'int64' and they
+        # represent sample indexes of the signals at 10 kHz
+        # we are lucky: spikes have the same discharge in all segments!!
+        # incredible neuron!! This is not always the case
+
+        # the same t_start/t_stop clipping must be used in _get_spike_raw_waveforms()
+
+        ts_start = (self._segment_t_start(block_index, seg_index) * 10000)
+
+        spike_timestamps = np.arange(0, 10000, 500) + ts_start
+
+        if t_start is not None:
+            # restrict spikes to the given lower limit (in seconds)
+            spike_timestamps = spike_timestamps[spike_timestamps >= int(t_start * 10000)]
+        if t_stop is not None:
+            # restrict spikes to the given upper limit (in seconds)
+            spike_timestamps = spike_timestamps[spike_timestamps <= int(t_stop * 10000)]
+
+        return spike_timestamps
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        # must rescale the given spike_timestamps to seconds
+        # with a fixed dtype so the user can choose the precision they want.
+        spike_times = spike_timestamps.astype(dtype)
+        spike_times /= 10000.  # because 10kHz
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        # this must return a 3D numpy array (nb_spike, nb_channel, nb_sample)
+        # in the original dtype
+        # this must be as fast as possible.
+        # the same t_start/t_stop clipping must be used in _get_spike_timestamps()
+
+        # If there is no waveform support in the
+        # IO then _get_spike_raw_waveforms must return None
+
+        # In our IO waveforms come from all channels
+        # they are int16
+        # conversion to real units is done with self.header['unit_channels']
+        # Here, we have a realistic case: all waveforms are only noise.
+        # it is not always the case
+        # we have 20 spikes with a sweep of 50 samples (5 ms)
+
+        np.random.seed(2205)  # a magic number (my birthday)
+        waveforms = np.random.randint(low=-2 ** 4, high=2 ** 4, size=20 * 50, dtype='int16')
+        waveforms = waveforms.reshape(20, 1, 50)
+        return waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        # event and spike are very similar
+        # we have 2 event channels
+        if event_channel_index == 0:
+            # event channel
+            return 6
+        elif event_channel_index == 1:
+            # epoch channel
+            return 10
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        # the main difference between spike channel and event channel
+        # is that here we have 3 numpy arrays: timestamps, durations, labels
+        # durations must be None for 'event'
+        # labels must have dtype='U'
+
+        # in our IO events are directly coded in seconds
+        seg_t_start = self._segment_t_start(block_index, seg_index)
+        if event_channel_index == 0:
+            timestamp = np.arange(0, 6, dtype='float64') + seg_t_start
+            durations = None
+            labels = np.array(['trigger_a', 'trigger_b'] * 3, dtype='U12')
+        elif event_channel_index == 1:
+            timestamp = np.arange(0, 10, dtype='float64') + .5 + seg_t_start
+            durations = np.ones((10), dtype='float64') * .25
+            labels = np.array(['zoneX'] * 5 + ['zoneZ'] * 5, dtype='U12')
+
+        if t_start is not None:
+            keep = timestamp >= t_start
+            timestamp, labels = timestamp[keep], labels[keep]
+            if durations is not None:
+                durations = durations[keep]
+
+        if t_stop is not None:
+            keep = timestamp <= t_stop
+            timestamp, labels = timestamp[keep], labels[keep]
+            if durations is not None:
+                durations = durations[keep]
+
+        return timestamp, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        # must rescale the given event_timestamps to seconds
+        # with a fixed dtype so the user can choose the precision they want.
+
+        # really easy here because in our case it is already seconds
+        event_times = event_timestamps.astype(dtype)
+        return event_times
+
+    def _rescale_epoch_duration(self, raw_duration, dtype):
+        # really easy here because in our case it is already seconds
+        durations = raw_duration.astype(dtype)
+        return durations
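As a companion to the rules in the module docstring above, here is a hedged sketch of the Step 3 wrapper: a neo.io class that inherits both a RawIO class and BaseFromRaw (both modules are part of this changeset). The exact class attributes used in neo/io/exampleio.py may differ; this only illustrates the inheritance pattern:

    from neo.io.basefromrawio import BaseFromRaw
    from neo.rawio.examplerawio import ExampleRawIO


    class ExampleIO(ExampleRawIO, BaseFromRaw):
        """Expose ExampleRawIO through the standard neo.io API."""

        def __init__(self, filename=''):
            ExampleRawIO.__init__(self, filename=filename)
            BaseFromRaw.__init__(self, filename)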

+ 530 - 0
code/python-neo/neo/rawio/intanrawio.py

@@ -0,0 +1,530 @@
+# -*- coding: utf-8 -*-
+"""
+
+Support for Intan Technologies RHD and RHS files.
+
+These 2 formats are more or less the same, but:
+  * there is some variance in the headers.
+  * the RHS amplifier is more complex because of the optional DC channels
+
+Supported RHS versions: 1.0
+Supported RHD versions: 1.0, 1.1, 1.2, 1.3, 2.0
+
+See:
+  * http://intantech.com/files/Intan_RHD2000_data_file_formats.pdf
+  * http://intantech.com/files/Intan_RHS2000_data_file_formats.pdf
+
+Author: Samuel Garcia
+
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+from collections import OrderedDict
+from distutils.version import LooseVersion as V
+
+
+class IntanRawIO(BaseRawIO):
+    """
+
+    """
+    extensions = ['rhd', 'rhs']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+
+        if self.filename.endswith('.rhs'):
+            self._global_info, self._ordered_channels, data_dtype,\
+                header_size, self._block_size = read_rhs(self.filename)
+        elif self.filename.endswith('.rhd'):
+            self._global_info, self._ordered_channels, data_dtype,\
+                header_size, self._block_size = read_rhd(self.filename)
+
+        # memmap raw data with the complicated structured dtype
+        self._raw_data = np.memmap(self.filename, dtype=data_dtype, mode='r', offset=header_size)
+
+        # check timestamp continuity
+        timestamp = self._raw_data['timestamp'].flatten()
+        assert np.all(np.diff(timestamp) == 1), 'timestamps have gaps'
+
+        # signals
+        sig_channels = []
+        for c, chan_info in enumerate(self._ordered_channels):
+            name = chan_info['native_channel_name']
+            chan_id = c  # the chan_id has no meaning in Intan
+            if chan_info['signal_type'] == 20:
+                # exception for temperature
+                sig_dtype = 'int16'
+            else:
+                sig_dtype = 'uint16'
+            group_id = 0
+            sig_channels.append((name, chan_id, chan_info['sampling_rate'],
+                                sig_dtype, chan_info['units'], chan_info['gain'],
+                                chan_info['offset'], chan_info['signal_type']))
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        self._max_sampling_rate = np.max(sig_channels['sampling_rate'])
+        self._max_sigs_length = self._raw_data.size * self._block_size
+
+        # No events
+        event_channels = []
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill into header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        self._generate_minimal_annotations()
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._max_sigs_length / self._max_sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        assert channel_indexes is not None, 'channel_indexes cannot be None, several signal size'
+        assert np.unique(self.header['signal_channels'][channel_indexes]['group_id']).size == 1
+        channel_names = self.header['signal_channels'][channel_indexes]['name']
+        chan_name = channel_names[0]
+        size = self._raw_data[chan_name].size
+        return size
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = self._get_signal_size(block_index, seg_index, channel_indexes)
+
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        channel_names = self.header['signal_channels'][channel_indexes]['name']
+
+        shape = self._raw_data[channel_names[0]].shape
+
+        # some channels (e.g. temperature) have a 1D field, so the shape is 1D,
+        # because there is 1 sample per block
+        if len(shape) == 2:
+            # this is the general case with 2D blocks; the slicing below is
+            # illustrated with a toy example after this file's diff
+            block_size = shape[1]
+            block_start = i_start // block_size
+            block_stop = i_stop // block_size + 1
+
+            sl0 = i_start % block_size
+            sl1 = sl0 + (i_stop - i_start)
+
+        sigs_chunk = np.zeros((i_stop - i_start, len(channel_names)), dtype='uint16')
+        for i, chan_name in enumerate(channel_names):
+            data_chan = self._raw_data[chan_name]
+            if len(shape) == 1:
+                sigs_chunk[:, i] = data_chan[i_start:i_stop]
+            else:
+                sigs_chunk[:, i] = data_chan[block_start:block_stop].flatten()[sl0:sl1]
+
+        return sigs_chunk
+
+
+def read_qstring(f):
+    length = np.fromfile(f, dtype='uint32', count=1)[0]
+    if length == 0xFFFFFFFF or length == 0:
+        return ''
+    txt = f.read(length).decode('utf-16')
+    return txt
+
+
+def read_variable_header(f, header):
+    info = {}
+    for field_name, field_type in header:
+        if field_type == 'QString':
+            field_value = read_qstring(f)
+        else:
+            field_value = np.fromfile(f, dtype=field_type, count=1)[0]
+        info[field_name] = field_value
+    return info
+
+
+###############
+# RHS ZONE
+
+rhs_global_header = [
+    ('magic_number', 'uint32'),  # 0xD69127AC
+
+    ('major_version', 'int16'),
+    ('minor_version', 'int16'),
+
+    ('sampling_rate', 'float32'),
+
+    ('dsp_enabled', 'int16'),
+
+    ('actual_dsp_cutoff_frequency', 'float32'),
+    ('actual_lower_bandwidth', 'float32'),
+    ('actual_lower_settle_bandwidth', 'float32'),
+    ('actual_upper_bandwidth', 'float32'),
+    ('desired_dsp_cutoff_frequency', 'float32'),
+    ('desired_lower_bandwidth', 'float32'),
+    ('desired_lower_settle_bandwidth', 'float32'),
+    ('desired_upper_bandwidth', 'float32'),
+
+    ('notch_filter_mode', 'int16'),
+
+    ('desired_impedance_test_frequency', 'float32'),
+    ('actual_impedance_test_frequency', 'float32'),
+
+    ('amp_settle_mode', 'int16'),
+    ('charge_recovery_mode', 'int16'),
+
+    ('stim_step_size', 'float32'),
+    ('recovery_current_limit', 'float32'),
+    ('recovery_target_voltage', 'float32'),
+
+    ('note1', 'QString'),
+    ('note2', 'QString'),
+    ('note3', 'QString'),
+
+    ('dc_amplifier_data_saved', 'int16'),
+
+    ('board_mode', 'int16'),
+
+    ('ref_channel_name', 'QString'),
+
+    ('nb_signal_group', 'int16'),
+]
+
+rhs_signal_group_header = [
+    ('signal_group_name', 'QString'),
+    ('signal_group_prefix', 'QString'),
+    ('signal_group_enabled', 'int16'),
+    ('channel_num', 'int16'),
+    ('amplified_channel_num', 'int16'),
+]
+
+rhs_signal_channel_header = [
+    ('native_channel_name', 'QString'),
+    ('custom_channel_name', 'QString'),
+    ('native_order', 'int16'),
+    ('custom_order', 'int16'),
+    ('signal_type', 'int16'),
+    ('channel_enabled', 'int16'),
+    ('chip_channel_num', 'int16'),
+    ('command_stream', 'int16'),
+    ('board_stream_num', 'int16'),
+    ('spike_scope_trigger_mode', 'int16'),
+    ('spike_scope_voltage_thresh', 'int16'),
+    ('spike_scope_digital_trigger_channel', 'int16'),
+    ('spike_scope_digital_edge_polarity', 'int16'),
+    ('electrode_impedance_magnitude', 'float32'),
+    ('electrode_impedance_phase', 'float32'),
+]
+
+
+def read_rhs(filename):
+    BLOCK_SIZE = 128  # sample per block
+
+    with open(filename, mode='rb') as f:
+        global_info = read_variable_header(f, rhs_global_header)
+
+        channels_by_type = {k: [] for k in [0, 3, 4, 5, 6]}
+        for g in range(global_info['nb_signal_group']):
+            group_info = read_variable_header(f, rhs_signal_group_header)
+
+            if bool(group_info['signal_group_enabled']):
+                for c in range(group_info['channel_num']):
+                    chan_info = read_variable_header(f, rhs_signal_channel_header)
+                    assert chan_info['signal_type'] not in (1, 2)
+                    if bool(chan_info['channel_enabled']):
+                        channels_by_type[chan_info['signal_type']].append(chan_info)
+
+        header_size = f.tell()
+
+    sr = global_info['sampling_rate']
+
+    # construct dtype by re-ordering channels by types
+    ordered_channels = []
+    data_dtype = [('timestamp', 'int32', BLOCK_SIZE)]
+
+    # 0: RHS2000 amplifier channel.
+    for chan_info in channels_by_type[0]:
+        name = chan_info['native_channel_name']
+        chan_info['sampling_rate'] = sr
+        chan_info['units'] = 'uV'
+        chan_info['gain'] = 0.195
+        chan_info['offset'] = -32768 * 0.195
+        ordered_channels.append(chan_info)
+        data_dtype += [(name, 'uint16', BLOCK_SIZE)]
+
+    if bool(global_info['dc_amplifier_data_saved']):
+        for chan_info in channels_by_type[0]:
+            name = chan_info['native_channel_name']
+            chan_info_dc = dict(chan_info)
+            chan_info_dc['native_channel_name'] = name + '_DC'
+            chan_info_dc['sampling_rate'] = sr
+            chan_info_dc['units'] = 'mV'
+            chan_info_dc['gain'] = 19.23
+            chan_info_dc['offset'] = -512 * 19.23
+            chan_info_dc['signal_type'] = 10  # put it in another group
+            ordered_channels.append(chan_info_dc)
+            data_dtype += [(name + '_DC', 'uint16', BLOCK_SIZE)]
+
+    for chan_info in channels_by_type[0]:
+        name = chan_info['native_channel_name']
+        chan_info_stim = dict(chan_info)
+        chan_info_stim['native_channel_name'] = name + '_STIM'
+        chan_info_stim['sampling_rate'] = sr
+        # stim channels are complicated because they are bit-coded;
+        # they do not fit the gain/offset rawio strategy
+        chan_info_stim['units'] = ''
+        chan_info_stim['gain'] = 1.
+        chan_info_stim['offset'] = 0.
+        chan_info_stim['signal_type'] = 11  # put it in another group
+        ordered_channels.append(chan_info_stim)
+        data_dtype += [(name + '_STIM', 'uint16', BLOCK_SIZE)]
+
+    # 3: Analog input channel.
+    # 4: Analog output channel.
+    for sig_type in [3, 4, ]:
+        for chan_info in channels_by_type[sig_type]:
+            name = chan_info['native_channel_name']
+            chan_info['sampling_rate'] = sr
+            chan_info['units'] = 'V'
+            chan_info['gain'] = 0.0003125
+            chan_info['offset'] = -32768 * 0.0003125
+            ordered_channels.append(chan_info)
+            data_dtype += [(name, 'uint16', BLOCK_SIZE)]
+
+    # 5: Digital input channel.
+    # 6: Digital output channel.
+    for sig_type in [5, 6]:
+        # at the moment these channels are not in the sig channel list
+        # but they are in the raw memmap
+        if len(channels_by_type[sig_type]) > 0:
+            name = {5: 'DIGITAL-IN', 6: 'DIGITAL-OUT'}[sig_type]
+            data_dtype += [(name, 'uint16', BLOCK_SIZE)]
+
+    return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE
+
+
+###############
+# RHD ZONE
+
+rhd_global_header_base = [
+    ('magic_number', 'uint32'),  # 0xC6912702
+    ('major_version', 'int16'),
+    ('minor_version', 'int16'),
+]
+
+
+rhd_global_header_part1 = [
+    ('sampling_rate', 'float32'),
+
+    ('dsp_enabled', 'int16'),
+
+    ('actual_dsp_cutoff_frequency', 'float32'),
+    ('actual_lower_bandwidth', 'float32'),
+    ('actual_upper_bandwidth', 'float32'),
+    ('desired_dsp_cutoff_frequency', 'float32'),
+    ('desired_lower_bandwidth', 'float32'),
+    ('desired_upper_bandwidth', 'float32'),
+
+    ('notch_filter_mode', 'int16'),
+
+    ('desired_impedance_test_frequency', 'float32'),
+    ('actual_impedance_test_frequency', 'float32'),
+
+    ('note1', 'QString'),
+    ('note2', 'QString'),
+    ('note3', 'QString'),
+
+]
+
+rhd_global_header_v11 = [
+    ('num_temp_sensor_channels', 'int16'),
+]
+
+rhd_global_header_v13 = [
+    ('eval_board_mode', 'int16'),
+]
+
+rhd_global_header_v20 = [
+    ('reference_channel', 'QString'),
+]
+
+rhd_global_header_final = [
+    ('nb_signal_group', 'int16'),
+]
+
+rhd_signal_group_header = [
+    ('signal_group_name', 'QString'),
+    ('signal_group_prefix', 'QString'),
+    ('signal_group_enabled', 'int16'),
+    ('channel_num', 'int16'),
+    ('amplified_channel_num', 'int16'),
+]
+
+rhd_signal_channel_header = [
+    ('native_channel_name', 'QString'),
+    ('custom_channel_name', 'QString'),
+    ('native_order', 'int16'),
+    ('custom_order', 'int16'),
+    ('signal_type', 'int16'),
+    ('channel_enabled', 'int16'),
+    ('chip_channel_num', 'int16'),
+    ('board_stream_num', 'int16'),
+    ('spike_scope_trigger_mode', 'int16'),
+    ('spike_scope_voltage_thresh', 'int16'),
+    ('spike_scope_digital_trigger_channel', 'int16'),
+    ('spike_scope_digital_edge_polarity', 'int16'),
+    ('electrode_impedance_magnitude', 'float32'),
+    ('electrode_impedance_phase', 'float32'),
+]
+
+
+def read_rhd(filename):
+    with open(filename, mode='rb') as f:
+
+        global_info = read_variable_header(f, rhd_global_header_base)
+
+        version = V('{major_version}.{minor_version}'.format(**global_info))
+
+        # the header size depends on the version :-(
+        header = list(rhd_global_header_part1)  # make a copy
+
+        if version >= '1.1':
+            header = header + rhd_global_header_v11
+        else:
+            global_info['num_temp_sensor_channels'] = 0
+
+        if version >= '1.3':
+            header = header + rhd_global_header_v13
+        else:
+            global_info['eval_board_mode'] = 0
+
+        if version >= '2.0':
+            header = header + rhd_global_header_v20
+        else:
+            global_info['reference_channel'] = ''
+
+        header = header + rhd_global_header_final
+
+        global_info.update(read_variable_header(f, header))
+
+        # read channel group and channel header
+        channels_by_type = {k: [] for k in [0, 1, 2, 3, 4, 5]}
+        for g in range(global_info['nb_signal_group']):
+            group_info = read_variable_header(f, rhd_signal_group_header)
+
+            if bool(group_info['signal_group_enabled']):
+                for c in range(group_info['channel_num']):
+                    chan_info = read_variable_header(f, rhd_signal_channel_header)
+                    if bool(chan_info['channel_enabled']):
+                        channels_by_type[chan_info['signal_type']].append(chan_info)
+
+        header_size = f.tell()
+
+    sr = global_info['sampling_rate']
+
+    # construct the data block dtype and reorder channels
+    if version >= '2.0':
+        BLOCK_SIZE = 128
+    else:
+        BLOCK_SIZE = 60  # 256 channels
+
+    ordered_channels = []
+
+    if version >= '1.2':
+        data_dtype = [('timestamp', 'int32', BLOCK_SIZE)]
+    else:
+        data_dtype = [('timestamp', 'uint32', BLOCK_SIZE)]
+
+    # 0: RHD2000 amplifier channel
+    for chan_info in channels_by_type[0]:
+        name = chan_info['native_channel_name']
+        chan_info['sampling_rate'] = sr
+        chan_info['units'] = 'uV'
+        chan_info['gain'] = 0.195
+        chan_info['offset'] = -32768 * 0.195
+        ordered_channels.append(chan_info)
+        data_dtype += [(name, 'uint16', BLOCK_SIZE)]
+
+    # 1: RHD2000 auxiliary input channel
+    for chan_info in channels_by_type[1]:
+        name = chan_info['native_channel_name']
+        chan_info['sampling_rate'] = sr / 4.
+        chan_info['units'] = 'V'
+        chan_info['gain'] = 0.0000374
+        chan_info['offset'] = 0.
+        ordered_channels.append(chan_info)
+        data_dtype += [(name, 'uint16', BLOCK_SIZE // 4)]
+
+    # 2: RHD2000 supply voltage channel
+    for chan_info in channels_by_type[2]:
+        name = chan_info['native_channel_name']
+        chan_info['sampling_rate'] = sr / BLOCK_SIZE
+        chan_info['units'] = 'V'
+        chan_info['gain'] = 0.0000748
+        chan_info['offset'] = 0.
+        ordered_channels.append(chan_info)
+        data_dtype += [(name, 'uint16')]
+
+    # temperature is not an official channel in the header
+    for i in range(global_info['num_temp_sensor_channels']):
+        name = 'temperature_{}'.format(i)
+        chan_info = {'native_channel_name': name, 'signal_type': 20}
+        chan_info['sampling_rate'] = sr / BLOCK_SIZE
+        chan_info['units'] = 'Celsius'
+        chan_info['gain'] = 0.001
+        chan_info['offset'] = 0.
+        ordered_channels.append(chan_info)
+        data_dtype += [(name, 'int16')]
+
+    # 3: USB board ADC input channel
+    for chan_info in channels_by_type[3]:
+        name = chan_info['native_channel_name']
+        chan_info['sampling_rate'] = sr
+        chan_info['units'] = 'V'
+        if global_info['eval_board_mode'] == 0:
+            chan_info['gain'] = 0.000050354
+            chan_info['offset'] = 0.
+        elif global_info['eval_board_mode'] == 1:
+            chan_info['gain'] = 0.00015259
+            chan_info['offset'] = -32768 * 0.00015259
+        elif global_info['eval_board_mode'] == 13:
+            chan_info['gain'] = 0.0003125
+            chan_info['offset'] = -32768 * 0.0003125
+        ordered_channels.append(chan_info)
+        data_dtype += [(name, 'uint16', BLOCK_SIZE)]
+
+    # 4: USB board digital input channel
+    # 5: USB board digital output channel
+    for sig_type in [4, 5]:
+        # at the moment these channels are not in the sig channel list
+        # but they are in the raw memmap
+        if len(channels_by_type[sig_type]) > 0:
+            name = {4: 'DIGITAL-IN', 5: 'DIGITAL-OUT'}[sig_type]
+            data_dtype += [(name, 'uint16', BLOCK_SIZE)]
+
+    return global_info, ordered_channels, data_dtype, header_size, BLOCK_SIZE
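The block layout built by read_rhd()/read_rhs() stores each channel as fixed-size blocks inside one structured dtype, and _get_analogsignal_chunk() recovers a contiguous slice by flattening a range of blocks. A toy reconstruction of that slicing (made-up channel name and a tiny block size), as referenced in the comments above:

    import numpy as np

    BLOCK_SIZE = 4  # toy value; real RHD/RHS blocks hold 60 or 128 samples
    data_dtype = [('timestamp', 'int32', BLOCK_SIZE), ('A-000', 'uint16', BLOCK_SIZE)]

    # 3 blocks of one hypothetical channel holding samples 0..11
    raw_data = np.zeros(3, dtype=data_dtype)
    raw_data['A-000'] = np.arange(12).reshape(3, BLOCK_SIZE)

    i_start, i_stop = 3, 9
    block_start = i_start // BLOCK_SIZE        # 0
    block_stop = i_stop // BLOCK_SIZE + 1      # 3
    sl0 = i_start % BLOCK_SIZE                 # 3
    sl1 = sl0 + (i_stop - i_start)             # 9

    chunk = raw_data['A-000'][block_start:block_stop].flatten()[sl0:sl1]
    print(chunk)  # [3 4 5 6 7 8]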

+ 231 - 0
code/python-neo/neo/rawio/micromedrawio.py

@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from Micromed files (.trc).
+Inspired by the Matlab code for EEGLAB from Rami K. Niazy.
+
+Completed with Matlab code from Guillaume Becq.
+
+Author: Samuel Garcia
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import datetime
+import os
+import struct
+import io
+
+
+class StructFile(io.BufferedReader):
+    def read_f(self, fmt, offset=None):
+        if offset is not None:
+            self.seek(offset)
+        return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
+
+
+class MicromedRawIO(BaseRawIO):
+    """
+    Class for reading data from Micromed (.trc) files.
+    """
+    extensions = ['trc', 'TRC']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _parse_header(self):
+        with io.open(self.filename, 'rb') as fid:
+            f = StructFile(fid)
+
+            # Name
+            f.seek(64)
+            surname = f.read(22).strip(b' ')
+            firstname = f.read(20).strip(b' ')
+
+            # Date
+            day, month, year, hour, minute, sec = f.read_f('bbbbbb', offset=128)
+            rec_datetime = datetime.datetime(year + 1900, month, day, hour, minute,
+                                             sec)
+
+            Data_Start_Offset, Num_Chan, Multiplexer, Rate_Min, Bytes = f.read_f(
+                'IHHHH', offset=138)
+
+            # header version
+            header_version, = f.read_f('b', offset=175)
+            assert header_version == 4
+
+            # area
+            f.seek(176)
+            zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA',
+                          'IMPED_B', 'IMPED_E', 'MONTAGE',
+                          'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A',
+                          'EVENT B', 'TRIGGER']
+            zones = {}
+            for zname in zone_names:
+                zname2, pos, length = f.read_f('8sII')
+                zones[zname] = zname2, pos, length
+                assert zname == zname2.decode('ascii').strip(' ')
+
+            # raw signals memmap
+            sig_dtype = 'u' + str(Bytes)
+            self._raw_signals = np.memmap(self.filename, dtype=sig_dtype, mode='r',
+                                          offset=Data_Start_Offset).reshape(-1, Num_Chan)
+
+            # Reading Code Info
+            zname2, pos, length = zones['ORDER']
+            f.seek(pos)
+            code = np.frombuffer(f.read(Num_Chan * 2), dtype='u2')
+
+            units_code = {-1: 'nV', 0: 'uV', 1: 'mV', 2: 1, 100: 'percent',
+                          101: 'dimensionless', 102: 'dimensionless'}
+            sig_channels = []
+            sig_grounds = []
+            for c in range(Num_Chan):
+                zname2, pos, length = zones['LABCOD']
+                f.seek(pos + code[c] * 128 + 2, 0)
+
+                chan_name = f.read(6).strip(b"\x00").decode('ascii')
+                ground = f.read(6).strip(b"\x00").decode('ascii')
+                sig_grounds.append(ground)
+                logical_min, logical_max, logical_ground, physical_min, physical_max = f.read_f(
+                    'iiiii')
+                k, = f.read_f('h')
+                units = units_code.get(k, 'uV')
+
+                factor = float(physical_max - physical_min) / float(
+                    logical_max - logical_min + 1)
+                gain = factor
+                offset = -logical_ground * factor
+
+                f.seek(8, 1)
+                sampling_rate, = f.read_f('H')
+                sampling_rate *= Rate_Min
+                chan_id = c
+                group_id = 0
+                sig_channels.append((chan_name, chan_id, sampling_rate, sig_dtype,
+                                     units, gain, offset, group_id))
+
+            sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+            assert np.unique(sig_channels['sampling_rate']).size == 1
+            self._sampling_rate = float(np.unique(sig_channels['sampling_rate'])[0])
+
+            # Event channels
+            event_channels = []
+            event_channels.append(('Trigger', '', 'event'))
+            event_channels.append(('Note', '', 'event'))
+            event_channels.append(('Event A', '', 'epoch'))
+            event_channels.append(('Event B', '', 'epoch'))
+            event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+            # Read trigger and notes
+            self._raw_events = []
+            ev_dtypes = [('TRIGGER', [('start', 'u4'), ('label', 'u2')]),
+                         ('NOTE', [('start', 'u4'), ('label', 'S40')]),
+                         ('EVENT A', [('label', 'u4'), ('start', 'u4'), ('stop', 'u4')]),
+                         ('EVENT B', [('label', 'u4'), ('start', 'u4'), ('stop', 'u4')]),
+                         ]
+            for zname, ev_dtype in ev_dtypes:
+                zname2, pos, length = zones[zname]
+                dtype = np.dtype(ev_dtype)
+                rawevent = np.memmap(self.filename, dtype=dtype, mode='r',
+                                     offset=pos, shape=length // dtype.itemsize)
+
+                keep = (rawevent['start'] >= rawevent['start'][0]) & (
+                    rawevent['start'] < self._raw_signals.shape[0]) & (
+                    rawevent['start'] != 0)
+                rawevent = rawevent[keep]
+                self._raw_events.append(rawevent)
+
+            # No spikes
+            unit_channels = []
+            unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+            # fill into header dict
+            self.header = {}
+            self.header['nb_block'] = 1
+            self.header['nb_segment'] = [1]
+            self.header['signal_channels'] = sig_channels
+            self.header['unit_channels'] = unit_channels
+            self.header['event_channels'] = event_channels
+
+            # insert some annotations at the appropriate places
+            self._generate_minimal_annotations()
+            bl_annotations = self.raw_annotations['blocks'][0]
+            seg_annotations = bl_annotations['segments'][0]
+
+            for d in (bl_annotations, seg_annotations):
+                d['rec_datetime'] = rec_datetime
+                d['firstname'] = firstname
+                d['surname'] = surname
+                d['header_version'] = header_version
+
+            for c in range(sig_channels.size):
+                anasig_an = seg_annotations['signals'][c]
+                anasig_an['ground'] = sig_grounds[c]
+                channel_an = self.raw_annotations['signal_channels'][c]
+                channel_an['ground'] = sig_grounds[c]
+
+    def _source_name(self):
+        return self.filename
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals.shape[0] / self._sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._raw_signals.shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
+        return raw_signals
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        return 0
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        n = self._raw_events[event_channel_index].size
+        return n
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+
+        raw_event = self._raw_events[event_channel_index]
+
+        if t_start is not None:
+            keep = raw_event['start'] >= int(t_start * self._sampling_rate)
+            raw_event = raw_event[keep]
+
+        if t_stop is not None:
+            keep = raw_event['start'] <= int(t_stop * self._sampling_rate)
+            raw_event = raw_event[keep]
+
+        timestamp = raw_event['start']
+        if event_channel_index < 2:
+            durations = None
+        else:
+            durations = raw_event['stop'] - raw_event['start']
+        labels = raw_event['label'].astype('U')
+
+        return timestamp, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype) / self._sampling_rate
+        return event_times
+
+    def _rescale_epoch_duration(self, raw_duration, dtype):
+        durations = raw_duration.astype(dtype) / self._sampling_rate
+        return durations

+ 689 - 0
code/python-neo/neo/rawio/neuralynxrawio.py

@@ -0,0 +1,689 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from Neuralynx files.
+This IO supports NCS, NEV, NSE and NTT file formats.
+
+
+NCS contains the signal for a single channel.
+NEV contains events.
+NSE contains spikes and waveforms for single electrodes.
+NTT contains spikes and waveforms for tetrodes.
+
+
+NCS files can contain gaps, which show up as irregularities in the
+timestamps of the data blocks. Each gap leads to a new segment.
+Unfortunately, NCS files need to be read entirely to detect these gaps.
+
+
+Author: Julia Sprenger, Carlos Canova, Samuel Garcia
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype,
+                        _unit_channel_dtype, _event_channel_dtype)
+
+import numpy as np
+import os
+import re
+import distutils.version
+import datetime
+from collections import OrderedDict
+
+BLOCK_SIZE = 512  # number of samples per signal block
+HEADER_SIZE = 2 ** 14  # files have a 16 kB txt header
+
+
+class NeuralynxRawIO(BaseRawIO):
+    """"
+    Class for reading dataset recorded by Neuralynx.
+
+    Examples:
+        >>> reader = NeuralynxRawIO(dirname='Cheetah_v5.5.1/original_data')
+        >>> reader.parse_header()
+
+            Inspect all file in the directory.
+
+        >>> print(reader)
+
+            Display all informations about signal channels, units, segment size....
+    """
+    extensions = ['nse', 'ncs', 'nev', 'ntt']
+    rawmode = 'one-dir'
+
+    def __init__(self, dirname='', **kargs):
+        self.dirname = dirname
+        BaseRawIO.__init__(self, **kargs)
+
+    def _source_name(self):
+        return self.dirname
+
+    def _parse_header(self):
+
+        sig_channels = []
+        unit_channels = []
+        event_channels = []
+
+        self.ncs_filenames = OrderedDict()  # chan_id: filename
+        self.nse_ntt_filenames = OrderedDict()  # chan_id: filename
+        self.nev_filenames = OrderedDict()  # chan_id: filename
+
+        self._nev_memmap = {}
+        self._spike_memmap = {}
+        self.internal_unit_ids = []  # maps unit channel index -> (channel_id, unit_id)
+        self.internal_event_ids = []
+        self._empty_ncs = []  # this list contains filenames of empty records
+        self._empty_nse_ntt = []
+
+        # explore the directory looking for ncs, nev, nse and ntt
+        # And construct channels headers
+        signal_annotations = []
+        unit_annotations = []
+        event_annotations = []
+
+        for filename in sorted(os.listdir(self.dirname)):
+            filename = os.path.join(self.dirname, filename)
+
+            _, ext = os.path.splitext(filename)
+            ext = ext[1:]  # remove dot
+            if ext not in self.extensions:
+                continue
+
+            if (os.path.getsize(filename) <= HEADER_SIZE) and (ext in ['ncs']):
+                self._empty_ncs.append(filename)
+                continue
+
+            # All files have more or less the same header structure
+            info = read_txt_header(filename)
+            chan_names = info['channel_names']
+            chan_ids = info['channel_ids']
+
+            for idx, chan_id in enumerate(chan_ids):
+                chan_name = chan_names[idx]
+                if ext == 'ncs':
+                    # a signal channel
+                    units = 'uV'
+                    gain = info['bit_to_microVolt'][idx]
+                    if info['input_inverted']:
+                        gain *= -1
+                    offset = 0.
+                    group_id = 0
+                    sig_channels.append((chan_name, chan_id, info['sampling_rate'],
+                                         'int16', units, gain, offset, group_id))
+                    self.ncs_filenames[chan_id] = filename
+                    keys = [
+                        'DspFilterDelay_µs',
+                        'recording_opened',
+                        'FileType',
+                        'DspDelayCompensation',
+                        'recording_closed',
+                        'DspLowCutFilterType',
+                        'HardwareSubSystemName',
+                        'DspLowCutNumTaps',
+                        'DSPLowCutFilterEnabled',
+                        'HardwareSubSystemType',
+                        'DspHighCutNumTaps',
+                        'ADMaxValue',
+                        'DspLowCutFrequency',
+                        'DSPHighCutFilterEnabled',
+                        'RecordSize',
+                        'InputRange',
+                        'DspHighCutFrequency',
+                        'input_inverted',
+                        'NumADChannels',
+                        'DspHighCutFilterType',
+                    ]
+                    d = {k: info[k] for k in keys if k in info}
+                    signal_annotations.append(d)
+
+                elif ext in ('nse', 'ntt'):
+                    # nse and ntt are pretty similar except for the waveform shape;
+                    # a file can contain several unit_ids (so several unit channels)
+                    assert chan_id not in self.nse_ntt_filenames, \
+                        'Several nse or ntt files have the same unit_id!!!'
+                    self.nse_ntt_filenames[chan_id] = filename
+
+                    dtype = get_nse_or_ntt_dtype(info, ext)
+
+                    if (os.path.getsize(filename) <= HEADER_SIZE):
+                        self._empty_nse_ntt.append(filename)
+                        data = np.zeros((0,), dtype=dtype)
+                    else:
+                        data = np.memmap(filename, dtype=dtype, mode='r', offset=HEADER_SIZE)
+
+                    self._spike_memmap[chan_id] = data
+
+                    unit_ids = np.unique(data['unit_id'])
+                    for unit_id in unit_ids:
+                        # a spike channel for each (chan_id, unit_id)
+                        self.internal_unit_ids.append((chan_id, unit_id))
+
+                        unit_name = "ch{}#{}".format(chan_id, unit_id)
+                        unit_id = '{}'.format(unit_id)
+                        wf_units = 'uV'
+                        wf_gain = info['bit_to_microVolt'][idx]
+                        if info['input_inverted']:
+                            wf_gain *= -1
+                        wf_offset = 0.
+                        wf_left_sweep = -1  # NOT KNOWN
+                        wf_sampling_rate = info['sampling_rate']
+                        unit_channels.append(
+                            (unit_name, '{}'.format(unit_id), wf_units, wf_gain,
+                             wf_offset, wf_left_sweep, wf_sampling_rate))
+                        unit_annotations.append(dict(file_origin=filename))
+
+                elif ext == 'nev':
+                    # an event channel
+                    # each ('event_id', 'ttl_input') pair gives a new event channel
+                    self.nev_filenames[chan_id] = filename
+                    data = np.memmap(
+                        filename, dtype=nev_dtype, mode='r', offset=HEADER_SIZE)
+                    internal_ids = np.unique(
+                        data[['event_id', 'ttl_input']]).tolist()
+                    for internal_event_id in internal_ids:
+                        if internal_event_id not in self.internal_event_ids:
+                            event_id, ttl_input = internal_event_id
+                            name = '{} event_id={} ttl={}'.format(
+                                chan_name, event_id, ttl_input)
+                            event_channels.append((name, chan_id, 'event'))
+                            self.internal_event_ids.append(internal_event_id)
+
+                    self._nev_memmap[chan_id] = data
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        if sig_channels.size > 0:
+            sampling_rate = np.unique(sig_channels['sampling_rate'])
+            assert sampling_rate.size == 1
+            self._sigs_sampling_rate = sampling_rate[0]
+
+        # read ncs files for gaps detection and nb_segment computation
+        self.read_ncs_files(self.ncs_filenames)
+
+        # timestamp limits for nev and nse files are not stored in their headers,
+        # so all spikes and events need to be scanned to find them
+        ts0, ts1 = None, None
+        for _data_memmap in (self._spike_memmap, self._nev_memmap):
+            for chan_id, data in _data_memmap.items():
+                ts = data['timestamp']
+                if ts.size == 0:
+                    continue
+                if ts0 is None:
+                    ts0 = ts[0]
+                    ts1 = ts[-1]
+                ts0 = min(ts0, ts[0])
+                ts1 = max(ts1, ts[-1])
+
+        if self._timestamp_limits is None:
+            # case  NO ncs but HAVE nev or nse
+            self._timestamp_limits = [(ts0, ts1)]
+            self._seg_t_starts = [ts0 / 1e6]
+            self._seg_t_stops = [ts1 / 1e6]
+            self.global_t_start = ts0 / 1e6
+            self.global_t_stop = ts1 / 1e6
+        elif ts0 is not None:
+            # case  HAVE ncs AND HAVE nev or nse
+            self.global_t_start = min(ts0 / 1e6, self._sigs_t_start[0])
+            self.global_t_stop = max(ts1 / 1e6, self._sigs_t_stop[-1])
+            self._seg_t_starts = list(self._sigs_t_start)
+            self._seg_t_starts[0] = self.global_t_start
+            self._seg_t_stops = list(self._sigs_t_stop)
+            self._seg_t_stops[-1] = self.global_t_stop
+        else:
+            # case HAVE ncs but  NO nev or nse
+            self._seg_t_starts = self._sigs_t_start
+            self._seg_t_stops = self._sigs_t_stop
+            self.global_t_start = self._sigs_t_start[0]
+            self.global_t_stop = self._sigs_t_stop[-1]
+
+        # fill into the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [self._nb_segment]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # Annotations
+        self._generate_minimal_annotations()
+        bl_annotations = self.raw_annotations['blocks'][0]
+
+        for seg_index in range(self._nb_segment):
+            seg_annotations = bl_annotations['segments'][seg_index]
+
+            for c in range(sig_channels.size):
+                sig_ann = seg_annotations['signals'][c]
+                sig_ann.update(signal_annotations[c])
+
+            for c in range(unit_channels.size):
+                unit_ann = seg_annotations['units'][c]
+                unit_ann.update(unit_annotations[c])
+
+            for c in range(event_channels.size):
+                # annotations for channel events
+                event_id, ttl_input = self.internal_event_ids[c]
+                chan_id = event_channels[c]['id']
+
+                ev_ann = seg_annotations['events'][c]
+                ev_ann['file_origin'] = self.nev_filenames[chan_id]
+
+                # ~ ev_ann['marker_id'] =
+                # ~ ev_ann['nttl'] =
+                # ~ ev_ann['digital_marker'] =
+                # ~ ev_ann['analog_marker'] =
+
+    def _segment_t_start(self, block_index, seg_index):
+        return self._seg_t_starts[seg_index] - self.global_t_start
+
+    def _segment_t_stop(self, block_index, seg_index):
+        return self._seg_t_stops[seg_index] - self.global_t_start
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._sigs_length[seg_index]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return self._sigs_t_start[seg_index] - self.global_t_start
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = self._sigs_length[seg_index]
+
+        block_start = i_start // BLOCK_SIZE
+        block_stop = i_stop // BLOCK_SIZE + 1
+        sl0 = i_start % BLOCK_SIZE
+        sl1 = sl0 + (i_stop - i_start)
+
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        channel_ids = self.header['signal_channels'][channel_indexes]['id']
+
+        sigs_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype='int16')
+        for i, chan_id in enumerate(channel_ids):
+            data = self._sigs_memmap[seg_index][chan_id]
+            sub = data[block_start:block_stop]
+            sigs_chunk[:, i] = sub['samples'].flatten()[sl0:sl1]
+
+        return sigs_chunk
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+        data = self._spike_memmap[chan_id]
+        ts = data['timestamp']
+
+        ts0, ts1 = self._timestamp_limits[seg_index]
+
+        keep = (ts >= ts0) & (ts <= ts1) & (unit_id == data['unit_id'])
+        nb_spike = int(data[keep].size)
+        return nb_spike
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+        data = self._spike_memmap[chan_id]
+        ts = data['timestamp']
+
+        ts0, ts1 = self._timestamp_limits[seg_index]
+        if t_start is not None:
+            ts0 = int((t_start + self.global_t_start) * 1e6)
+        if t_stop is not None:
+            ts1 = int((t_stop + self.global_t_start) * 1e6)
+
+        keep = (ts >= ts0) & (ts <= ts1) & (unit_id == data['unit_id'])
+        timestamps = ts[keep]
+        return timestamps
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        spike_times = spike_timestamps.astype(dtype)
+        spike_times /= 1e6
+        spike_times -= self.global_t_start
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index,
+                                 t_start, t_stop):
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+        data = self._spike_memmap[chan_id]
+        ts = data['timestamp']
+
+        ts0, ts1 = self._timestamp_limits[seg_index]
+        if t_start is not None:
+            ts0 = int((t_start + self.global_t_start) * 1e6)
+        if t_stop is not None:
+            ts1 = int((t_stop + self.global_t_start) * 1e6)
+
+        keep = (ts >= ts0) & (ts <= ts1) & (unit_id == data['unit_id'])
+
+        wfs = data[keep]['samples']
+        if wfs.ndim == 2:
+            # case for nse
+            waveforms = wfs[:, None, :]
+        else:
+            # case for ntt change (n, 32, 4) to (n, 4, 32)
+            waveforms = wfs.swapaxes(1, 2)
+
+        return waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        event_id, ttl_input = self.internal_event_ids[event_channel_index]
+        chan_id = self.header['event_channels'][event_channel_index]['id']
+        data = self._nev_memmap[chan_id]
+        ts0, ts1 = self._timestamp_limits[seg_index]
+        ts = data['timestamp']
+        keep = (ts >= ts0) & (ts <= ts1) & (data['event_id'] == event_id) & \
+               (data['ttl_input'] == ttl_input)
+        nb_event = int(data[keep].size)
+        return nb_event
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        event_id, ttl_input = self.internal_event_ids[event_channel_index]
+        chan_id = self.header['event_channels'][event_channel_index]['id']
+        data = self._nev_memmap[chan_id]
+        ts0, ts1 = self._timestamp_limits[seg_index]
+
+        if t_start is not None:
+            ts0 = int((t_start + self.global_t_start) * 1e6)
+        if t_stop is not None:
+            ts1 = int((t_stop + self.global_t_start) * 1e6)
+
+        ts = data['timestamp']
+        keep = (ts >= ts0) & (ts <= ts1) & (data['event_id'] == event_id) & \
+               (data['ttl_input'] == ttl_input)
+
+        subdata = data[keep]
+        timestamps = subdata['timestamp']
+        labels = subdata['event_string'].astype('U')
+        durations = None
+        return timestamps, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype)
+        event_times /= 1e6
+        event_times -= self.global_t_start
+        return event_times
+
+    def read_ncs_files(self, ncs_filenames):
+        """
+        Given a dict of ncs files (chan_id: filename), construct:
+            * self._sigs_memmap = [ {} for seg_index in range(self._nb_segment) ]
+            * self._sigs_t_start = []
+            * self._sigs_t_stop = []
+            * self._sigs_length = []
+            * self._nb_segment
+            * self._timestamp_limits
+
+        The first file is read entirely to detect gaps in its timestamps;
+        each gap leads to a new segment.
+
+        The other files are not read entirely, but we check that their gaps
+        are at the same places.
+
+        gap_indexes can be taken from the cache (when available) to avoid a
+        full read of the first file.
+
+        """
+        if len(ncs_filenames) == 0:
+            self._nb_segment = 1
+            self._timestamp_limits = None
+            return
+
+        good_delta = int(BLOCK_SIZE * 1e6 / self._sigs_sampling_rate)
+        chan_id0 = list(ncs_filenames.keys())[0]
+        filename0 = ncs_filenames[chan_id0]
+
+        data0 = np.memmap(filename0, dtype=ncs_dtype, mode='r', offset=HEADER_SIZE)
+
+        gap_indexes = None
+        if self.use_cache:
+            gap_indexes = self._cache.get('gap_indexes')
+
+        # detect gaps on first file
+        if gap_indexes is None:
+            # this can be long!!!!
+            timestamps0 = data0['timestamp']
+            deltas0 = np.diff(timestamps0)
+
+            # Ideally this would simply be:
+            #     gap_indexes, = np.nonzero(deltas0 != good_delta)
+            # but in some files many deltas are 15999 or 16000 where a single
+            # constant value is expected, presumably a rounding issue, so the
+            # comparison allows a tolerance of 1 or 2 ticks.
+            mask = deltas0 != good_delta
+            for tolerance in (1, 2):
+                mask &= (deltas0 != good_delta - tolerance)
+                mask &= (deltas0 != good_delta + tolerance)
+            gap_indexes, = np.nonzero(mask)
+
+            if self.use_cache:
+                self.add_in_cache(gap_indexes=gap_indexes)
+
+        gap_bounds = [0] + (gap_indexes + 1).tolist() + [data0.size]
+        self._nb_segment = len(gap_bounds) - 1
+
+        self._sigs_memmap = [{} for seg_index in range(self._nb_segment)]
+        self._sigs_t_start = []
+        self._sigs_t_stop = []
+        self._sigs_length = []
+        self._timestamp_limits = []
+        # create segment with subdata block/t_start/t_stop/length
+        for chan_id, ncs_filename in self.ncs_filenames.items():
+            data = np.memmap(ncs_filename, dtype=ncs_dtype, mode='r', offset=HEADER_SIZE)
+            assert data.size == data0.size, 'ncs files do not have the same data length'
+
+            for seg_index in range(self._nb_segment):
+                i0 = gap_bounds[seg_index]
+                i1 = gap_bounds[seg_index + 1]
+
+                assert data[i0]['timestamp'] == data0[i0][
+                    'timestamp'], 'ncs files do not have the same gaps'
+                assert data[i1 - 1]['timestamp'] == data0[i1 - 1][
+                    'timestamp'], 'ncs files do not have the same gaps'
+
+                subdata = data[i0:i1]
+                self._sigs_memmap[seg_index][chan_id] = subdata
+
+                if chan_id == chan_id0:
+                    ts0 = subdata[0]['timestamp']
+                    ts1 = subdata[-1]['timestamp'] + \
+                          np.uint64(BLOCK_SIZE / self._sigs_sampling_rate * 1e6)
+                    self._timestamp_limits.append((ts0, ts1))
+                    t_start = ts0 / 1e6
+                    self._sigs_t_start.append(t_start)
+                    t_stop = ts1 / 1e6
+                    self._sigs_t_stop.append(t_stop)
+                    length = subdata.size * BLOCK_SIZE
+                    self._sigs_length.append(length)
+
+
+# Helper functions for header key conversion
+def _to_bool(txt):
+    if txt == 'True':
+        return True
+    elif txt == 'False':
+        return False
+    else:
+        raise ValueError('Cannot convert %s to bool' % txt)
+
+
+# keys in the txt header: (pattern, name in the info dict, conversion function)
+txt_header_keys = [
+    ('AcqEntName', 'channel_names', None),  # used
+    ('FileType', '', None),
+    ('FileVersion', '', None),
+    ('RecordSize', '', None),
+    ('HardwareSubSystemName', '', None),
+    ('HardwareSubSystemType', '', None),
+    ('SamplingFrequency', 'sampling_rate', float),  # used
+    ('ADMaxValue', '', None),
+    ('ADBitVolts', 'bit_to_microVolt', None),  # used
+    ('NumADChannels', '', None),
+    ('ADChannel', 'channel_ids', None),  # used
+    ('InputRange', '', None),
+    ('InputInverted', 'input_inverted', _to_bool),  # used
+    ('DSPLowCutFilterEnabled', '', None),
+    ('DspLowCutFrequency', '', None),
+    ('DspLowCutNumTaps', '', None),
+    ('DspLowCutFilterType', '', None),
+    ('DSPHighCutFilterEnabled', '', None),
+    ('DspHighCutFrequency', '', None),
+    ('DspHighCutNumTaps', '', None),
+    ('DspHighCutFilterType', '', None),
+    ('DspDelayCompensation', '', None),
+    ('DspFilterDelay_µs', '', None),
+    ('DisabledSubChannels', '', None),
+    ('WaveformLength', '', int),
+    ('AlignmentPt', '', None),
+    ('ThreshVal', '', None),
+    ('MinRetriggerSamples', '', None),
+    ('SpikeRetriggerTime', '', None),
+    ('DualThresholding', '', None),
+    (r'Feature \w+ \d+', '', None),
+    ('SessionUUID', '', None),
+    ('FileUUID', '', None),
+    ('CheetahRev', 'version', None),  # used, possibility 1 for version
+    ('ProbeName', '', None),
+    ('OriginalFileName', '', None),
+    ('TimeCreated', '', None),
+    ('TimeClosed', '', None),
+    ('ApplicationName Cheetah', 'version', None),  # used, possibility 2 for version
+    ('AcquisitionSystem', '', None),
+    ('ReferenceChannel', '', None),
+]
+
+
+def read_txt_header(filename):
+    """
+    All Neuralynx files contain a 16 kB text header.
+    This function parses it to create an info dict,
+    including the recording datetimes.
+    """
+    with open(filename, 'rb') as f:
+        txt_header = f.read(HEADER_SIZE)
+    txt_header = txt_header.strip(b'\x00').decode('latin-1')
+
+    # find keys
+    info = OrderedDict()
+    for k1, k2, type_ in txt_header_keys:
+        pattern = r'-(?P<name>' + k1 + r') (?P<value>[\S ]*)'
+        matches = re.findall(pattern, txt_header)
+        for match in matches:
+            if k2 == '':
+                name = match[0]
+            else:
+                name = k2
+            value = match[1].rstrip(' ')
+            if type_ is not None:
+                value = type_(value)
+            info[name] = value
+
+    # if channel_ids or channel_names are not in info, the filename is used instead
+    name = os.path.splitext(os.path.basename(filename))[0]
+
+    # convert channel ids
+    if 'channel_ids' in info:
+        chid_entries = re.findall(r'\w+', info['channel_ids'])
+        info['channel_ids'] = [int(c) for c in chid_entries]
+    else:
+        info['channel_ids'] = [name]
+
+    # convert channel names
+    if 'channel_names' in info:
+        name_entries = re.findall(r'\w+', info['channel_names'])
+        if len(name_entries) == 1:
+            info['channel_names'] = name_entries * len(info['channel_ids'])
+        assert len(info['channel_names']) == len(info['channel_ids']), \
+            'Number of channel ids does not match channel names.'
+    else:
+        info['channel_names'] = [name] * len(info['channel_ids'])
+    if 'version' in info:
+        version = info['version'].replace('"', '')
+        info['version'] = distutils.version.LooseVersion(version)
+
+    # convert bit_to_microvolt
+    if 'bit_to_microVolt' in info:
+        btm_entries = re.findall(r'\S+', info['bit_to_microVolt'])
+        if len(btm_entries) == 1:
+            btm_entries = btm_entries * len(info['channel_ids'])
+        info['bit_to_microVolt'] = [float(e) * 1e6 for e in btm_entries]
+        assert len(info['bit_to_microVolt']) == len(info['channel_ids']), \
+            'Number of channel ids does not match bit_to_microVolt conversion factors.'
+
+    if 'InputRange' in info:
+        ir_entries = re.findall(r'\w+', info['InputRange'])
+        if len(ir_entries) == 1:
+            info['InputRange'] = [int(ir_entries[0])] * len(chid_entries)
+        else:
+            info['InputRange'] = [int(e) for e in ir_entries]
+        assert len(info['InputRange']) == len(chid_entries), \
+            'Number of channel ids does not match input range values.'
+
+    # filename and datetime
+    if info['version'] <= distutils.version.LooseVersion('5.6.4'):
+        datetime1_regex = r'## Time Opened \(m/d/y\): (?P<date>\S+)  \(h:m:s\.ms\) (?P<time>\S+)'
+        datetime2_regex = r'## Time Closed \(m/d/y\): (?P<date>\S+)  \(h:m:s\.ms\) (?P<time>\S+)'
+        filename_regex = r'## File Name (?P<filename>\S+)'
+        datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
+    else:
+        datetime1_regex = r'-TimeCreated (?P<date>\S+) (?P<time>\S+)'
+        datetime2_regex = r'-TimeClosed (?P<date>\S+) (?P<time>\S+)'
+        filename_regex = r'-OriginalFileName "?(?P<filename>\S+)"?'
+        datetimeformat = '%Y/%m/%d %H:%M:%S'
+
+    original_filename = re.search(filename_regex, txt_header).groupdict()['filename']
+
+    dt1 = re.search(datetime1_regex, txt_header).groupdict()
+    dt2 = re.search(datetime2_regex, txt_header).groupdict()
+
+    info['recording_opened'] = datetime.datetime.strptime(
+        dt1['date'] + ' ' + dt1['time'], datetimeformat)
+    info['recording_closed'] = datetime.datetime.strptime(
+        dt2['date'] + ' ' + dt2['time'], datetimeformat)
+
+    return info
+
+
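+# each NCS record is a 20-byte header (timestamp, channel_id, sample_rate,
+# nb_valid) followed by BLOCK_SIZE int16 samples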
+ncs_dtype = [('timestamp', 'uint64'), ('channel_id', 'uint32'), ('sample_rate', 'uint32'),
+             ('nb_valid', 'uint32'), ('samples', 'int16', (BLOCK_SIZE,))]
+
+nev_dtype = [
+    ('reserved', '<i2'),
+    ('system_id', '<i2'),
+    ('data_size', '<i2'),
+    ('timestamp', '<u8'),
+    ('event_id', '<i2'),
+    ('ttl_input', '<i2'),
+    ('crc_check', '<i2'),
+    ('dummy1', '<i2'),
+    ('dummy2', '<i2'),
+    ('extra', '<i4', (8,)),
+    ('event_string', 'S128'),
+]
+
+
+def get_nse_or_ntt_dtype(info, ext):
+    """
+    For NSE and NTT files the dtype depends on the header.
+
+    """
+    dtype = [('timestamp', 'uint64'), ('channel_id', 'uint32'), ('unit_id', 'uint32')]
+
+    # count feature
+    nb_feature = 0
+    for k in info.keys():
+        if k.startswith('Feature '):
+            nb_feature += 1
+    dtype += [('features', 'int32', (nb_feature,))]
+
+    # count sample
+    if ext == 'nse':
+        nb_sample = info['WaveformLength']
+        dtype += [('samples', 'int16', (nb_sample,))]
+    elif ext == 'ntt':
+        nb_sample = info['WaveformLength']
+        nb_chan = 4  # check this if not tetrode
+        dtype += [('samples', 'int16', (nb_sample, nb_chan))]
+
+    return dtype

+ 324 - 0
code/python-neo/neo/rawio/neuroexplorerrawio.py

@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from NeuroExplorer (.nex) files.
+
+Note:
+  * NeuroExplorer has introduced a new .nex5 file format
+    with 64-bit timestamps. This is NOT implemented here.
+    If someone has files in that new format, we could also
+    integrate it into neo.
+  * NeuroExplorer now provides its own Python class for
+    reading/writing nex and nex5. This could be useful
+    for testing this class.
+
+Porting NeuroExplorerIO to NeuroExplorerRawIO has some
+limitations because in NeuroExplorer signals can have different
+sampling rates and shapes, so NeuroExplorerRawIO reads only one
+channel at a time.
+
+Documentation for developers:
+http://www.neuroexplorer.com/downloadspage/
+
+
+Author: Samuel Garcia, Luc Estebanez, Mark Hollenbeck
+
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+from collections import OrderedDict
+import datetime
+
+
+class NeuroExplorerRawIO(BaseRawIO):
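+    """
+    Class for reading NeuroExplorer (.nex) files.
+
+    Minimal usage sketch (the file name below is only an illustration):
+
+        >>> reader = NeuroExplorerRawIO(filename='File_neuroexplorer_1.nex')
+        >>> reader.parse_header()
+        >>> print(reader)
+    """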
+    extensions = ['nex']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+        with open(self.filename, 'rb') as fid:
+            self.global_header = read_as_dict(fid, GlobalHeader, offset=0)
+            offset = 544
+            self._entity_headers = []
+            for i in range(self.global_header['nvar']):
+                self._entity_headers.append(read_as_dict(
+                    fid, EntityHeader, offset=offset + i * 208))
+
+        self._memmap = np.memmap(self.filename, dtype='u1', mode='r')
+
+        self._sig_lengths = []
+        self._sig_t_starts = []
+        sig_channels = []
+        unit_channels = []
+        event_channels = []
+        for i in range(self.global_header['nvar']):
+            entity_header = self._entity_headers[i]
+            name = entity_header['name']
+            _id = i
+            if entity_header['type'] == 0:  # Unit
+                unit_channels.append((name, _id, '', 0, 0, 0, 0))
+
+            elif entity_header['type'] == 1:  # Event
+                event_channels.append((name, _id, 'event'))
+
+            elif entity_header['type'] == 2:  # interval = Epoch
+                event_channels.append((name, _id, 'epoch'))
+
+            elif entity_header['type'] == 3:  # spiketrain and waveforms
+                wf_units = 'mV'
+                wf_gain = entity_header['ADtoMV']
+                wf_offset = entity_header['MVOffset']
+                wf_left_sweep = 0
+                wf_sampling_rate = entity_header['WFrequency']
+                unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,
+                                      wf_left_sweep, wf_sampling_rate))
+
+            elif entity_header['type'] == 4:
+                # popvectors
+                pass
+
+            if entity_header['type'] == 5:  # Signals
+                units = 'mV'
+                sampling_rate = entity_header['WFrequency']
+                dtype = 'int16'
+                gain = entity_header['ADtoMV']
+                offset = entity_header['MVOffset']
+                group_id = 0
+                sig_channels.append((name, _id, sampling_rate, dtype, units,
+                                     gain, offset, group_id))
+                self._sig_lengths.append(entity_header['NPointsWave'])
+                # sig t_start is the timestamp of the first data block
+                offset = entity_header['offset']
+                timestamps0 = self._memmap[offset:offset + 4].view('int32')
+                t_start = timestamps0[0] / self.global_header['freq']
+                self._sig_t_starts.append(t_start)
+
+            elif entity_header['type'] == 6:  # Markers
+                event_channels.append((name, _id, 'event'))
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # each signal channel has its own group, which forces reading
+        # channels one by one
+        sig_channels['group_id'] = np.arange(sig_channels.size)
+
+        # fill into header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # Annotations
+        self._generate_minimal_annotations()
+        bl_annotations = self.raw_annotations['blocks'][0]
+        seg_annotations = bl_annotations['segments'][0]
+        for d in (bl_annotations, seg_annotations):
+            d['neuroexplorer_version'] = self.global_header['version']
+            d['comment'] = self.global_header['comment']
+
+    def _segment_t_start(self, block_index, seg_index):
+        t_start = self.global_header['tbeg'] / self.global_header['freq']
+        return t_start
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self.global_header['tend'] / self.global_header['freq']
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        assert len(channel_indexes) == 1, 'only one channel can be read at a time'
+        return self._sig_lengths[channel_indexes[0]]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        assert len(channel_indexes) == 1, 'only one channel can be read at a time'
+        return self._sig_t_starts[channel_indexes[0]]
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        assert len(channel_indexes) == 1, 'only one channel can be read at a time'
+        channel_index = channel_indexes[0]
+        entity_index = int(self.header['signal_channels'][channel_index]['id'])
+        entity_header = self._entity_headers[entity_index]
+        n = entity_header['n']
+        nb_sample = entity_header['NPointsWave']
+        # offset = entity_header['offset']
+        # timestamps = self._memmap[offset:offset+n*4].view('int32')
+        # offset2 = entity_header['offset'] + n*4
+        # fragment_starts = self._memmap[offset2:offset2+n*4].view('int32')
+        offset3 = entity_header['offset'] + n * 4 + n * 4
+        raw_signal = self._memmap[offset3:offset3 + nb_sample * 2].view('int16')
+        raw_signal = raw_signal[slice(i_start, i_stop), None]  # 2D for compliance
+        return raw_signal
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        entity_index = int(self.header['unit_channels'][unit_index]['id'])
+        entity_header = self._entity_headers[entity_index]
+        nb_spike = entity_header['n']
+        return nb_spike
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        entity_index = int(self.header['unit_channels'][unit_index]['id'])
+        entity_header = self._entity_headers[entity_index]
+        n = entity_header['n']
+        offset = entity_header['offset']
+        timestamps = self._memmap[offset:offset + n * 4].view('int32')
+
+        if t_start is not None:
+            keep = timestamps >= int(t_start * self.global_header['freq'])
+            timestamps = timestamps[keep]
+        if t_stop is not None:
+            keep = timestamps <= int(t_stop * self.global_header['freq'])
+            timestamps = timestamps[keep]
+
+        return timestamps
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        spike_times = spike_timestamps.astype(dtype)
+        spike_times /= self.global_header['freq']
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        entity_index = int(self.header['unit_channels'][unit_index]['id'])
+        entity_header = self._entity_headers[entity_index]
+        if entity_header['type'] == 0:
+            return None
+        assert entity_header['type'] == 3
+
+        n = entity_header['n']
+        width = entity_header['NPointsWave']
+        offset = entity_header['offset'] + n * 4  # skip the n int32 timestamps
+        waveforms = self._memmap[offset:offset + n * 2 * width].view('int16')
+        waveforms = waveforms.reshape(n, 1, width)
+
+        return waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        entity_index = int(self.header['event_channels'][event_channel_index]['id'])
+        entity_header = self._entity_headers[entity_index]
+        nb_event = entity_header['n']
+        return nb_event
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        entity_index = int(self.header['event_channels'][event_channel_index]['id'])
+        entity_header = self._entity_headers[entity_index]
+
+        n = entity_header['n']
+        offset = entity_header['offset']
+        timestamps = self._memmap[offset:offset + n * 4].view('int32')
+
+        if t_start is None:
+            i_start = None
+        else:
+            i_start = np.searchsorted(timestamps, int(t_start * self.global_header['freq']))
+        if t_stop is None:
+            i_stop = None
+        else:
+            i_stop = np.searchsorted(timestamps, int(t_stop * self.global_header['freq']))
+        keep = slice(i_start, i_stop)
+
+        timestamps = timestamps[keep]
+
+        if entity_header['type'] == 1:  # Event
+            durations = None
+            labels = np.array([''] * timestamps.size, dtype='U')
+        elif entity_header['type'] == 2:  # Epoch
+            offset2 = offset + n * 4
+            stop_timestamps = self._memmap[offset2:offset2 + n * 4].view('int32')
+            durations = stop_timestamps[keep] - timestamps
+            labels = np.array([''] * timestamps.size, dtype='U')
+        elif entity_header['type'] == 6:  # Marker
+            durations = None
+            offset2 = offset + n * 4 + 64
+            s = entity_header['MarkerLength']
+            labels = self._memmap[offset2:offset2 + s * n].view('S' + str(s))
+            labels = labels[keep].astype('U')
+
+        return timestamps, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype)
+        event_times /= self.global_header['freq']
+        return event_times
+
+    def _rescale_epoch_duration(self, raw_duration, dtype):
+        durations = raw_duration.astype(dtype)
+        durations /= self.global_header['freq']
+        return durations
+
+
+def read_as_dict(fid, dtype, offset=None):
+    """
+    Given a file descriptor
+    and a numpy.dtype of the binary struct return a dict.
+    Make conversion for strings.
+    """
+    if offset is not None:
+        fid.seek(offset)
+    dt = np.dtype(dtype)
+    h = np.frombuffer(fid.read(dt.itemsize), dt)[0]
+    info = OrderedDict()
+    for k in dt.names:
+        v = h[k]
+
+        if dt[k].kind == 'S':
+            v = v.replace(b'\x00', b'')
+            v = v.decode('utf8')
+
+        info[k] = v
+    return info
+
+
+GlobalHeader = [
+    ('signature', 'S4'),
+    ('version', 'int32'),
+    ('comment', 'S256'),
+    ('freq', 'float64'),
+    ('tbeg', 'int32'),
+    ('tend', 'int32'),
+    ('nvar', 'int32'),
+]
+
+EntityHeader = [
+    ('type', 'int32'),
+    ('varVersion', 'int32'),
+    ('name', 'S64'),
+    ('offset', 'int32'),
+    ('n', 'int32'),
+    ('WireNumber', 'int32'),
+    ('UnitNumber', 'int32'),
+    ('Gain', 'int32'),
+    ('Filter', 'int32'),
+    ('XPos', 'float64'),
+    ('YPos', 'float64'),
+    ('WFrequency', 'float64'),
+    ('ADtoMV', 'float64'),
+    ('NPointsWave', 'int32'),
+    ('NMarkers', 'int32'),
+    ('MarkerLength', 'int32'),
+    ('MVOffset', 'float64'),
+    ('dummy', 'S60'),
+]
+
+MarkerHeader = [
+    ('type', 'int32'),
+    ('varVersion', 'int32'),
+    ('name', 'S64'),
+    ('offset', 'int32'),
+    ('n', 'int32'),
+    ('WireNumber', 'int32'),
+    ('UnitNumber', 'int32'),
+    ('Gain', 'int32'),
+    ('Filter', 'int32'),
+]

+ 119 - 0
code/python-neo/neo/rawio/neuroscoperawio.py

@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+"""
+Reading from neuroscope format files.
+Ref: http://neuroscope.sourceforge.net/
+
+It is an old format from Buzsaki's lab
+
+Some old open datasets from spike sorting
+are still using this format.
+
+This IO reads only the signals.
+The following should also be done (but maybe never will be):
+  * SpikeTrain file   '.clu'  '.res'
+  * Event  '.ext.evt'  or '.evt.ext'
+
+Author: Samuel Garcia
+
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+from xml.etree import ElementTree
+
+
+class NeuroScopeRawIO(BaseRawIO):
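+    """
+    Class for reading NeuroScope recordings (an .xml metadata file plus a raw
+    binary .dat file with the same base name).
+
+    Minimal usage sketch (the file name is only an illustration):
+
+        >>> reader = NeuroScopeRawIO(filename='dataset_1.xml')
+        >>> reader.parse_header()
+        >>> print(reader)
+    """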
+    extensions = ['xml', 'dat']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename.replace('.xml', '').replace('.dat', '')
+
+    def _parse_header(self):
+        filename = self.filename.replace('.xml', '').replace('.dat', '')
+
+        tree = ElementTree.parse(filename + '.xml')
+        root = tree.getroot()
+        acq = root.find('acquisitionSystem')
+        nbits = int(acq.find('nBits').text)
+        nb_channel = int(acq.find('nChannels').text)
+        self._sampling_rate = float(acq.find('samplingRate').text)
+        voltage_range = float(acq.find('voltageRange').text)
+        # offset = int(acq.find('offset').text)
+        amplification = float(acq.find('amplification').text)
+
+        # find groups for channels
+        channel_group = {}
+        for grp_index, xml_chx in enumerate(
+                root.find('anatomicalDescription').find('channelGroups').findall('group')):
+            for xml_rc in xml_chx:
+                channel_group[int(xml_rc.text)] = grp_index
+
+        if nbits == 16:
+            sig_dtype = 'int16'
+            gain = voltage_range / (2 ** 16) / amplification / 1000.
+            # ~ elif nbits==32:
+            # Not sure if it is int or float
+            # ~ dt = 'int32'
+            # ~ gain  = voltage_range/2**32/amplification
+        else:
+            raise NotImplementedError('only 16-bit .dat files are supported')
+
+        self._raw_signals = np.memmap(filename + '.dat', dtype=sig_dtype,
+                                      mode='r', offset=0).reshape(-1, nb_channel)
+
+        # signals
+        sig_channels = []
+        for c in range(nb_channel):
+            name = 'ch{}grp{}'.format(c, channel_group[c])
+            chan_id = c
+            units = 'mV'
+            offset = 0.
+            group_id = 0
+            sig_channels.append((name, chan_id, self._sampling_rate,
+                                 sig_dtype, units, gain, offset, group_id))
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # No events
+        event_channels = []
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill into the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        self._generate_minimal_annotations()
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals.shape[0] / self._sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._raw_signals.shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
+        return raw_signals

+ 326 - 0
code/python-neo/neo/rawio/nixrawio.py

@@ -0,0 +1,326 @@
+"""
+RawIO Class for NIX files
+
+The RawIO assumes all segments and all blocks have the same structure.
+It supports all kinds of NEO objects.
+
+Author: Chek Yin Choi
+"""
+
+from __future__ import print_function, division, absolute_import
+from neo.rawio.baserawio import (BaseRawIO, _signal_channel_dtype,
+                                 _unit_channel_dtype, _event_channel_dtype)
+import numpy as np
+try:
+    import nixio as nix
+
+    HAVE_NIX = True
+except ImportError:
+    HAVE_NIX = False
+    nix = None
+
+
+class NIXRawIO(BaseRawIO):
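+    """
+    Class for reading NIX files.
+
+    Minimal usage sketch (the file name is only an illustration):
+
+        >>> reader = NIXRawIO(filename='dataset_1.nix')
+        >>> reader.parse_header()
+        >>> print(reader)
+    """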
+
+    extensions = ['nix']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+
+        self.file = nix.File.open(self.filename, nix.FileMode.ReadOnly)
+        sig_channels = []
+        size_list = []
+        for bl in self.file.blocks:
+            for seg in bl.groups:
+                for da_idx, da in enumerate(seg.data_arrays):
+                    if da.type == "neo.analogsignal":
+                        chan_id = da_idx
+                        ch_name = da.metadata['neo_name']
+                        units = str(da.unit)
+                        dtype = str(da.dtype)
+                        sr = 1 / da.dimensions[0].sampling_interval
+                        da_leng = da.size
+                        if da_leng not in size_list:
+                            size_list.append(da_leng)
+                        group_id = 0
+                        for sid, li_leng in enumerate(size_list):
+                            if li_leng == da_leng:
+                                group_id = sid
+                                # important: group_id is used to store channel groups,
+                                # grouping channels only by their signal length
+                        gain = 1
+                        offset = 0.
+                        sig_channels.append((ch_name, chan_id, sr, dtype,
+                                            units, gain, offset, group_id))
+                break
+            break
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        unit_channels = []
+        unit_name = ""
+        unit_id = ""
+        for bl in self.file.blocks:
+            for seg in bl.groups:
+                for mt in seg.multi_tags:
+                    if mt.type == "neo.spiketrain":
+                        unit_name = mt.metadata['neo_name']
+                        unit_id = mt.id
+                        if mt.features:
+                            wf_units = mt.features[0].data.unit
+                            wf_sampling_rate = 1 / mt.features[0].data.dimensions[
+                                2].sampling_interval
+                        else:
+                            wf_units = None
+                            wf_sampling_rate = 0
+                        wf_gain = 1
+                        wf_offset = 0.
+                        if mt.features and "left_sweep" in mt.features[0].data.metadata:
+                            wf_left_sweep = mt.features[0].data.metadata["left_sweep"]
+                        else:
+                            wf_left_sweep = 0
+                        unit_channels.append((unit_name, unit_id, wf_units, wf_gain,
+                                              wf_offset, wf_left_sweep, wf_sampling_rate))
+                break
+            break
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        event_channels = []
+        event_count = 0
+        epoch_count = 0
+        for bl in self.file.blocks:
+            for seg in bl.groups:
+                for mt in seg.multi_tags:
+                    if mt.type == "neo.event":
+                        ev_name = mt.metadata['neo_name']
+                        ev_id = event_count
+                        event_count += 1
+                        ev_type = "event"
+                        event_channels.append((ev_name, ev_id, ev_type))
+                    if mt.type == "neo.epoch":
+                        ep_name = mt.metadata['neo_name']
+                        ep_id = epoch_count
+                        epoch_count += 1
+                        ep_type = "epoch"
+                        event_channels.append((ep_name, ep_id, ep_type))
+                break
+            break
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        self.da_list = {'blocks': []}
+        for block_index, blk in enumerate(self.file.blocks):
+            d = {'segments': []}
+            self.da_list['blocks'].append(d)
+            for seg_index, seg in enumerate(blk.groups):
+                d = {'signals': []}
+                self.da_list['blocks'][block_index]['segments'].append(d)
+                size_list = []
+                data_list = []
+                da_name_list = []
+                for da in seg.data_arrays:
+                    if da.type == 'neo.analogsignal':
+                        size_list.append(da.size)
+                        data_list.append(da)
+                        da_name_list.append(da.metadata['neo_name'])
+                self.da_list['blocks'][block_index]['segments'][seg_index]['data_size'] = size_list
+                self.da_list['blocks'][block_index]['segments'][seg_index]['data'] = data_list
+                self.da_list['blocks'][block_index]['segments'][seg_index]['ch_name'] = \
+                    da_name_list
+
+        self.unit_list = {'blocks': []}
+        for block_index, blk in enumerate(self.file.blocks):
+            d = {'segments': []}
+            self.unit_list['blocks'].append(d)
+            for seg_index, seg in enumerate(blk.groups):
+                d = {'spiketrains': [], 'spiketrains_id': [], 'spiketrains_unit': []}
+                self.unit_list['blocks'][block_index]['segments'].append(d)
+                st_idx = 0
+                for st in seg.multi_tags:
+                    d = {'waveforms': []}
+                    self.unit_list[
+                        'blocks'][block_index]['segments'][seg_index]['spiketrains_unit'].append(d)
+                    if st.type == 'neo.spiketrain':
+                        seg = self.unit_list['blocks'][block_index]['segments'][seg_index]
+                        seg['spiketrains'].append(st.positions)
+                        seg['spiketrains_id'].append(st.id)
+                        if st.features and st.features[0].data.type == "neo.waveforms":
+                            waveforms = st.features[0].data
+                            if waveforms:
+                                seg['spiketrains_unit'][st_idx]['waveforms'] = waveforms
+                            else:
+                                seg['spiketrains_unit'][st_idx]['waveforms'] = None
+                            # assume one spiketrain one waveform
+                            st_idx += 1
+
+        self.header = {}
+        self.header['nb_block'] = len(self.file.blocks)
+        self.header['nb_segment'] = [len(bl.groups) for bl in self.file.blocks]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        self._generate_minimal_annotations()
+
+    def _segment_t_start(self, block_index, seg_index):
+        t_start = 0
+        for mt in self.file.blocks[block_index].groups[seg_index].multi_tags:
+            if mt.type == "neo.spiketrain":
+                t_start = mt.metadata['t_start']
+        return t_start
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = 0
+        for mt in self.file.blocks[block_index].groups[seg_index].multi_tags:
+            if mt.type == "neo.spiketrain":
+                t_stop = mt.metadata['t_stop']
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = list(range(self.header['signal_channels'].size))
+        ch_idx = channel_indexes[0]
+        size = self.da_list['blocks'][block_index]['segments'][seg_index]['data_size'][ch_idx]
+        return size  # size is per signal, not the sum of all channel_indexes
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = list(range(self.header['signal_channels'].size))
+        ch_idx = channel_indexes[0]
+        da = [da for da in self.file.blocks[block_index].groups[seg_index].data_arrays][ch_idx]
+        sig_t_start = float(da.metadata['t_start'])
+        return sig_t_start  # assume same group_id always same t_start
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+
+        if channel_indexes is None:
+            channel_indexes = list(range(self.header['signal_channels'].size))
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            for c in channel_indexes:
+                i_stop = self.da_list['blocks'][block_index]['segments'][seg_index]['data_size'][c]
+                break
+
+        raw_signals_list = []
+        da_list = self.da_list['blocks'][block_index]['segments'][seg_index]
+        for idx in channel_indexes:
+            da = da_list['data'][idx]
+            raw_signals_list.append(da[i_start:i_stop])
+
+        raw_signals = np.array(raw_signals_list)
+        raw_signals = np.transpose(raw_signals)
+        return raw_signals
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        count = 0
+        head_id = self.header['unit_channels'][unit_index][1]
+        for mt in self.file.blocks[block_index].groups[seg_index].multi_tags:
+            for src in mt.sources:
+                if mt.type == 'neo.spiketrain' and src.type == "neo.unit":
+                    if head_id == src.id:
+                        return len(mt.positions)
+        return count
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        spike_dict = self.unit_list['blocks'][block_index]['segments'][seg_index]['spiketrains']
+        spike_timestamps = spike_dict[unit_index]
+        spike_timestamps = np.transpose(spike_timestamps)
+
+        if t_start is not None or t_stop is not None:
+            lim0 = t_start
+            lim1 = t_stop
+            mask = (spike_timestamps >= lim0) & (spike_timestamps <= lim1)
+            spike_timestamps = spike_timestamps[mask]
+        return spike_timestamps
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        spike_times = spike_timestamps.astype(dtype)
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        # this must return a 3D numpy array (nb_spike, nb_channel, nb_sample)
+        seg = self.unit_list['blocks'][block_index]['segments'][seg_index]
+        waveforms = seg['spiketrains_unit'][unit_index]['waveforms']
+        if not waveforms:
+            return None
+        raw_waveforms = np.array(waveforms)
+
+        if t_start is not None:
+            lim0 = t_start
+            mask = (raw_waveforms >= lim0)
+            raw_waveforms = np.where(mask, raw_waveforms, np.nan)  # use nan to keep the shape
+        if t_stop is not None:
+            lim1 = t_stop
+            mask = (raw_waveforms <= lim1)
+            raw_waveforms = np.where(mask, raw_waveforms, np.nan)
+        return raw_waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        event_count = 0
+        for event in self.file.blocks[block_index].groups[seg_index].multi_tags:
+            if event.type == 'neo.event' or event.type == 'neo.epoch':
+                if event_count == event_channel_index:
+                    return len(event.positions)
+                else:
+                    event_count += 1
+        return event_count
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        timestamp = []
+        labels = []
+        durations = None
+        if event_channel_index is None:
+            raise IndexError
+        for mt in self.file.blocks[block_index].groups[seg_index].multi_tags:
+            if mt.type == "neo.event" or mt.type == "neo.epoch":
+                labels.append(mt.positions.dimensions[0].labels)
+                po = mt.positions
+                if po.type == "neo.event.times" or po.type == "neo.epoch.times":
+                    timestamp.append(po)
+                if self.header['event_channels'][event_channel_index]['type'] == b'epoch' \
+                        and mt.extents:
+                    if mt.extents.type == 'neo.epoch.durations':
+                        durations = np.array(mt.extents)
+                        break
+        timestamp = timestamp[event_channel_index][:]
+        timestamp = np.array(timestamp, dtype="float")
+        labels = labels[event_channel_index][:]
+        labels = np.array(labels, dtype='U')
+        if t_start is not None:
+            keep = timestamp >= t_start
+            timestamp, labels = timestamp[keep], labels[keep]
+
+        if t_stop is not None:
+            keep = timestamp <= t_stop
+            timestamp, labels = timestamp[keep], labels[keep]
+        return timestamp, durations, labels  # only the timestamps go through _rescale_event_timestamp
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype='float64'):
+        ev_unit = ''
+        for mt in self.file.blocks[0].groups[0].multi_tags:
+            if mt.type == "neo.event":
+                ev_unit = mt.positions.unit
+                break
+        if ev_unit == 'ms':
+            event_timestamps /= 1000
+        event_times = event_timestamps.astype(dtype)
+        # otherwise the unit is assumed to be seconds; other units (e.g. us) are not handled yet
+        return event_times  # return in seconds
+
+    def _rescale_epoch_duration(self, raw_duration, dtype='float64'):
+        ep_unit = ''
+        for mt in self.file.blocks[0].groups[0].multi_tags:
+            if mt.type == "neo.epoch":
+                ep_unit = mt.positions.unit
+                break
+        if ep_unit == 'ms':
+            raw_duration /= 1000
+        durations = raw_duration.astype(dtype)
+        # otherwise the unit is assumed to be seconds; other units (e.g. us) are not handled yet
+        return durations  # return in seconds
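+
+# Illustrative note on the unit convention used by the two rescale helpers above
+# (example values are assumptions, not taken from a real file): timestamps and
+# durations stored in 'ms' are divided by 1000 so that the returned times are
+# always expressed in seconds, e.g. np.array([1500., 2500.]) / 1000 -> array([1.5, 2.5])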

+ 523 - 0
code/python-neo/neo/rawio/openephysrawio.py

@@ -0,0 +1,523 @@
+# -*- coding: utf-8 -*-
+"""
+This module implements the OpenEphys format.
+
+Author: Samuel Garcia
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import os
+
+import numpy as np
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+
+RECORD_SIZE = 1024
+HEADER_SIZE = 1024
+
+
+class OpenEphysRawIO(BaseRawIO):
+    """
+    OpenEphys GUI software offers several data formats, see
+    https://open-ephys.atlassian.net/wiki/spaces/OEW/pages/491632/Data+format
+
+    This class implements the legacy OpenEphys format here
+    https://open-ephys.atlassian.net/wiki/spaces/OEW/pages/65667092/Open+Ephys+format
+
+    The OpenEphys group already provides some tools here:
+    https://github.com/open-ephys/analysis-tools/blob/master/OpenEphys.py
+    but (i) there is no package on PyPI and (ii) those tools read everything into memory.
+
+    The format is directory based with several files:
+        * .continuous
+        * .events
+        * .spikes
+
+    This implementation is based on:
+      * this code https://github.com/open-ephys/analysis-tools/blob/master/Python3/OpenEphys.py
+        written by Dan Denman and Josh Siegle
+      * a previous PR by Cristian Tatarau at Charité Berlin
+
+    In contrast to previous code for reading this format, all data here are accessed through
+    memmap, so it is fast and memory-light compared to the legacy code.
+
+    When the acquisition is stopped and restarted, files are named *_2, *_3, etc.
+    In that case this class creates a new Segment. Note that timestamps are reset in this
+    situation.
+
+    Limitations:
+      * Works only if all continuous channels have the same sampling rate, which is a reasonable
+        hypothesis.
+      * When the recording is stopped and restarted, all continuous files will contain gaps.
+        Ideally this would lead to a new Segment, but this use case is not implemented due to
+        its complexity. Instead an error is raised.
+
+    Special cases:
+      * Normally all continuous files have the same first timestamp and length. When this is
+        not the case, all files are clipped to the smallest one so that they are aligned, and
+        a warning is emitted.
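+
+    Usage sketch (a minimal, hedged example; 'recording_dir' is a placeholder for an
+    OpenEphys recording folder and the chunk bounds are assumptions; the method names
+    follow the BaseRawIO API)::
+
+        reader = OpenEphysRawIO(dirname='recording_dir')
+        reader.parse_header()
+        raw_chunk = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
+                                                  i_start=0, i_stop=1024,
+                                                  channel_indexes=None)
+        float_chunk = reader.rescale_signal_raw_to_float(raw_chunk, dtype='float64')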
+    """
+    extensions = []
+    rawmode = 'one-dir'
+
+    def __init__(self, dirname=''):
+        BaseRawIO.__init__(self)
+        self.dirname = dirname
+
+    def _source_name(self):
+        return self.dirname
+
+    def _parse_header(self):
+        info = self._info = explore_folder(self.dirname)
+        nb_segment = info['nb_segment']
+
+        # scan for continuous files
+        self._sigs_memmap = {}
+        self._sig_length = {}
+        self._sig_timestamp0 = {}
+        sig_channels = []
+        for seg_index in range(nb_segment):
+            self._sigs_memmap[seg_index] = {}
+
+            all_sigs_length = []
+            all_first_timestamps = []
+            all_last_timestamps = []
+            all_samplerate = []
+            for continuous_filename in info['continuous'][seg_index]:
+                fullname = os.path.join(self.dirname, continuous_filename)
+                chan_info = read_file_header(fullname)
+
+                s = continuous_filename.replace('.continuous', '').split('_')
+                processor_id, ch_name = s[0], s[1]
+                chan_id = int(ch_name.replace('CH', ''))
+
+                filesize = os.stat(fullname).st_size
+                size = (filesize - HEADER_SIZE) // np.dtype(continuous_dtype).itemsize
+                data_chan = np.memmap(fullname, mode='r', offset=HEADER_SIZE,
+                                        dtype=continuous_dtype, shape=(size, ))
+                self._sigs_memmap[seg_index][chan_id] = data_chan
+
+                all_sigs_length.append(data_chan.size * RECORD_SIZE)
+                all_first_timestamps.append(data_chan[0]['timestamp'])
+                all_last_timestamps.append(data_chan[-1]['timestamp'])
+                all_samplerate.append(chan_info['sampleRate'])
+
+                # check for continuity (no gaps)
+                diff = np.diff(data_chan['timestamp'])
+                assert np.all(diff == RECORD_SIZE), \
+                    'Not continuous timestamps for {}. ' \
+                    'Maybe because recording was paused/stopped.'.format(continuous_filename)
+
+                if seg_index == 0:
+                    # add in channel list
+                    sig_channels.append((ch_name, chan_id, chan_info['sampleRate'],
+                                'int16', 'V', chan_info['bitVolts'], 0., int(processor_id)))
+
+            # In some cases continuous files do not have the same length because
+            # a record block is missing when the OE GUI freezes.
+            # So we need to clip all files to the smallest one.
+            if not all(all_sigs_length[0] == e for e in all_sigs_length) or\
+                    not all(all_first_timestamps[0] == e for e in all_first_timestamps):
+
+                self.logger.warning('Continuous files do not have aligned timestamps; '
+                                    'clipping to make them aligned.')
+
+                first, last = -np.inf, np.inf
+                for chan_id in self._sigs_memmap[seg_index]:
+                    data_chan = self._sigs_memmap[seg_index][chan_id]
+                    if data_chan[0]['timestamp'] > first:
+                        first = data_chan[0]['timestamp']
+                    if data_chan[-1]['timestamp'] < last:
+                        last = data_chan[-1]['timestamp']
+
+                all_sigs_length = []
+                all_first_timestamps = []
+                all_last_timestamps = []
+                for chan_id in self._sigs_memmap[seg_index]:
+                    data_chan = self._sigs_memmap[seg_index][chan_id]
+                    keep = (data_chan['timestamp'] >= first) & (data_chan['timestamp'] <= last)
+                    data_chan = data_chan[keep]
+                    self._sigs_memmap[seg_index][chan_id] = data_chan
+                    all_sigs_length.append(data_chan.size * RECORD_SIZE)
+                    all_first_timestamps.append(data_chan[0]['timestamp'])
+                    all_last_timestamps.append(data_chan[-1]['timestamp'])
+
+            # check that all signals have the same length and timestamp0 for this segment
+            assert all(all_sigs_length[0] == e for e in all_sigs_length),\
+                        'Signals do not all have the same length'
+            assert all(all_first_timestamps[0] == e for e in all_first_timestamps),\
+                        'Signals do not all have the same first timestamp'
+            assert all(all_samplerate[0] == e for e in all_samplerate),\
+                        'Signals do not all have the same sample rate'
+
+            self._sig_length[seg_index] = all_sigs_length[0]
+            self._sig_timestamp0[seg_index] = all_first_timestamps[0]
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+        self._sig_sampling_rate = sig_channels['sampling_rate'][0]  # identical for all channels
+
+        # scan for spikes files
+        unit_channels = []
+
+        if len(info['spikes']) > 0:
+
+            self._spikes_memmap = {}
+            for seg_index in range(nb_segment):
+                self._spikes_memmap[seg_index] = {}
+                for spike_filename in info['spikes'][seg_index]:
+                    fullname = os.path.join(self.dirname, spike_filename)
+                    spike_info = read_file_header(fullname)
+                    spikes_dtype = make_spikes_dtype(fullname)
+
+                    # "STp106.0n0_2.spikes" to "STp106.0n0"
+                    name = spike_filename.replace('.spikes', '')
+                    if seg_index > 0:
+                        name = name.replace('_' + str(seg_index + 1), '')
+
+                    data_spike = np.memmap(fullname, mode='r', offset=HEADER_SIZE,
+                                        dtype=spikes_dtype)
+                    self._spikes_memmap[seg_index][name] = data_spike
+
+            # In each file 'sorted_id' indicates the cluster, i.e. the unit,
+            # so the files of all segments must be scanned to collect the units
+            self._spike_sampling_rate = None
+            for spike_filename_seg0 in info['spikes'][0]:
+                name = spike_filename_seg0.replace('.spikes', '')
+
+                fullname = os.path.join(self.dirname, spike_filename_seg0)
+                spike_info = read_file_header(fullname)
+                if self._spike_sampling_rate is None:
+                    self._spike_sampling_rate = spike_info['sampleRate']
+                else:
+                    assert self._spike_sampling_rate == spike_info['sampleRate'],\
+                        'mismatch in spike sampling rate'
+
+                # scan all segments to collect the unique sorted_ids
+                all_sorted_ids = []
+                for seg_index in range(nb_segment):
+                    data_spike = self._spikes_memmap[seg_index][name]
+                    all_sorted_ids += np.unique(data_spike['sorted_id']).tolist()
+                all_sorted_ids = np.unique(all_sorted_ids)
+
+                # suppose all channels have the same gain
+                wf_units = 'uV'
+                wf_gain = 1000. / data_spike[0]['gains'][0]
+                wf_offset = - (2**15) * wf_gain
+                wf_left_sweep = 0
+                wf_sampling_rate = spike_info['sampleRate']
+
+                # each sorted_id is one channel
+                for sorted_id in all_sorted_ids:
+                    unit_name = "{}#{}".format(name, sorted_id)
+                    unit_id = "{}#{}".format(name, sorted_id)
+                    unit_channels.append((unit_name, unit_id, wf_units,
+                                wf_gain, wf_offset, wf_left_sweep, wf_sampling_rate))
+
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # event files are:
+        #    * all_channels.events (header + binary)  -->  event 0
+        #    * message.events (text based)            -->  event 1, not implemented yet
+        event_channels = []
+        self._events_memmap = {}
+        for seg_index in range(nb_segment):
+            if seg_index == 0:
+                event_filename = 'all_channels.events'
+            else:
+                event_filename = 'all_channels_{}.events'.format(seg_index + 1)
+
+            fullname = os.path.join(self.dirname, event_filename)
+            event_info = read_file_header(fullname)
+            self._event_sampling_rate = event_info['sampleRate']
+            data_event = np.memmap(fullname, mode='r', offset=HEADER_SIZE,
+                                    dtype=events_dtype)
+            self._events_memmap[seg_index] = data_event
+
+        event_channels.append(('all_channels', '', 'event'))
+        # event_channels.append(('message', '', 'event')) # not implemented
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # main header
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [nb_segment]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # Annotate some objects from continuous files
+        self._generate_minimal_annotations()
+        bl_ann = self.raw_annotations['blocks'][0]
+        for seg_index in range(nb_segment):
+            seg_ann = bl_ann['segments'][seg_index]
+            if len(info['continuous']) > 0:
+                fullname = os.path.join(self.dirname, info['continuous'][seg_index][0])
+                chan_info = read_file_header(fullname)
+                seg_ann['openephys_version'] = chan_info['version']
+                bl_ann['openephys_version'] = chan_info['version']
+                seg_ann['date_created'] = chan_info['date_created']
+
+    def _segment_t_start(self, block_index, seg_index):
+        # segment start/stop are defined by the continuous channels
+        return self._sig_timestamp0[seg_index] / self._sig_sampling_rate
+
+    def _segment_t_stop(self, block_index, seg_index):
+        return (self._sig_timestamp0[seg_index] + self._sig_length[seg_index])\
+            / self._sig_sampling_rate
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes=None):
+        return self._sig_length[seg_index]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return self._sig_timestamp0[seg_index] / self._sig_sampling_rate
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = self._sig_length[seg_index]
+
+        block_start = i_start // RECORD_SIZE
+        block_stop = i_stop // RECORD_SIZE + 1
+        sl0 = i_start % RECORD_SIZE
+        sl1 = sl0 + (i_stop - i_start)
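+        # e.g. with RECORD_SIZE = 1024 and assumed values i_start = 1500, i_stop = 3000:
+        # block_start = 1, block_stop = 3, sl0 = 476, sl1 = 1976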
+
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        channel_ids = self.header['signal_channels'][channel_indexes]['id']
+
+        sigs_chunk = np.zeros((i_stop - i_start, len(channel_ids)), dtype='int16')
+        for i, chan_id in enumerate(channel_ids):
+            data = self._sigs_memmap[seg_index][chan_id]
+            sub = data[block_start:block_stop]
+            sigs_chunk[:, i] = sub['samples'].flatten()[sl0:sl1]
+
+        return sigs_chunk
+
+    def _get_spike_slice(self, seg_index, unit_index, t_start, t_stop):
+        name, sorted_id = self.header['unit_channels'][unit_index]['name'].split('#')
+        sorted_id = int(sorted_id)
+        data_spike = self._spikes_memmap[seg_index][name]
+
+        if t_start is None:
+            t_start = self._segment_t_start(0, seg_index)
+        if t_stop is None:
+            t_stop = self._segment_t_stop(0, seg_index)
+        ts0 = int(t_start * self._spike_sampling_rate)
+        ts1 = int(t_stop * self._spike_sampling_rate)
+
+        ts = data_spike['timestamp']
+        keep = (data_spike['sorted_id'] == sorted_id) & (ts >= ts0) & (ts <= ts1)
+        return data_spike, keep
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        data_spike, keep = self._get_spike_slice(seg_index, unit_index, None, None)
+        return np.sum(keep)
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        data_spike, keep = self._get_spike_slice(seg_index, unit_index, t_start, t_stop)
+        return data_spike['timestamp'][keep]
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        spike_times = spike_timestamps.astype(dtype) / self._spike_sampling_rate
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        data_spike, keep = self._get_spike_slice(seg_index, unit_index, t_start, t_stop)
+        nb_chan = data_spike[0]['nb_channel']
+        nb = np.sum(keep)
+        waveforms = data_spike[keep]['samples'].flatten()
+        waveforms = waveforms.reshape(nb, nb_chan, -1)
+        return waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        # assert event_channel_index==0
+        return self._events_memmap[seg_index].size
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        # assert event_channel_index==0
+
+        if t_start is None:
+            t_start = self._segment_t_start(block_index, seg_index)
+        if t_stop is None:
+            t_stop = self._segment_t_stop(block_index, seg_index)
+        ts0 = int(t_start * self._event_sampling_rate)
+        ts1 = int(t_stop * self._event_sampling_rate)
+        ts = self._events_memmap[seg_index]['timestamp']
+        keep = (ts >= ts0) & (ts <= ts1)
+
+        subdata = self._events_memmap[seg_index][keep]
+        timestamps = subdata['timestamp']
+        # the format does not define an obvious label,
+        # so a combination of event_type, processor_id and chan_id is used here
+        labels = np.array(['{}#{}#{}'.format(int(d['event_type']),
+                                int(d['processor_id']), int(d['chan_id'])) for d in subdata])
+        durations = None
+
+        return timestamps, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype) / self._event_sampling_rate
+        return event_times
+
+    def _rescale_epoch_duration(self, raw_duration, dtype):
+        return None
+
+
+continuous_dtype = [('timestamp', 'int64'), ('nb_sample', 'uint16'),
+    ('rec_num', 'uint16'), ('samples', 'int16', RECORD_SIZE),
+    ('markers', 'uint8', 10)]
+
+events_dtype = [('timestamp', 'int64'), ('sample_pos', 'int16'),
+    ('event_type', 'uint8'), ('processor_id', 'uint8'),
+    ('event_id', 'uint8'), ('chan_id', 'uint8'),
+    ('record_num', 'uint16')]
+
+# the dtype is dynamic and depends on nb_channel and nb_sample
+_base_spikes_dtype = [('event_stype', 'uint8'), ('timestamp', 'int64'),
+    ('software_timestamp', 'int64'), ('source_id', 'uint16'),
+    ('nb_channel', 'uint16'), ('nb_sample', 'uint16'),
+    ('sorted_id', 'uint16'), ('electrode_id', 'uint16'),
+    ('within_chan_index', 'uint16'), ('color', 'uint8', 3),
+    ('pca', 'float32', 2), ('sampling_rate', 'uint16'),
+    ('samples', 'uint16', None), ('gains', 'float32', None),
+    ('thresholds', 'uint16', None), ('rec_num', 'uint16')]
+
+
+def make_spikes_dtype(filename):
+    """
+    Given the spike file make the appropriate dtype that depends on:
+      * N - number of channels
+      * M - samples per spike
+    See documentation of file format.
+    """
+
+    # strangely the header does not contain the number of samples per spike,
+    # so this does not work:
+    # spike_info = read_file_header(filename)
+    # N = spike_info['num_channels']
+    # M = ????
+
+    # instead we need to read the very first spike,
+    # falling back to defaults when the file contains no spikes
+    filesize = os.stat(filename).st_size
+    if filesize >= (HEADER_SIZE + 23):
+        with open(filename, mode='rb') as f:
+            # N and M are at offset 1024 + 19 bytes
+            f.seek(HEADER_SIZE + 19)
+            N = np.fromfile(f, np.dtype('<u2'), 1)[0]
+            M = np.fromfile(f, np.dtype('<u2'), 1)[0]
+    else:
+        spike_info = read_file_header(filename)
+        N = spike_info['num_channels']
+        M = 40  # this is in the original code from openephys
+
+    # make a copy
+    spikes_dtype = [e for e in _base_spikes_dtype]
+    spikes_dtype[12] = ('samples', 'uint16', N * M)
+    spikes_dtype[13] = ('gains', 'float32', N)
+    spikes_dtype[14] = ('thresholds', 'uint16', N)
+
+    return spikes_dtype
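+
+# Illustrative sketch (assumed values, not read from a real file): with N = 4
+# channels and M = 40 samples per spike, the dtype returned by make_spikes_dtype
+# has 'samples' with shape (160,), 'gains' with shape (4,) and 'thresholds'
+# with shape (4,).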
+
+
+def explore_folder(dirname):
+    """
+    This explores a folder and dispatches continuous, event and spike
+    files by segment (aka recording session).
+
+    The number of segments is determined with these rules:
+    "100_CH0.continuous" ---> seg_index 0
+    "100_CH0_2.continuous" ---> seg_index 1
+    "100_CH0_N.continuous" ---> seg_index N-1
+    """
+    filenames = os.listdir(dirname)
+
+    info = {}
+    info['nb_segment'] = 0
+    info['continuous'] = {}
+    info['spikes'] = {}
+    for filename in filenames:
+        if filename.endswith('.continuous'):
+            s = filename.replace('.continuous', '').split('_')
+            if len(s) == 2:
+                seg_index = 0
+            else:
+                seg_index = int(s[2]) - 1
+            if seg_index not in info['continuous'].keys():
+                info['continuous'][seg_index] = []
+            info['continuous'][seg_index].append(filename)
+            if (seg_index + 1) > info['nb_segment']:
+                info['nb_segment'] += 1
+        elif filename.endswith('.spikes'):
+            s = filename.replace('.spikes', '').split('_')
+            if len(s) == 1:
+                seg_index = 0
+            else:
+                seg_index = int(s[1]) - 1
+            if seg_index not in info['spikes'].keys():
+                info['spikes'][seg_index] = []
+            info['spikes'][seg_index].append(filename)
+            if (seg_index + 1) > info['nb_segment']:
+                info['nb_segment'] += 1
+
+    # order continuous file by channel number within segment
+    for seg_index, continuous_filenames in info['continuous'].items():
+        channel_ids = []
+        for continuous_filename in continuous_filenames:
+            s = continuous_filename.replace('.continuous', '').split('_')
+            processor_id, ch_name = s[0], s[1]
+            chan_id = int(ch_name.replace('CH', ''))
+            channel_ids.append(chan_id)
+        order = np.argsort(channel_ids)
+        continuous_filenames = [continuous_filenames[i] for i in order]
+        info['continuous'][seg_index] = continuous_filenames
+
+    # order spike files within segment
+    for seg_index, spike_filenames in info['spikes'].items():
+        names = []
+        for spike_filename in spike_filenames:
+            name = spike_filename.replace('.spikes', '')
+            if seg_index > 0:
+                name = name.replace('_' + str(seg_index + 1), '')
+            names.append(name)
+        order = np.argsort(names)
+        spike_filenames = [spike_filenames[i] for i in order]
+        info['spikes'][seg_index] = spike_filenames
+
+    return info
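+
+# Illustrative sketch (hedged; 'recording_dir' and the file names are placeholders):
+# info = explore_folder('recording_dir')
+# info['nb_segment']        # e.g. 2
+# info['continuous'][0]     # e.g. ['100_CH1.continuous', '100_CH2.continuous']
+# info['continuous'][1]     # e.g. ['100_CH1_2.continuous', '100_CH2_2.continuous']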
+
+
+def read_file_header(filename):
+    """Read header information from the first 1024 bytes of an OpenEphys file.
+    See docs.
+    """
+    header = {}
+    with open(filename, mode='rb') as f:
+        # Read the data as a string
+        # Remove newlines and redundant "header." prefixes
+        # The result should be a series of "key = value" strings, separated
+        # by semicolons.
+        header_string = f.read(HEADER_SIZE).replace(b'\n', b'').replace(b'header.', b'')
+
+    # Parse each key = value string separately
+    for pair in header_string.split(b';'):
+        if b'=' in pair:
+            key, value = pair.split(b' = ')
+            key = key.strip().decode('ascii')
+            value = value.strip()
+
+            # Convert some values to numeric
+            if key in ['bitVolts', 'sampleRate']:
+                header[key] = float(value)
+            elif key in ['blockLength', 'bufferSize', 'header_bytes', 'num_channels']:
+                header[key] = int(value)
+            else:
+                # Keep as string
+                header[key] = value.decode('ascii')
+
+    return header
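+
+# Illustrative sketch (hedged; the file name and values are assumptions):
+# hdr = read_file_header('100_CH1.continuous')
+# hdr['sampleRate'], hdr['bitVolts']   # e.g. (30000.0, 0.195)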

+ 514 - 0
code/python-neo/neo/rawio/plexonrawio.py

@@ -0,0 +1,514 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading the old data format from Plexon
+acquisition system (.plx)
+
+Note that Plexon now uses a new format, PL2, which is NOT
+supported by this IO.
+
+Compatible with versions 100 to 106.
+Other versions have not been tested.
+
+This IO is developed thanks to the header file downloadable from:
+http://www.plexon.com/software-downloads
+
+This IO was rewritten in 2017; this was a huge pain because
+the underlying file format is really inefficient.
+The rewrite is based on numpy dtypes and not on Python struct,
+so it should be faster.
+If you use it one day, consider offering the author a beer.
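+
+Usage sketch (a minimal, hedged example; 'recording.plx' is a placeholder filename
+and the indexes are assumptions; the method names follow the BaseRawIO API)::
+
+    reader = PlexonRawIO(filename='recording.plx')
+    reader.parse_header()
+    spike_ts = reader.get_spike_timestamps(block_index=0, seg_index=0, unit_index=0,
+                                           t_start=None, t_stop=None)
+    spike_times = reader.rescale_spike_timestamp(spike_ts, dtype='float64')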
+
+
+Author: Samuel Garcia
+
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+from collections import OrderedDict
+import datetime
+
+
+class PlexonRawIO(BaseRawIO):
+    extensions = ['plx']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+
+        # global header
+        with open(self.filename, 'rb') as fid:
+            offset0 = 0
+            global_header = read_as_dict(fid, GlobalHeader, offset=offset0)
+
+        rec_datetime = datetime.datetime(global_header['Year'],
+                                         global_header['Month'],
+                                         global_header['Day'],
+                                         global_header['Hour'],
+                                         global_header['Minute'],
+                                         global_header['Second'])
+
+        # dsp channels header = spikes and waveforms
+        nb_unit_chan = global_header['NumDSPChannels']
+        offset1 = np.dtype(GlobalHeader).itemsize
+        dspChannelHeaders = np.memmap(self.filename, dtype=DspChannelHeader, mode='r',
+                                      offset=offset1, shape=(nb_unit_chan,))
+
+        # event channel header
+        nb_event_chan = global_header['NumEventChannels']
+        offset2 = offset1 + np.dtype(DspChannelHeader).itemsize * nb_unit_chan
+        eventHeaders = np.memmap(self.filename, dtype=EventChannelHeader, mode='r',
+                                 offset=offset2, shape=(nb_event_chan,))
+
+        # slow channel header = signal
+        nb_sig_chan = global_header['NumSlowChannels']
+        offset3 = offset2 + np.dtype(EventChannelHeader).itemsize * nb_event_chan
+        slowChannelHeaders = np.memmap(self.filename, dtype=SlowChannelHeader, mode='r',
+                                       offset=offset3, shape=(nb_sig_chan,))
+
+        offset4 = offset3 + np.dtype(SlowChannelHeader).itemsize * nb_sig_chan
+
+        # loop over data blocks and put them by type and channel
+        block_headers = {1: {c: [] for c in dspChannelHeaders['Channel']},
+                         4: {c: [] for c in eventHeaders['Channel']},
+                         5: {c: [] for c in slowChannelHeaders['Channel']},
+                         }
+        block_pos = {1: {c: [] for c in dspChannelHeaders['Channel']},
+                     4: {c: [] for c in eventHeaders['Channel']},
+                     5: {c: [] for c in slowChannelHeaders['Channel']},
+                     }
+        data = self._memmap = np.memmap(self.filename, dtype='u1', offset=0, mode='r')
+        pos = offset4
+        while pos < data.size:
+            bl_header = data[pos:pos + 16].view(DataBlockHeader)[0]
+            length = bl_header['NumberOfWaveforms'] * bl_header['NumberOfWordsInWaveform'] * 2 + 16
+            bl_type = int(bl_header['Type'])
+            chan_id = int(bl_header['Channel'])
+            block_headers[bl_type][chan_id].append(bl_header)
+            block_pos[bl_type][chan_id].append(pos)
+            pos += length
+
+        self._last_timestamps = bl_header['UpperByteOf5ByteTimestamp'] * \
+                                2 ** 32 + bl_header['TimeStamp']
+
+        # ... and finalize them in self._data_blocks
+        # for faster access by block type (1, 4, 5)
+        self._data_blocks = {}
+        dt_base = [('pos', 'int64'), ('timestamp', 'int64'), ('size', 'int64')]
+        dtype_by_bltype = {
+            # Spikes and waveforms
+            1: np.dtype(dt_base + [('unit_id', 'uint16'), ('n1', 'uint16'), ('n2', 'uint16'), ]),
+            # Events
+            4: np.dtype(dt_base + [('label', 'uint16'), ]),
+            # Signals
+            5: np.dtype(dt_base + [('cumsum', 'int64'), ]),
+        }
+        for bl_type in block_headers:
+            self._data_blocks[bl_type] = {}
+            for chan_id in block_headers[bl_type]:
+                bl_header = np.array(block_headers[bl_type][chan_id], dtype=DataBlockHeader)
+                bl_pos = np.array(block_pos[bl_type][chan_id], dtype='int64')
+
+                timestamps = bl_header['UpperByteOf5ByteTimestamp'] * \
+                             2 ** 32 + bl_header['TimeStamp']
+
+                n1 = bl_header['NumberOfWaveforms']
+                n2 = bl_header['NumberOfWordsInWaveform']
+                dt = dtype_by_bltype[bl_type]
+                data_block = np.empty(bl_pos.size, dtype=dt)
+                data_block['pos'] = bl_pos + 16
+                data_block['timestamp'] = timestamps
+                data_block['size'] = n1 * n2 * 2
+
+                if bl_type == 1:  # Spikes and waveforms
+                    data_block['unit_id'] = bl_header['Unit']
+                    data_block['n1'] = n1
+                    data_block['n2'] = n2
+                elif bl_type == 4:  # Events
+                    data_block['label'] = bl_header['Unit']
+                elif bl_type == 5:  # Signals
+                    if data_block.size > 0:
+                        # cumulative sum of sample indices for fast access to chunks
+                        data_block['cumsum'][0] = 0
+                        data_block['cumsum'][1:] = np.cumsum(data_block['size'][:-1]) // 2
+
+                self._data_blocks[bl_type][chan_id] = data_block
+
+        # signals channels
+        sig_channels = []
+        all_sig_length = []
+        for chan_index in range(nb_sig_chan):
+            h = slowChannelHeaders[chan_index]
+            name = h['Name'].decode('utf8')
+            chan_id = h['Channel']
+            length = self._data_blocks[5][chan_id]['size'].sum() // 2
+            if length == 0:
+                continue  # channel not added
+            all_sig_length.append(length)
+            sampling_rate = float(h['ADFreq'])
+            sig_dtype = 'int16'
+            units = ''  # units are not specified in the file header
+            if global_header['Version'] in [100, 101]:
+                gain = 5000. / (2048 * h['Gain'] * 1000.)
+            elif global_header['Version'] in [102]:
+                gain = 5000. / (2048 * h['Gain'] * h['PreampGain'])
+            elif global_header['Version'] >= 103:
+                gain = global_header['SlowMaxMagnitudeMV'] / (
+                    .5 * (2 ** global_header['BitsPerSlowSample']) *
+                    h['Gain'] * h['PreampGain'])
+            offset = 0.
+            group_id = 0
+            sig_channels.append((name, chan_id, sampling_rate, sig_dtype,
+                                 units, gain, offset, group_id))
+        if len(all_sig_length) > 0:
+            self._signal_length = min(all_sig_length)
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        self._global_ssampling_rate = global_header['ADFrequency']
+        if slowChannelHeaders.size > 0:
+            assert np.unique(slowChannelHeaders['ADFreq']
+                             ).size == 1, 'Signals do not all have the same sampling rate'
+            self._sig_sampling_rate = float(slowChannelHeaders['ADFreq'][0])
+
+        # Determine the number of units per channel
+        self.internal_unit_ids = []
+        for chan_id, data_block in self._data_blocks[1].items():
+            unit_ids = np.unique(data_block['unit_id'])
+            for unit_id in unit_ids:
+                self.internal_unit_ids.append((chan_id, unit_id))
+
+        # Spikes channels
+        unit_channels = []
+        for unit_index, (chan_id, unit_id) in enumerate(self.internal_unit_ids):
+            c = np.nonzero(dspChannelHeaders['Channel'] == chan_id)[0][0]
+            h = dspChannelHeaders[c]
+
+            name = h['Name'].decode('utf8')
+            _id = 'ch{}#{}'.format(chan_id, unit_id)
+            wf_units = ''
+            if global_header['Version'] < 103:
+                wf_gain = 3000. / (2048 * h['Gain'] * 1000.)
+            elif 103 <= global_header['Version'] < 105:
+                wf_gain = global_header['SpikeMaxMagnitudeMV'] / (
+                    .5 * 2. ** (global_header['BitsPerSpikeSample']) *
+                    h['Gain'] * 1000.)
+            elif global_header['Version'] >= 105:
+                wf_gain = global_header['SpikeMaxMagnitudeMV'] / (
+                    .5 * 2. ** (global_header['BitsPerSpikeSample']) *
+                    h['Gain'] * global_header['SpikePreAmpGain'])
+            wf_offset = 0.
+            wf_left_sweep = -1  # unknown
+            wf_sampling_rate = global_header['WaveformFreq']
+            unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,
+                                  wf_left_sweep, wf_sampling_rate))
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # Event channels
+        event_channels = []
+        for chan_index in range(nb_event_chan):
+            h = eventHeaders[chan_index]
+            chan_id = h['Channel']
+            name = h['Name'].decode('utf8')
+            _id = h['Channel']
+            event_channels.append((name, _id, 'event'))
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # fill the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # Annotations
+        self._generate_minimal_annotations()
+        bl_annotations = self.raw_annotations['blocks'][0]
+        seg_annotations = bl_annotations['segments'][0]
+        for d in (bl_annotations, seg_annotations):
+            d['rec_datetime'] = rec_datetime
+            d['plexon_version'] = global_header['Version']
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop1 = float(self._last_timestamps) / self._global_ssampling_rate
+        if hasattr(self, '_signal_length'):
+            t_stop2 = self._signal_length / self._sig_sampling_rate
+            return max(t_stop1, t_stop2)
+        else:
+            return t_stop1
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._signal_length
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = self._signal_length
+
+        if channel_indexes is None:
+            channel_indexes = np.arange(self.header['signal_channels'].size)
+
+        raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype='int16')
+        for c, channel_index in enumerate(channel_indexes):
+            chan_header = self.header['signal_channels'][channel_index]
+            chan_id = chan_header['id']
+
+            data_blocks = self._data_blocks[5][chan_id]
+
+            # loop over data blocks and get chunks
+            bl0 = np.searchsorted(data_blocks['cumsum'], i_start, side='left')
+            bl1 = np.searchsorted(data_blocks['cumsum'], i_stop, side='left')
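+            # 'cumsum' holds the first sample index of each data block (in samples, not bytes)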
+            ind = 0
+            for bl in range(bl0, bl1):
+                ind0 = data_blocks[bl]['pos']
+                ind1 = data_blocks[bl]['size'] + ind0
+                data = self._memmap[ind0:ind1].view('int16')
+                if bl == bl1 - 1:
+                    # right border
+                    # be careful: bl can be both bl0 and bl1 - 1!
+                    border = data.size - (i_stop - data_blocks[bl]['cumsum'])
+                    if border > 0:
+                        data = data[:-border]
+                if bl == bl0:
+                    # left border
+                    border = i_start - data_blocks[bl]['cumsum']
+                    data = data[border:]
+                raw_signals[ind:data.size + ind, c] = data
+                ind += data.size
+
+        return raw_signals
+
+    def _get_internal_mask(self, data_block, t_start, t_stop):
+        timestamps = data_block['timestamp']
+
+        if t_start is None:
+            lim0 = 0
+        else:
+            lim0 = int(t_start * self._global_ssampling_rate)
+
+        if t_stop is None:
+            lim1 = self._last_timestamps
+        else:
+            lim1 = int(t_stop * self._global_ssampling_rate)
+
+        keep = (timestamps >= lim0) & (timestamps <= lim1)
+
+        return keep
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+        data_block = self._data_blocks[1][chan_id]
+        nb_spike = np.sum(data_block['unit_id'] == unit_id)
+        return nb_spike
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+        data_block = self._data_blocks[1][chan_id]
+
+        keep = self._get_internal_mask(data_block, t_start, t_stop)
+        keep &= data_block['unit_id'] == unit_id
+        spike_timestamps = data_block[keep]['timestamp']
+
+        return spike_timestamps
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        spike_times = spike_timestamps.astype(dtype)
+        spike_times /= self._global_ssampling_rate
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+        data_block = self._data_blocks[1][chan_id]
+
+        n1 = data_block['n1'][0]
+        n2 = data_block['n2'][0]
+
+        keep = self._get_internal_mask(data_block, t_start, t_stop)
+        keep &= data_block['unit_id'] == unit_id
+
+        data_block = data_block[keep]
+        nb_spike = data_block.size
+
+        waveforms = np.zeros((nb_spike, n1, n2), dtype='int16')
+        for i, db in enumerate(data_block):
+            ind0 = db['pos']
+            ind1 = db['size'] + ind0
+            data = self._memmap[ind0:ind1].view('int16').reshape(n1, n2)
+            waveforms[i, :, :] = data
+
+        return waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        chan_id = int(self.header['event_channels'][event_channel_index]['id'])
+        nb_event = self._data_blocks[4][chan_id].size
+        return nb_event
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        chan_id = int(self.header['event_channels'][event_channel_index]['id'])
+        data_block = self._data_blocks[4][chan_id]
+        keep = self._get_internal_mask(data_block, t_start, t_stop)
+
+        db = data_block[keep]
+        timestamps = db['timestamp']
+        labels = db['label'].astype('U')
+        durations = None
+
+        return timestamps, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype)
+        event_times /= self._global_ssampling_rate
+        return event_times
+
+
+def read_as_dict(fid, dtype, offset=None):
+    """
+    Given a file descriptor
+    and a numpy.dtype of the binary struct return a dict.
+    Make conversion for strings.
+    """
+    if offset is not None:
+        fid.seek(offset)
+    dt = np.dtype(dtype)
+    h = np.frombuffer(fid.read(dt.itemsize), dt)[0]
+    info = OrderedDict()
+    for k in dt.names:
+        v = h[k]
+
+        if dt[k].kind == 'S':
+            v = v.decode('utf8')
+            v = v.replace('\x03', '')
+            v = v.replace('\x00', '')
+
+        info[k] = v
+    return info
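+
+# Illustrative sketch (hedged; 'recording.plx' is a placeholder filename):
+# with open('recording.plx', 'rb') as fid:
+#     info = read_as_dict(fid, GlobalHeader, offset=0)
+# info['Version'], info['ADFrequency']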
+
+
+GlobalHeader = [
+    ('MagicNumber', 'uint32'),
+    ('Version', 'int32'),
+    ('Comment', 'S128'),
+    ('ADFrequency', 'int32'),
+    ('NumDSPChannels', 'int32'),
+    ('NumEventChannels', 'int32'),
+    ('NumSlowChannels', 'int32'),
+    ('NumPointsWave', 'int32'),
+    ('NumPointsPreThr', 'int32'),
+    ('Year', 'int32'),
+    ('Month', 'int32'),
+    ('Day', 'int32'),
+    ('Hour', 'int32'),
+    ('Minute', 'int32'),
+    ('Second', 'int32'),
+    ('FastRead', 'int32'),
+    ('WaveformFreq', 'int32'),
+    ('LastTimestamp', 'float64'),
+
+    # version >103
+    ('Trodalness', 'uint8'),
+    ('DataTrodalness', 'uint8'),
+    ('BitsPerSpikeSample', 'uint8'),
+    ('BitsPerSlowSample', 'uint8'),
+    ('SpikeMaxMagnitudeMV', 'uint16'),
+    ('SlowMaxMagnitudeMV', 'uint16'),
+
+    # version 105
+    ('SpikePreAmpGain', 'uint16'),
+
+    # version 106
+    ('AcquiringSoftware', 'S18'),
+    ('ProcessingSoftware', 'S18'),
+
+    ('Padding', 'S10'),
+
+    # all version
+    ('TSCounts', 'int32', (650,)),
+    ('WFCounts', 'int32', (650,)),
+    ('EVCounts', 'int32', (512,)),
+
+]
+
+DspChannelHeader = [
+    ('Name', 'S32'),
+    ('SIGName', 'S32'),
+    ('Channel', 'int32'),
+    ('WFRate', 'int32'),
+    ('SIG', 'int32'),
+    ('Ref', 'int32'),
+    ('Gain', 'int32'),
+    ('Filter', 'int32'),
+    ('Threshold', 'int32'),
+    ('Method', 'int32'),
+    ('NUnits', 'int32'),
+    ('Template', 'uint16', (320,)),
+    ('Fit', 'int32', (5,)),
+    ('SortWidth', 'int32'),
+    ('Boxes', 'uint16', (40,)),
+    ('SortBeg', 'int32'),
+    # version 105
+    ('Comment', 'S128'),
+    # version 106
+    ('SrcId', 'uint8'),
+    ('reserved', 'uint8'),
+    ('ChanId', 'uint16'),
+
+    ('Padding', 'int32', (10,)),
+]
+
+EventChannelHeader = [
+    ('Name', 'S32'),
+    ('Channel', 'int32'),
+    # version 105
+    ('Comment', 'S128'),
+    # version 106
+    ('SrcId', 'uint8'),
+    ('reserved', 'uint8'),
+    ('ChanId', 'uint16'),
+
+    ('Padding', 'int32', (32,)),
+]
+
+SlowChannelHeader = [
+    ('Name', 'S32'),
+    ('Channel', 'int32'),
+    ('ADFreq', 'int32'),
+    ('Gain', 'int32'),
+    ('Enabled', 'int32'),
+    ('PreampGain', 'int32'),
+    # version 104
+    ('SpikeChannel', 'int32'),
+    # version 105
+    ('Comment', 'S128'),
+    # version 106
+    ('SrcId', 'uint8'),
+    ('reserved', 'uint8'),
+    ('ChanId', 'uint16'),
+
+    ('Padding', 'int32', (27,)),
+]
+
+DataBlockHeader = [
+    ('Type', 'uint16'),
+    ('UpperByteOf5ByteTimestamp', 'uint16'),
+    ('TimeStamp', 'int32'),
+    ('Channel', 'uint16'),
+    ('Unit', 'uint16'),
+    ('NumberOfWaveforms', 'uint16'),
+    ('NumberOfWordsInWaveform', 'uint16'),
+]  # 16 bytes

+ 107 - 0
code/python-neo/neo/rawio/rawbinarysignalrawio.py

@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from a raw binary interleaved compact file.
+Sampling rate, units, number of channels and dtype must be known externally.
+This generic format is widely used in old acquisition systems
+and is fairly universal for sharing data.
+
+The write part of this IO is only available at the neo.io level through the
+companion class RawBinarySignalIO.
+
+Important release note:
+  * Since neo 0.6.0 and the neo.rawio API, the arguments of the IO
+    (dtype, nb_channel, sampling_rate) must be given at __init__ and
+    not at read_segment(), because there is no read_segment() in
+    neo.rawio classes.
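+
+Usage sketch (a minimal, hedged example; the file name and parameter values are
+placeholders; the method names follow the BaseRawIO API)::
+
+    reader = RawBinarySignalRawIO('signals.raw', dtype='int16',
+                                  sampling_rate=10000., nb_channel=2)
+    reader.parse_header()
+    raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
+                                        i_start=0, i_stop=1024, channel_indexes=None)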
+
+
+Author: Samuel Garcia
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import os
+import sys
+
+
+class RawBinarySignalRawIO(BaseRawIO):
+    extensions = ['raw', '*']
+    rawmode = 'one-file'
+
+    def __init__(self, filename='', dtype='int16', sampling_rate=10000.,
+                 nb_channel=2, signal_gain=1., signal_offset=0., bytesoffset=0):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+        self.dtype = dtype
+        self.sampling_rate = sampling_rate
+        self.nb_channel = nb_channel
+        self.signal_gain = signal_gain
+        self.signal_offset = signal_offset
+        self.bytesoffset = bytesoffset
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+
+        if os.path.exists(self.filename):
+            self._raw_signals = np.memmap(self.filename, dtype=self.dtype, mode='r',
+                                          offset=self.bytesoffset).reshape(-1, self.nb_channel)
+        else:
+            # in that case neo.io.RawBinarySignalIO is only used for write_segment
+            self._raw_signals = None
+
+        sig_channels = []
+        if self._raw_signals is not None:
+            for c in range(self.nb_channel):
+                name = 'ch{}'.format(c)
+                chan_id = c
+                units = ''
+                group_id = 0
+                sig_channels.append((name, chan_id, self.sampling_rate, self.dtype,
+                                     units, self.signal_gain, self.signal_offset, group_id))
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # No events
+        event_channels = []
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # insert some annotation at some place
+        self._generate_minimal_annotations()
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals.shape[0] / self.sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._raw_signals.shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
+
+        return raw_signals

+ 155 - 0
code/python-neo/neo/rawio/rawmcsrawio.py

@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from "Raw" Multi Channel System (MCS) format.
+This format is NOT the native MCS format (*.mcd).
+This format is a raw format with an internal binary header exported by the
+"MC_DataTool binary conversion" with the option header selected.
+
+The internal header contains sampling rate, channel names, gain and units.
+Not so bad: everything that Neo needs, so this IO is without parameters.
+
+If some MCS customers read this you should lobby to get the real specification
+of the real MCS format (.mcd), then an IO module for the native MCS format
+could be written instead of this ersatz.
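+
+Usage sketch (a minimal, hedged example; 'mcs_data.raw' is a placeholder filename;
+the method names follow the BaseRawIO API)::
+
+    reader = RawMCSRawIO(filename='mcs_data.raw')
+    reader.parse_header()
+    # channel names and units parsed from the internal header
+    reader.header['signal_channels']['name'], reader.header['signal_channels']['units']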
+
+Author: Samuel Garcia
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import os
+import sys
+
+
+class RawMCSRawIO(BaseRawIO):
+    extensions = ['raw']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+        self._info = info = parse_mcs_raw_header(self.filename)
+
+        self.dtype = 'uint16'
+        self.sampling_rate = info['sampling_rate']
+        self.nb_channel = len(info['channel_names'])
+
+        self._raw_signals = np.memmap(self.filename, dtype=self.dtype, mode='r',
+                                      offset=info['header_size']).reshape(-1, self.nb_channel)
+
+        sig_channels = []
+        for c in range(self.nb_channel):
+            chan_id = c
+            group_id = 0
+            sig_channels.append((info['channel_names'][c], chan_id, self.sampling_rate,
+                                self.dtype, info['signal_units'], info['signal_gain'],
+                                info['signal_offset'], group_id))
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # No events
+        event_channels = []
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # insert some annotation at some place
+        self._generate_minimal_annotations()
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals.shape[0] / self.sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._raw_signals.shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
+
+        return raw_signals
+
+
+def parse_mcs_raw_header(filename):
+    """
+    This is a from-scratch implementation, with some inspiration
+    (but no code) taken from the following files:
+    https://github.com/spyking-circus/spyking-circus/blob/master/circus/files/mcs_raw_binary.py
+    https://github.com/jeffalstott/Multi-Channel-Systems-Import/blob/master/MCS.py
+    """
+    MAX_HEADER_SIZE = 5000
+
+    with open(filename, mode='rb') as f:
+        raw_header = f.read(MAX_HEADER_SIZE)
+
+        header_size = raw_header.find(b'EOH')
+        assert header_size != -1, 'Error in reading raw mcs header'
+        header_size = header_size + 5
+        raw_header = raw_header[:header_size]
+        raw_header = raw_header.replace(b'\r', b'')
+
+        info = {}
+        info['header_size'] = header_size
+
+        def parse_line(line, key):
+            if key + b' = ' in line:
+                v = line.replace(key, b'').replace(b' ', b'').replace(b'=', b'')
+                return v
+
+        keys = (b'Sample rate', b'ADC zero', b'El', b'Streams')
+
+        for line in raw_header.split(b'\n'):
+            for key in keys:
+                v = parse_line(line, key)
+                if v is None:
+                    continue
+
+                if key == b'Sample rate':
+                    info['sampling_rate'] = float(v)
+
+                elif key == b'ADC zero':
+                    info['adc_zero'] = int(v)
+
+                elif key == b'El':
+                    v = v.decode('Windows-1252')
+                    v = v.replace('/AD', '')
+                    split_pos = 0
+                    while v[split_pos] in '1234567890.':
+                        split_pos += 1
+                        if split_pos == len(v):
+                            split_pos = None
+                            break
+                    assert split_pos is not None, 'Impossible to find units and scaling'
+                    info['signal_gain'] = float(v[:split_pos])
+                    info['signal_units'] = v[split_pos:].replace(u'µ', u'u')
+                    info['signal_offset'] = -info['signal_gain'] * info['adc_zero']
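+                    # e.g. an assumed 'El' entry of '0.1µV/AD' yields
+                    # signal_gain = 0.1 and signal_units = 'uV'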
+
+                elif key == b'Streams':
+                    info['channel_names'] = v.decode('Windows-1252').split(';')
+
+    return info

+ 659 - 0
code/python-neo/neo/rawio/spike2rawio.py

@@ -0,0 +1,659 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data in CED Spike2 files (.smr).
+
+This code is based on:
+ - sonpy, written by Antonio Gonzalez <Antonio.Gonzalez@cantab.net>
+    Available here::
+    http://www.neuro.ki.se/broberger/
+
+and sonpy comes from:
+ - SON Library 2.0 for MATLAB, written by Malcolm Lidierth at
+    King's College London.
+    See http://www.kcl.ac.uk/depsta/biomedical/cfnr/lidierth.html
+
+This IO supports both old (<v6) and new (>v7) Spike2 files.
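+
+Usage sketch (a minimal, hedged example; 'recording.smr' is a placeholder filename
+and the indexes are assumptions; the method names follow the BaseRawIO API)::
+
+    reader = Spike2RawIO(filename='recording.smr')
+    reader.parse_header()
+    ev_ts, durations, labels = reader.get_event_timestamps(block_index=0, seg_index=0,
+                                                           event_channel_index=0)
+    ev_times = reader.rescale_event_timestamp(ev_ts, dtype='float64')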
+
+
+Author: Samuel Garcia
+
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+from collections import OrderedDict
+
+
+class Spike2RawIO(BaseRawIO):
+    """
+
+    """
+    extensions = ['smr']
+    rawmode = 'one-file'
+
+    def __init__(self, filename='', take_ideal_sampling_rate=False, ced_units=True):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+        self.take_ideal_sampling_rate = take_ideal_sampling_rate
+        self.ced_units = ced_units
+
+    def _parse_header(self):
+
+        # get header info and channel_info
+        with open(self.filename, 'rb') as fid:
+            self._global_info = read_as_dict(fid, headerDescription)
+            info = self._global_info
+            if info['system_id'] < 6:
+                info['dtime_base'] = 1e-6
+                info['datetime_detail'] = 0
+                info['datetime_year'] = 0
+
+            self._time_factor = info['us_per_time'] * info['dtime_base']
+
+            self._channel_infos = []
+            for chan_id in range(info['channels']):
+                fid.seek(512 + 140 * chan_id)
+                chan_info = read_as_dict(fid, channelHeaderDescription1)
+
+                if chan_info['kind'] in [1, 6]:
+                    dt = [('scale', 'f4'), ('offset', 'f4'), ('unit', 'S6'), ]
+                    chan_info.update(read_as_dict(fid, dt))
+
+                elif chan_info['kind'] in [7, 9]:
+                    dt = [('min', 'f4'), ('max', 'f4'), ('unit', 'S6'), ]
+                    chan_info.update(read_as_dict(fid, dt))
+
+                elif chan_info['kind'] in [4]:
+                    dt = [('init_low', 'u1'), ('next_low', 'u1'), ]
+                    chan_info.update(read_as_dict(fid, dt))
+
+                if chan_info['kind'] in [1, 6, 7, 9]:
+                    if info['system_id'] < 6:
+                        chan_info.update(read_as_dict(fid, [('divide', 'i2')]))
+                    else:
+                        chan_info.update(read_as_dict(fid, [('interleave', 'i2')]))
+
+                chan_info['type'] = dict_kind[chan_info['kind']]
+
+                if chan_info['blocks'] == 0:
+                    chan_info['t_start'] = 0.  # this means empty signals
+                else:
+                    fid.seek(chan_info['firstblock'])
+                    block_info = read_as_dict(fid, blockHeaderDescription)
+                    chan_info['t_start'] = float(block_info['start_time']) * \
+                        float(info['us_per_time']) * float(info['dtime_base'])
+
+                self._channel_infos.append(chan_info)
+
+        # get the data block index for every channel
+        # run through all data blocks of each channel to prepare chan-to-block maps
+        self._memmap = np.memmap(self.filename, dtype='u1', offset=0, mode='r')
+        self._all_data_blocks = {}
+        self._by_seg_data_blocks = {}
+        for chan_id, chan_info in enumerate(self._channel_infos):
+            data_blocks = []
+            ind = chan_info['firstblock']
+            for b in range(chan_info['blocks']):
+                block_info = self._memmap[ind:ind + 20].view(blockHeaderDescription)[0]
+                data_blocks.append((ind, block_info['items'], 0,
+                                    block_info['start_time'], block_info['end_time']))
+                ind = block_info['succ_block']
+
+            data_blocks = np.array(data_blocks, dtype=[(
+                'pos', 'int32'), ('size', 'int32'), ('cumsum', 'int32'),
+                ('start_time', 'int32'), ('end_time', 'int32')])
+            data_blocks['pos'] += 20  # 20 is the block header size
+
+            self._all_data_blocks[chan_id] = data_blocks
+            self._by_seg_data_blocks[chan_id] = []
+
+        # For all signal channels, detect gaps between data blocks (pauses in the
+        # recording); each gap starts a new Segment.
+        # This part is tricky because we then need to check that all channels share the same pauses.
+        all_gaps_block_ind = {}
+        for chan_id, chan_info in enumerate(self._channel_infos):
+            if chan_info['kind'] in [1, 9]:
+                data_blocks = self._all_data_blocks[chan_id]
+                sig_size = np.sum(self._all_data_blocks[chan_id]['size'])
+                if sig_size > 0:
+                    interval = get_sample_interval(info, chan_info) / self._time_factor
+                    # detect gaps
+                    inter_block_sizes = data_blocks['start_time'][1:] - \
+                        data_blocks['end_time'][:-1]
+                    gaps_block_ind, = np.nonzero(inter_block_sizes > interval)
+                    all_gaps_block_ind[chan_id] = gaps_block_ind
+
+        # find t_start/t_stop for each segment based on the gap indexes
+        self._sig_t_starts = {}
+        self._sig_t_stops = {}
+        if len(all_gaps_block_ind) == 0:
+            # this means no signal channels
+            nb_segment = 1
+            # loop over event/spike channel to get the min/max time
+            t_start, t_stop = None, None
+            for chan_id, chan_info in enumerate(self._channel_infos):
+                data_blocks = self._all_data_blocks[chan_id]
+                if data_blocks.size > 0:
+                    # if t_start is None or data_blocks[0]['start_time']<t_start:
+                    # t_start = data_blocks[0]['start_time']
+                    if t_stop is None or data_blocks[-1]['end_time'] > t_stop:
+                        t_stop = data_blocks[-1]['end_time']
+            # self._seg_t_starts = [t_start]
+            self._seg_t_starts = [0]
+            self._seg_t_stops = [t_stop]
+        else:
+            all_nb_seg = np.array([v.size + 1 for v in all_gaps_block_ind.values()])
+            assert np.all(all_nb_seg[0] == all_nb_seg), \
+                'Signal channels have different pauses, so different nb_segment'
+            nb_segment = int(all_nb_seg[0])
+
+            for chan_id, gaps_block_ind in all_gaps_block_ind.items():
+                data_blocks = self._all_data_blocks[chan_id]
+                self._sig_t_starts[chan_id] = []
+                self._sig_t_stops[chan_id] = []
+
+                for seg_ind in range(nb_segment):
+                    if seg_ind == 0:
+                        first_bl = 0
+                    else:
+                        first_bl = gaps_block_ind[seg_ind - 1] + 1
+                    self._sig_t_starts[chan_id].append(data_blocks[first_bl]['start_time'])
+
+                    if seg_ind < nb_segment - 1:
+                        last_bl = gaps_block_ind[seg_ind]
+                    else:
+                        last_bl = data_blocks.size - 1
+
+                    self._sig_t_stops[chan_id].append(data_blocks[last_bl]['end_time'])
+
+                    in_seg_data_block = data_blocks[first_bl:last_bl + 1]
+                    in_seg_data_block['cumsum'][1:] = np.cumsum(in_seg_data_block['size'][:-1])
+                    self._by_seg_data_blocks[chan_id].append(in_seg_data_block)
+
+            self._seg_t_starts = []
+            self._seg_t_stops = []
+            for seg_ind in range(nb_segment):
+                # there can be a small delay between channels, so take the min/max for t_start/t_stop
+                t_start = min(
+                    self._sig_t_starts[chan_id][seg_ind] for chan_id in self._sig_t_starts)
+                t_stop = max(self._sig_t_stops[chan_id][seg_ind] for chan_id in self._sig_t_stops)
+                self._seg_t_starts.append(t_start)
+                self._seg_t_stops.append(t_stop)
+
+        # create typed channels
+        sig_channels = []
+        unit_channels = []
+        event_channels = []
+
+        self.internal_unit_ids = {}
+        for chan_id, chan_info in enumerate(self._channel_infos):
+            if chan_info['kind'] in [1, 6, 7, 9]:
+                if self.take_ideal_sampling_rate:
+                    sampling_rate = info['ideal_rate']
+                else:
+                    sample_interval = get_sample_interval(info, chan_info)
+                    sampling_rate = (1. / sample_interval)
+
+            name = chan_info['title']
+
+            if chan_info['kind'] in [1, 9]:
+                # AnalogSignal
+                if chan_id not in self._sig_t_starts:
+                    continue
+                units = chan_info['unit']
+                if chan_info['kind'] == 1:  # int16
+                    gain = chan_info['scale'] / 6553.6
+                    offset = chan_info['offset']
+                    sig_dtype = 'int16'
+                elif chan_info['kind'] == 9:  # float32
+                    gain = 1.
+                    offset = 0.
+                    sig_dtype = 'float32'  # RealWave data is stored as float32
+                group_id = 0
+                sig_channels.append((name, chan_id, sampling_rate, sig_dtype,
+                                     units, gain, offset, group_id))
+
+            elif chan_info['kind'] in [2, 3, 4, 5, 8]:
+                # Event
+                event_channels.append((name, chan_id, 'event'))
+
+            elif chan_info['kind'] in [6, 7]:  # SpikeTrain with waveforms
+                wf_units = chan_info['unit']
+                if chan_info['kind'] == 6:
+                    wf_gain = chan_info['scale'] / 6553.6
+                    wf_offset = chan_info['offset']
+                    wf_left_sweep = chan_info['n_extra'] // 4
+                elif chan_info['kind'] == 7:
+                    wf_gain = 1.
+                    wf_offset = 0.
+                    wf_left_sweep = chan_info['n_extra'] // 8
+                wf_sampling_rate = sampling_rate
+                if self.ced_units:
+                    # this is a huge pain because we need
+                    # to jump over all blocks
+                    data_blocks = self._all_data_blocks[chan_id]
+                    dt = get_channel_dtype(chan_info)
+                    unit_ids = set()
+                    for bl in range(data_blocks.size):
+                        ind0 = data_blocks[bl]['pos']
+                        ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
+                        raw_data = self._memmap[ind0:ind1].view(dt)
+                        marker = raw_data['marker'] & 255
+                        unit_ids.update(np.unique(marker))
+                    unit_ids = sorted(list(unit_ids))
+                else:
+                    # All spikes from one channel are grouped into one SpikeTrain
+                    unit_ids = ['all']
+                for unit_id in unit_ids:
+                    unit_index = len(unit_channels)
+                    self.internal_unit_ids[unit_index] = (chan_id, unit_id)
+                    _id = "ch{}#{}".format(chan_id, unit_id)
+                    unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,
+                                          wf_left_sweep, wf_sampling_rate))
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        if len(sig_channels) > 0:
+            # signal channels can have different sampling_rate/dtype/t_start/signal_length...
+            # grouping them is difficult, so each channel = one group
+
+            sig_channels['group_id'] = np.arange(sig_channels.size)
+            self._sig_dtypes = {s['group_id']: np.dtype(s['dtype']) for s in sig_channels}
+
+        # fill into header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [nb_segment]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # Annotations
+        self._generate_minimal_annotations()
+        bl_ann = self.raw_annotations['blocks'][0]
+        bl_ann['system_id'] = info['system_id']
+        seg_ann = bl_ann['segments'][0]
+        seg_ann['system_id'] = info['system_id']
+
+        for c, sig_channel in enumerate(sig_channels):
+            chan_id = sig_channel['id']
+            anasig_an = seg_ann['signals'][c]
+            anasig_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
+            anasig_an['comment'] = self._channel_infos[chan_id]['comment']
+
+        for c, unit_channel in enumerate(unit_channels):
+            chan_id, unit_id = self.internal_unit_ids[c]
+            unit_an = seg_ann['units'][c]
+            unit_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
+            unit_an['comment'] = self._channel_infos[chan_id]['comment']
+
+        for c, event_channel in enumerate(event_channels):
+            chan_id = int(event_channel['id'])
+            ev_an = seg_ann['events'][c]
+            ev_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
+            ev_an['comment'] = self._channel_infos[chan_id]['comment']
+
+    def _source_name(self):
+        return self.filename
+
+    def _segment_t_start(self, block_index, seg_index):
+        return self._seg_t_starts[seg_index] * self._time_factor
+
+    def _segment_t_stop(self, block_index, seg_index):
+        return self._seg_t_stops[seg_index] * self._time_factor
+
+    def _check_channel_indexes(self, channel_indexes):
+        if channel_indexes is None:
+            channel_indexes = slice(None)
+        channel_indexes = np.arange(self.header['signal_channels'].size)[channel_indexes]
+        assert len(channel_indexes) == 1
+        return channel_indexes
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        channel_indexes = self._check_channel_indexes(channel_indexes)
+        chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
+        sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])
+        return sig_size
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        channel_indexes = self._check_channel_indexes(channel_indexes)
+        chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
+        return self._sig_t_starts[chan_id][seg_index] * self._time_factor
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = self._get_signal_size(block_index, seg_index, channel_indexes)
+
+        channel_indexes = self._check_channel_indexes(channel_indexes)
+        chan_index = channel_indexes[0]
+        chan_id = self.header['signal_channels'][chan_index]['id']
+        group_id = self.header['signal_channels'][channel_indexes[0]]['group_id']
+        dt = self._sig_dtypes[group_id]
+
+        raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype=dt)
+        for c, channel_index in enumerate(channel_indexes):
+            # NOTE: this approach is slow because we run through
+            # the file for each channel. The loop should be reversed,
+            # but there is no guarantee that channels share the same data block
+            # indexes, which makes that too difficult.
+            chan_header = self.header['signal_channels'][channel_index]
+            chan_id = chan_header['id']
+            data_blocks = self._by_seg_data_blocks[chan_id][seg_index]
+
+            # loop over data blocks and get chunks
+            bl0 = np.searchsorted(data_blocks['cumsum'], i_start, side='left')
+            bl1 = np.searchsorted(data_blocks['cumsum'], i_stop, side='left')
+            ind = 0
+            for bl in range(bl0, bl1):
+                ind0 = data_blocks[bl]['pos']
+                ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
+                data = self._memmap[ind0:ind1].view(dt)
+                if bl == bl1 - 1:
+                    # right border
+                    # be careful that bl could be both bl0 and bl1!!
+                    border = data.size - (i_stop - data_blocks[bl]['cumsum'])
+                    if border > 0:
+                        data = data[:-border]
+                if bl == bl0:
+                    # left border
+                    border = i_start - data_blocks[bl]['cumsum']
+                    data = data[border:]
+                raw_signals[ind:data.size + ind, c] = data
+                ind += data.size
+        return raw_signals
+
+    def _count_in_time_slice(self, seg_index, chan_id, lim0, lim1, marker_filter=None):
+        # count event or spike in time slice
+        data_blocks = self._all_data_blocks[chan_id]
+        chan_info = self._channel_infos[chan_id]
+        dt = get_channel_dtype(chan_info)
+        nb = 0
+        for bl in range(data_blocks.size):
+            ind0 = data_blocks[bl]['pos']
+            ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
+            raw_data = self._memmap[ind0:ind1].view(dt)
+            ts = raw_data['tick']
+            keep = (ts >= lim0) & (ts <= lim1)
+            if marker_filter is not None:
+                keep2 = (raw_data['marker'] & 255) == marker_filter
+                keep = keep & keep2
+            nb += np.sum(keep)
+            if ts[-1] > lim1:
+                break
+        return nb
+
+    def _get_internal_timestamp_(self, seg_index, chan_id,
+                                 t_start, t_stop, other_field=None, marker_filter=None):
+        chan_info = self._channel_infos[chan_id]
+        # data_blocks = self._by_seg_data_blocks[chan_id][seg_index]
+        data_blocks = self._all_data_blocks[chan_id]
+        dt = get_channel_dtype(chan_info)
+
+        if t_start is None:
+            # lim0 = 0
+            lim0 = self._seg_t_starts[seg_index]
+        else:
+            lim0 = int(t_start / self._time_factor)
+
+        if t_stop is None:
+            # lim1 = 2**32
+            lim1 = self._seg_t_stops[seg_index]
+        else:
+            lim1 = int(t_stop / self._time_factor)
+
+        timestamps = []
+        othervalues = []
+        for bl in range(data_blocks.size):
+            ind0 = data_blocks[bl]['pos']
+            ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
+            raw_data = self._memmap[ind0:ind1].view(dt)
+            ts = raw_data['tick']
+            keep = (ts >= lim0) & (ts <= lim1)
+            if marker_filter is not None:
+                keep2 = (raw_data['marker'] & 255) == marker_filter
+                keep = keep & keep2
+
+            timestamps.append(ts[keep])
+            if other_field is not None:
+                othervalues.append(raw_data[other_field][keep])
+            if ts[-1] > lim1:
+                break
+
+        if len(timestamps) > 0:
+            timestamps = np.concatenate(timestamps)
+        else:
+            timestamps = np.zeros(0, dtype='int16')
+
+        if other_field is None:
+            return timestamps
+        else:
+            if len(timestamps) > 0:
+                othervalues = np.concatenate(othervalues)
+            else:
+                othervalues = np.zeros(0, dtype=dt.fields[other_field][0])
+            return timestamps, othervalues
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+        if self.ced_units:
+            marker_filter = unit_id
+        else:
+            marker_filter = None
+        lim0 = self._seg_t_starts[seg_index]
+        lim1 = self._seg_t_stops[seg_index]
+        return self._count_in_time_slice(seg_index, chan_id,
+                                         lim0, lim1, marker_filter=marker_filter)
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        unit_header = self.header['unit_channels'][unit_index]
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+
+        if self.ced_units:
+            marker_filter = unit_id
+        else:
+            marker_filter = None
+
+        spike_timestamps = self._get_internal_timestamp_(seg_index,
+                                                         chan_id, t_start, t_stop,
+                                                         marker_filter=marker_filter)
+
+        return spike_timestamps
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        spike_times = spike_timestamps.astype(dtype)
+        spike_times *= self._time_factor
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        unit_header = self.header['unit_channels'][unit_index]
+        chan_id, unit_id = self.internal_unit_ids[unit_index]
+
+        if self.ced_units:
+            marker_filter = unit_id
+        else:
+            marker_filter = None
+
+        timestamps, waveforms = self._get_internal_timestamp_(seg_index, chan_id,
+                                                              t_start, t_stop,
+                                                              other_field='waveform',
+                                                              marker_filter=marker_filter)
+
+        waveforms = waveforms.reshape(timestamps.size, 1, -1)
+
+        return waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        event_header = self.header['event_channels'][event_channel_index]
+        chan_id = int(event_header['id'])  # because set to string in header
+        lim0 = self._seg_t_starts[seg_index]
+        lim1 = self._seg_t_stops[seg_index]
+        return self._count_in_time_slice(seg_index, chan_id, lim0, lim1, marker_filter=None)
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        event_header = self.header['event_channels'][event_channel_index]
+        chan_id = int(event_header['id'])  # because set to string in header
+        chan_info = self._channel_infos[chan_id]
+
+        if chan_info['kind'] == 5:
+            timestamps, labels = self._get_internal_timestamp_(seg_index,
+                                                               chan_id, t_start, t_stop,
+                                                               other_field='marker')
+        elif chan_info['kind'] == 8:
+            timestamps, labels = self._get_internal_timestamp_(seg_index,
+                                                               chan_id, t_start, t_stop,
+                                                               other_field='label')
+        else:
+            timestamps = self._get_internal_timestamp_(seg_index,
+                                                       chan_id, t_start, t_stop, other_field=None)
+            labels = np.zeros(timestamps.size, dtype='U')
+
+        labels = labels.astype('U')
+        durations = None
+
+        return timestamps, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        event_times = event_timestamps.astype(dtype)
+        event_times *= self._time_factor
+        return event_times
+
+
+def read_as_dict(fid, dtype):
+    """
+    Given a file descriptor (already seeked to the right place externally)
+    and a numpy.dtype describing the binary struct, return a dict.
+    Strings are decoded and converted.
+    """
+    dt = np.dtype(dtype)
+    h = np.frombuffer(fid.read(dt.itemsize), dt)[0]
+    info = OrderedDict()
+    for k in dt.names:
+        v = h[k]
+
+        if dt[k].kind == 'S':
+            v = v.decode('iso-8859-1')
+            if len(v) > 0:
+                # the first character encodes the actual string length
+                length = ord(v[0])
+                v = v[1:length + 1]
+
+        info[k] = v
+    return info
+
+
+def get_channel_dtype(chan_info):
+    """
+    Get dtype by kind.
+    """
+    if chan_info['kind'] == 1:  # Raw signal
+        dt = 'int16'
+    elif chan_info['kind'] in [2, 3, 4]:  # Event data
+        dt = [('tick', 'i4')]
+    elif chan_info['kind'] in [5]:  # Marker data
+        dt = [('tick', 'i4'), ('marker', 'i4')]
+    elif chan_info['kind'] in [6]:  # AdcMark data (waveform)
+        dt = [('tick', 'i4'), ('marker', 'i4'),
+              # ('adc', 'S%d' % chan_info['n_extra'])]
+              ('waveform', 'int16', chan_info['n_extra'] // 2)]
+    elif chan_info['kind'] in [7]:  # RealMark data (waveform)
+        dt = [('tick', 'i4'), ('marker', 'i4'),
+              # ('real', 'S%d' % chan_info['n_extra'])]
+              ('waveform', 'float32', chan_info['n_extra'] // 4)]
+    elif chan_info['kind'] in [8]:  # TextMark data
+        dt = [('tick', 'i4'), ('marker', 'i4'),
+              ('label', 'S%d' % chan_info['n_extra'])]
+    elif chan_info['kind'] == 9:  # Float signal
+        dt = 'float32'
+    dt = np.dtype(dt)
+    return dt
+
+
+def get_sample_interval(info, chan_info):
+    """
+    Get sample interval for one channel
+    """
+    if info['system_id'] in [1, 2, 3, 4, 5]:  # Before version 5
+        sample_interval = (chan_info['divide'] * info['us_per_time'] *
+                           info['time_per_adc']) * 1e-6
+    else:
+        sample_interval = (chan_info['l_chan_dvd'] *
+                           info['us_per_time'] * info['dtime_base'])
+    return sample_interval
+
+
+# header structures:
+headerDescription = [
+    ('system_id', 'i2'),
+    ('copyright', 'S10'),
+    ('creator', 'S8'),
+    ('us_per_time', 'i2'),
+    ('time_per_adc', 'i2'),
+    ('filestate', 'i2'),
+    ('first_data', 'i4'),  # i8
+    ('channels', 'i2'),
+    ('chan_size', 'i2'),
+    ('extra_data', 'i2'),
+    ('buffersize', 'i2'),
+    ('os_format', 'i2'),
+    ('max_ftime', 'i4'),  # i8
+    ('dtime_base', 'f8'),
+    ('datetime_detail', 'u1'),
+    ('datetime_year', 'i2'),
+    ('pad', 'S52'),
+    ('comment1', 'S80'),
+    ('comment2', 'S80'),
+    ('comment3', 'S80'),
+    ('comment4', 'S80'),
+    ('comment5', 'S80'),
+]
+
+channelHeaderDescription1 = [
+    ('del_size', 'i2'),
+    ('next_del_block', 'i4'),  # i8
+    ('firstblock', 'i4'),  # i8
+    ('lastblock', 'i4'),  # i8
+    ('blocks', 'i2'),
+    ('n_extra', 'i2'),
+    ('pre_trig', 'i2'),
+    ('free0', 'i2'),
+    ('py_sz', 'i2'),
+    ('max_data', 'i2'),
+    ('comment', 'S72'),
+    ('max_chan_time', 'i4'),  # i8
+    ('l_chan_dvd', 'i4'),  # i8
+    ('phy_chan', 'i2'),
+    ('title', 'S10'),
+    ('ideal_rate', 'f4'),
+    ('kind', 'u1'),
+    ('unused1', 'i1'),
+]
+
+blockHeaderDescription = [
+    ('pred_block', 'i4'),  # i8
+    ('succ_block', 'i4'),  # i8
+    ('start_time', 'i4'),  # i8
+    ('end_time', 'i4'),  # i8
+    ('channel_num', 'i2'),
+    ('items', 'i2'),
+]
+
+dict_kind = {
+    0: 'empty',
+    1: 'Adc',
+    2: 'EventFall',
+    3: 'EventRise',
+    4: 'EventBoth',
+    5: 'Marker',
+    6: 'AdcMark',
+    7: 'RealMark',
+    8: 'TextMark',
+    9: 'RealWave',
+}

+ 529 - 0
code/python-neo/neo/rawio/tdtrawio.py

@@ -0,0 +1,529 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from the Tucker-Davis TTank format.
+Terminology:
+TDT holds data in tanks (actually directories), and tanks hold sub-blocks
+(sub-directories).
+A tank corresponds to a neo.Block and a TDT block corresponds to a neo.Segment.
+
+Note that the name Block is ambiguous because it does not refer to the same thing
+in TDT terminology and in neo.
+
+
+In a block directory there are several files:
+  * TSQ: timestamp index of the data
+  * TBK: channel info (and maybe more)
+  * TEV: contains the data: spikes + events + signals (for old versions)
+  * SEV: contains the signals (for new versions)
+  * ./sort/: can contain offline spike-sorting labels for spikes
+     and can be used in place of the TEV.
+
+Units in this IO are not guaranteed.
+
+Author: Samuel Garcia, SummitKwan, Chadwick Boulay
+
+"""
+from __future__ import print_function, division, absolute_import
+# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
+
+from .baserawio import BaseRawIO, _signal_channel_dtype, _unit_channel_dtype, _event_channel_dtype
+
+import numpy as np
+import os
+import re
+from collections import OrderedDict
+
+
+class TdtRawIO(BaseRawIO):
+    rawmode = 'one-dir'
+
+    def __init__(self, dirname='', sortname=''):
+        """
+        'sortname' is used to specify the external sortcode generated by offline spike sorting.
+        If sortname=='PLX', there should be a ./sort/PLX/*.SortResult file in the TDT block,
+        which stores the sortcode for every spike; it defaults to '',
+        which uses the original online sort.
+        """
+        BaseRawIO.__init__(self)
+        if dirname.endswith('/'):
+            dirname = dirname[:-1]
+        self.dirname = dirname
+
+        self.sortname = sortname
+
+    def _source_name(self):
+        return self.dirname
+
+    def _parse_header(self):
+
+        tankname = os.path.basename(self.dirname)
+
+        segment_names = []
+        for segment_name in os.listdir(self.dirname):
+            path = os.path.join(self.dirname, segment_name)
+            if is_tdtblock(path):
+                segment_names.append(segment_name)
+
+        nb_segment = len(segment_names)
+
+        # TBK (channel info)
+        info_channel_groups = None
+        for seg_index, segment_name in enumerate(segment_names):
+            path = os.path.join(self.dirname, segment_name)
+
+            # TBK contain channels
+            tbk_filename = os.path.join(path, tankname + '_' + segment_name + '.Tbk')
+            _info_channel_groups = read_tbk(tbk_filename)
+            if info_channel_groups is None:
+                info_channel_groups = _info_channel_groups
+            else:
+                assert np.array_equal(info_channel_groups,
+                                      _info_channel_groups), 'Channels differ across segments'
+
+        # TEV (mixed data)
+        self._tev_datas = []
+        for seg_index, segment_name in enumerate(segment_names):
+            path = os.path.join(self.dirname, segment_name)
+            tev_filename = os.path.join(path, tankname + '_' + segment_name + '.tev')
+            if os.path.exists(tev_filename):
+                tev_data = np.memmap(tev_filename, mode='r', offset=0, dtype='uint8')
+            else:
+                tev_data = None
+            self._tev_datas.append(tev_data)
+
+        # TSQ index with timestamp
+        self._tsq = []
+        self._seg_t_starts = []
+        self._seg_t_stops = []
+        for seg_index, segment_name in enumerate(segment_names):
+            path = os.path.join(self.dirname, segment_name)
+            tsq_filename = os.path.join(path, tankname + '_' + segment_name + '.tsq')
+            tsq = np.fromfile(tsq_filename, dtype=tsq_dtype)
+            self._tsq.append(tsq)
+            # Start and stop times are only found in the second and last header row, respectively.
+            if tsq[1]['evname'] == chr(EVMARK_STARTBLOCK).encode():
+                self._seg_t_starts.append(tsq[1]['timestamp'])
+            else:
+                self._seg_t_starts.append(np.nan)
+                print('segment start time not found')
+            if tsq[-1]['evname'] == chr(EVMARK_STOPBLOCK).encode():
+                self._seg_t_stops.append(tsq[-1]['timestamp'])
+            else:
+                self._seg_t_stops.append(np.nan)
+                print('segment stop time not found')
+
+            # If there exists an external sortcode in ./sort/[sortname]/*.SortResult
+            #  (generated after offline sorting)
+            if self.sortname != '':
+                try:
+                    for file in os.listdir(os.path.join(path, 'sort', self.sortname)):
+                        if file.endswith(".SortResult"):
+                            sortresult_filename = os.path.join(path, 'sort', self.sortname, file)
+                            # get new sortcode
+                            newsortcode = np.fromfile(sortresult_filename, 'int8')[
+                                1024:]  # first 1024 bytes are header
+                            # update the sort code with the info from this file
+                            tsq['sortcode'][1:-1] = newsortcode
+                            # print('sortcode updated')
+                            break
+                except OSError:
+                    pass
+                except IOError:
+                    pass
+
+        # Re-order segments according to their start times
+        sort_inds = np.argsort(self._seg_t_starts)
+        if not np.array_equal(sort_inds, list(range(nb_segment))):
+            segment_names = [segment_names[x] for x in sort_inds]
+            self._tev_datas = [self._tev_datas[x] for x in sort_inds]
+            self._seg_t_starts = [self._seg_t_starts[x] for x in sort_inds]
+            self._seg_t_stops = [self._seg_t_stops[x] for x in sort_inds]
+            self._tsq = [self._tsq[x] for x in sort_inds]
+        self._global_t_start = self._seg_t_starts[0]
+
+        # signal channels EVTYPE_STREAM
+        signal_channels = []
+        self._sigs_data_buf = {seg_index: {} for seg_index in range(nb_segment)}
+        self._sigs_index = {seg_index: {} for seg_index in range(nb_segment)}
+        self._sig_dtype_by_group = {}  # key = group_id
+        self._sig_sample_per_chunk = {}  # key = group_id
+        self._sigs_lengths = {seg_index: {}
+                              for seg_index in range(nb_segment)}  # key = seg_index then group_id
+        self._sigs_t_start = {seg_index: {}
+                              for seg_index in range(nb_segment)}  # key = seg_index then group_id
+
+        keep = info_channel_groups['TankEvType'] == EVTYPE_STREAM
+        for group_id, info in enumerate(info_channel_groups[keep]):
+            self._sig_sample_per_chunk[group_id] = info['NumPoints']
+
+            for c in range(info['NumChan']):
+                chan_index = len(signal_channels)
+                chan_id = c + 1  # if there are several StoreNames, chan_id is not unique in TDT!
+
+                # loop over segment to get sampling_rate/data_index/data_buffer
+                sampling_rate = None
+                dtype = None
+                for seg_index, segment_name in enumerate(segment_names):
+                    # get data index
+                    tsq = self._tsq[seg_index]
+                    mask = (tsq['evtype'] == EVTYPE_STREAM) & \
+                           (tsq['evname'] == info['StoreName']) & \
+                           (tsq['channel'] == chan_id)
+                    data_index = tsq[mask].copy()
+                    self._sigs_index[seg_index][chan_index] = data_index
+
+                    size = info['NumPoints'] * data_index.size
+                    if group_id not in self._sigs_lengths[seg_index]:
+                        self._sigs_lengths[seg_index][group_id] = size
+                    else:
+                        assert self._sigs_lengths[seg_index][group_id] == size
+
+                    # signal start time, relative to start of segment
+                    t_start = data_index['timestamp'][0]
+                    if group_id not in self._sigs_t_start[seg_index]:
+                        self._sigs_t_start[seg_index][group_id] = t_start
+                    else:
+                        assert self._sigs_t_start[seg_index][group_id] == t_start
+
+                    # sampling_rate and dtype
+                    _sampling_rate = float(data_index['frequency'][0])
+                    _dtype = data_formats[data_index['dataformat'][0]]
+                    if sampling_rate is None:
+                        sampling_rate = _sampling_rate
+                        dtype = _dtype
+                        if group_id not in self._sig_dtype_by_group:
+                            self._sig_dtype_by_group[group_id] = np.dtype(dtype)
+                        else:
+                            assert self._sig_dtype_by_group[group_id] == dtype
+                    else:
+                        assert sampling_rate == _sampling_rate, 'sampling_rate is changing!!!'
+                        assert dtype == _dtype, 'dtype is changing!!!'
+
+                    # data buffer test if SEV file exists otherwise TEV
+                    path = os.path.join(self.dirname, segment_name)
+                    sev_filename = os.path.join(path, tankname + '_' + segment_name + '_'
+                                                + info['StoreName'].decode('ascii')
+                                                + '_ch' + str(chan_id) + '.sev')
+                    if os.path.exists(sev_filename):
+                        data = np.memmap(sev_filename, mode='r', offset=0, dtype='uint8')
+                    else:
+                        data = self._tev_datas[seg_index]
+                    assert data is not None, 'no TEV nor SEV'
+                    self._sigs_data_buf[seg_index][chan_index] = data
+
+                chan_name = '{} {}'.format(info['StoreName'], c + 1)
+                units = 'V'  # WARNING: this is not certain at all
+                gain = 1.
+                offset = 0.
+                signal_channels.append((chan_name, chan_id, sampling_rate, dtype,
+                                        units, gain, offset, group_id))
+        signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)
+
+        # unit channels EVTYPE_SNIP
+        self.internal_unit_ids = {}
+        self._waveforms_size = []
+        self._waveforms_dtype = []
+        unit_channels = []
+        keep = info_channel_groups['TankEvType'] == EVTYPE_SNIP
+        tsq = np.hstack(self._tsq)
+        # If there were no chance that the different TSQ files had different units,
+        #  then we could simply do tsq = self._tsq[0]
+        for info in info_channel_groups[keep]:
+            for c in range(info['NumChan']):
+                chan_id = c + 1
+                mask = (tsq['evtype'] == EVTYPE_SNIP) & \
+                       (tsq['evname'] == info['StoreName']) & \
+                       (tsq['channel'] == chan_id)
+                unit_ids = np.unique(tsq[mask]['sortcode'])
+                for unit_id in unit_ids:
+                    unit_index = len(unit_channels)
+                    self.internal_unit_ids[unit_index] = (info['StoreName'], chan_id, unit_id)
+
+                    unit_name = "ch{}#{}".format(chan_id, unit_id)
+                    wf_units = 'V'
+                    wf_gain = 1.
+                    wf_offset = 0.
+                    wf_left_sweep = info['NumPoints'] // 2
+                    wf_sampling_rate = info['SampleFreq']
+                    unit_channels.append((unit_name, '{}'.format(unit_id),
+                                          wf_units, wf_gain, wf_offset,
+                                          wf_left_sweep, wf_sampling_rate))
+
+                    self._waveforms_size.append(info['NumPoints'])
+                    self._waveforms_dtype.append(np.dtype(data_formats[info['DataFormat']]))
+
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # event channels EVTYPE_STRON
+        event_channels = []
+        keep = info_channel_groups['TankEvType'] == EVTYPE_STRON
+        for info in info_channel_groups[keep]:
+            chan_name = info['StoreName']
+            chan_id = 1
+            event_channels.append((chan_name, chan_id, 'event'))
+
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # fill into header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [nb_segment]
+        self.header['signal_channels'] = signal_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # Annotations only standard ones:
+        self._generate_minimal_annotations()
+
+    def _block_count(self):
+        return 1
+
+    def _segment_count(self, block_index):
+        return self.header['nb_segment'][block_index]
+
+    def _segment_t_start(self, block_index, seg_index):
+        return self._seg_t_starts[seg_index] - self._global_t_start
+
+    def _segment_t_stop(self, block_index, seg_index):
+        return self._seg_t_stops[seg_index] - self._global_t_start
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        group_id = self.header['signal_channels'][channel_indexes[0]]['group_id']
+        size = self._sigs_lengths[seg_index][group_id]
+        return size
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        group_id = self.header['signal_channels'][channel_indexes[0]]['group_id']
+        return self._sigs_t_start[seg_index][group_id] - self._global_t_start
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        # the check that all channel_indexes share the same group_id is done outside (BaseRawIO),
+        # so the first one is identical to the others
+        group_id = self.header['signal_channels'][channel_indexes[0]]['group_id']
+
+        if i_start is None:
+            i_start = 0
+        if i_stop is None:
+            i_stop = self._sigs_lengths[seg_index][group_id]
+
+        dt = self._sig_dtype_by_group[group_id]
+        raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype=dt)
+
+        sample_per_chunk = self._sig_sample_per_chunk[group_id]
+        bl0 = i_start // sample_per_chunk
+        bl1 = int(np.ceil(i_stop / sample_per_chunk))
+        chunk_nb_bytes = sample_per_chunk * dt.itemsize
+
+        for c, channel_index in enumerate(channel_indexes):
+            data_index = self._sigs_index[seg_index][channel_index]
+            data_buf = self._sigs_data_buf[seg_index][channel_index]
+
+            # loop over data blocks and get chunks
+            ind = 0
+            for bl in range(bl0, bl1):
+                ind0 = data_index[bl]['offset']
+                ind1 = ind0 + chunk_nb_bytes
+                data = data_buf[ind0:ind1].view(dt)
+
+                if bl == bl1 - 1:
+                    # right border
+                    # be careful that bl could be both bl0 and bl1!!
+                    border = data.size - (i_stop % sample_per_chunk)
+                    # only trim when i_stop does not fall exactly on a chunk boundary
+                    if i_stop % sample_per_chunk > 0:
+                        data = data[:-border]
+                if bl == bl0:
+                    # left border
+                    border = i_start % sample_per_chunk
+                    data = data[border:]
+
+                raw_signals[ind:data.size + ind, c] = data
+                ind += data.size
+
+        return raw_signals
+
+    def _get_mask(self, tsq, seg_index, evtype, evname, chan_id, unit_id, t_start, t_stop):
+        """Used inside spike and events methods"""
+        mask = (tsq['evtype'] == evtype) & \
+               (tsq['evname'] == evname) & \
+               (tsq['channel'] == chan_id)
+
+        if unit_id is not None:
+            mask &= (tsq['sortcode'] == unit_id)
+
+        if t_start is not None:
+            mask &= tsq['timestamp'] >= (t_start + self._global_t_start)
+
+        if t_stop is not None:
+            mask &= tsq['timestamp'] <= (t_stop + self._global_t_start)
+
+        return mask
+
+    def _spike_count(self, block_index, seg_index, unit_index):
+        store_name, chan_id, unit_id = self.internal_unit_ids[unit_index]
+        tsq = self._tsq[seg_index]
+        mask = self._get_mask(tsq, seg_index, EVTYPE_SNIP, store_name,
+                              chan_id, unit_id, None, None)
+        nb_spike = np.sum(mask)
+        return nb_spike
+
+    def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
+        store_name, chan_id, unit_id = self.internal_unit_ids[unit_index]
+        tsq = self._tsq[seg_index]
+        mask = self._get_mask(tsq, seg_index, EVTYPE_SNIP, store_name,
+                              chan_id, unit_id, t_start, t_stop)
+        timestamps = tsq[mask]['timestamp']
+        timestamps -= self._global_t_start
+        return timestamps
+
+    def _rescale_spike_timestamp(self, spike_timestamps, dtype):
+        # already in s
+        spike_times = spike_timestamps.astype(dtype)
+        return spike_times
+
+    def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
+        store_name, chan_id, unit_id = self.internal_unit_ids[unit_index]
+        tsq = self._tsq[seg_index]
+        mask = self._get_mask(tsq, seg_index, EVTYPE_SNIP, store_name,
+                              chan_id, unit_id, t_start, t_stop)
+        nb_spike = np.sum(mask)
+
+        data = self._tev_datas[seg_index]
+
+        dt = self._waveforms_dtype[unit_index]
+        nb_sample = self._waveforms_size[unit_index]
+        waveforms = np.zeros((nb_spike, 1, nb_sample), dtype=dt)
+
+        for i, e in enumerate(tsq[mask]):
+            ind0 = e['offset']
+            ind1 = ind0 + nb_sample * dt.itemsize
+            waveforms[i, 0, :] = data[ind0:ind1].view(dt)
+
+        return waveforms
+
+    def _event_count(self, block_index, seg_index, event_channel_index):
+        h = self.header['event_channels'][event_channel_index]
+        store_name = h['name'].encode('ascii')
+        tsq = self._tsq[seg_index]
+        chan_id = 0
+        mask = self._get_mask(tsq, seg_index, EVTYPE_STRON, store_name, chan_id, None, None, None)
+        nb_event = np.sum(mask)
+        return nb_event
+
+    def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
+        h = self.header['event_channels'][event_channel_index]
+        store_name = h['name'].encode('ascii')
+        tsq = self._tsq[seg_index]
+        chan_id = 0
+        mask = self._get_mask(tsq, seg_index, EVTYPE_STRON, store_name, chan_id, None, None, None)
+
+        timestamps = tsq[mask]['timestamp']
+        timestamps -= self._global_t_start
+        labels = tsq[mask]['offset'].astype('U')
+        durations = None
+        # TODO: if the user wants events converted to epochs,
+        # EVTYPE_STROFF=258 could be used and durations would then not be None;
+        # this was not implemented in the previous IO.
+        return timestamps, durations, labels
+
+    def _rescale_event_timestamp(self, event_timestamps, dtype):
+        # already in s
+        ev_times = event_timestamps.astype(dtype)
+        return ev_times
+
+
+tbk_field_types = [
+    ('StoreName', 'S4'),
+    ('HeadName', 'S16'),
+    ('Enabled', 'bool'),
+    ('CircType', 'int'),
+    ('NumChan', 'int'),
+    ('StrobeMode', 'int'),
+    ('TankEvType', 'int32'),
+    ('NumPoints', 'int'),
+    ('DataFormat', 'int'),
+    ('SampleFreq', 'float64'),
+]
+
+
+def read_tbk(tbk_filename):
+    """
+    The Tbk file contains a human-readable text header describing
+    the channel group info.
+    """
+    with open(tbk_filename, mode='rb') as f:
+        txt_header = f.read()
+
+    infos = []
+    for chan_grp_header in txt_header.split(b'[STOREHDRITEM]'):
+        if chan_grp_header.startswith(b'[USERNOTEDELIMITER]'):
+            break
+
+        # parse into a dict
+        info = OrderedDict()
+        pattern = br'NAME=(\S+);TYPE=(\S+);VALUE=(\S+);'
+        r = re.findall(pattern, chan_grp_header)
+        for name, _type, value in r:
+            info[name.decode('ascii')] = value
+        infos.append(info)
+
+    # and put into numpy
+    info_channel_groups = np.zeros(len(infos), dtype=tbk_field_types)
+    for i, info in enumerate(infos):
+        for k, dt in tbk_field_types:
+            v = np.dtype(dt).type(info[k])
+            info_channel_groups[i][k] = v
+
+    return info_channel_groups
+
+
+tsq_dtype = [
+    ('size', 'int32'),  # bytes 0-4
+    ('evtype', 'int32'),  # bytes 5-8
+    ('evname', 'S4'),  # bytes 9-12
+    ('channel', 'uint16'),  # bytes 13-14
+    ('sortcode', 'uint16'),  # bytes 15-16
+    ('timestamp', 'float64'),  # bytes 17-24
+    ('offset', 'int64'),  # bytes 25-32
+    ('dataformat', 'int32'),  # bytes 33-36
+    ('frequency', 'float32'),  # bytes 37-40
+]
+
+EVTYPE_UNKNOWN = int('00000000', 16)  # 0
+EVTYPE_STRON = int('00000101', 16)  # 257
+EVTYPE_STROFF = int('00000102', 16)  # 258
+EVTYPE_SCALAR = int('00000201', 16)  # 513
+EVTYPE_STREAM = int('00008101', 16)  # 33025
+EVTYPE_SNIP = int('00008201', 16)  # 33281
+EVTYPE_MARK = int('00008801', 16)  # 34817
+EVTYPE_HASDATA = int('00008000', 16)  # 32768
+EVTYPE_UCF = int('00000010', 16)  # 16
+EVTYPE_PHANTOM = int('00000020', 16)  # 32
+EVTYPE_MASK = int('0000FF0F', 16)  # 65295
+EVTYPE_INVALID_MASK = int('FFFF0000', 16)  # 4294901760
+EVMARK_STARTBLOCK = int('0001', 16)  # 1
+EVMARK_STOPBLOCK = int('0002', 16)  # 2
+
+data_formats = {
+    0: 'float32',
+    1: 'int32',
+    2: 'int16',
+    3: 'int8',
+    4: 'float64',
+}
+
+
+def is_tdtblock(blockpath):
+    """Is tha path a  TDT block (=neo.Segment) ?"""
+    file_ext = list()
+    if os.path.isdir(blockpath):
+        # for every file, get extension, convert to lowercase and append
+        for file in os.listdir(blockpath):
+            file_ext.append(os.path.splitext(file)[1].lower())
+
+    file_ext = set(file_ext)
+    tdt_ext = {'.tbk', '.tdx', '.tev', '.tsq'}
+    if file_ext >= tdt_ext:  # if containing all the necessary files
+        return True
+    else:
+        return False
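+# Illustrative check (the path is hypothetical):
+#   is_tdtblock('/path/to/my_tank/Block-1') -> True when .tbk/.tdx/.tev/.tsq files are present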

+ 1 - 0
code/python-neo/neo/rawio/tests/__init__.py

@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-

+ 161 - 0
code/python-neo/neo/rawio/tests/common_rawio_test.py

@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+'''
+Common tests for RawIOs:
+
+It is copied from neo/test/iotests/common_io_test.py.
+
+The common parts of the code should eventually be shared.
+
+
+The public URL is in url_for_tests.
+
+To deposit new testing files, please create an account at
+gin.g-node.org and upload the files to the NeuralEnsemble/ephy_testing_data
+data repository.
+
+
+'''
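+# A concrete test case is a small subclass of BaseTestRawIO; the snippet below is only
+# a sketch (ExampleRawIO and the entity names are illustrative):
+#
+#     from neo.rawio.examplerawio import ExampleRawIO
+#
+#     class TestExampleRawIO(BaseTestRawIO, unittest.TestCase):
+#         rawioclass = ExampleRawIO
+#         entities_to_test = ['fake1']
+#         files_to_download = ['fake1']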
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+__test__ = False
+
+# url_for_tests = "https://portal.g-node.org/neo/" #This is the old place
+url_for_tests = "https://web.gin.g-node.org/NeuralEnsemble/ephy_testing_data/raw/master/"
+
+import os
+import logging
+import unittest
+
+from neo.rawio.tests.tools import (can_use_network, make_all_directories,
+                                   download_test_file, create_local_temp_dir)
+
+from neo.rawio.tests import rawio_compliance as compliance
+
+
+class BaseTestRawIO(object):
+    '''
+    This class defines common tests for all RawIOs.
+
+    Basically it downloads files from the G-Node portal
+    and checks that the IO works.
+
+    '''
+    # ~ __test__ = False
+
+    # all IO test need to modify this:
+    rawioclass = None  # the IOclass to be tested
+
+    files_to_test = []  # list of files to test compliances
+    files_to_download = []  # when files are at G-Node
+
+    # allow environment to tell avoid using network
+    use_network = can_use_network()
+
+    local_test_dir = None
+
+    def setUp(self):
+        '''
+        Set up the test fixture.  This is run for every test
+        '''
+        self.shortname = self.rawioclass.__name__.lower().replace('rawio', '')
+        self.create_local_dir_if_not_exists()
+        self.download_test_files_if_not_present()
+
+    def create_local_dir_if_not_exists(self):
+        '''
+        Create a local directory to store testing files and return it.
+
+        The directory path is also written to self.local_test_dir
+        '''
+        self.local_test_dir = create_local_temp_dir(self.shortname)
+        return self.local_test_dir
+
+    def download_test_files_if_not_present(self):
+        '''
+        Download the files needed for testing this IO from G-Node.
+        url_for_tests is a global defined at the beginning of this file.
+        '''
+
+        if not self.use_network:
+            raise unittest.SkipTest("Requires download of data from the web")
+
+        url = url_for_tests + self.shortname
+        try:
+            make_all_directories(self.files_to_download, self.local_test_dir)
+            download_test_file(self.files_to_download,
+                               self.local_test_dir, url)
+        except IOError as exc:
+            raise unittest.SkipTest(exc)
+
+    download_test_files_if_not_present.__test__ = False
+
+    def cleanup_file(self, path):
+        '''
+        Remove test files or directories safely.
+        '''
+        cleanup_test_file(self.rawioclass, path, directory=self.local_test_dir)
+
+    def get_filename_path(self, filename):
+        '''
+        Get the path to a filename in the current temporary file directory
+        '''
+        return os.path.join(self.local_test_dir, filename)
+
+    def test_read_all(self):
+        # Read all files in self.entities_to_test
+
+        for entity_name in self.entities_to_test:
+            entity_name = self.get_filename_path(entity_name)
+
+            if self.rawioclass.rawmode.endswith('-file'):
+                reader = self.rawioclass(filename=entity_name)
+            elif self.rawioclass.rawmode.endswith('-dir'):
+                reader = self.rawioclass(dirname=entity_name)
+
+            txt = reader.__repr__()
+            assert 'nb_block' not in txt, 'Before parse_header() nb_block should NOT be known'
+
+            reader.parse_header()
+
+            txt = reader.__repr__()
+            assert 'nb_block' in txt, 'After parse_header() nb_block should be known'
+            # ~ print(txt)
+
+            #
+            txt = reader._repr_annotations()
+            # ~ reader.print_annotations()
+
+            # ~ sigs = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
+            # ~ i_start=None, i_stop=None, channel_indexes=[1])
+            # ~ import matplotlib.pyplot as plt
+            # ~ fig, ax = plt.subplots()
+            # ~ ax.plot(sigs[:, 0])
+            # ~ plt.show()
+
+            # ~ nb_unit = reader.unit_channels_count()
+            # ~ for unit_index in range(nb_unit):
+            # ~ wfs = reader.spike_raw_waveforms(block_index=0, seg_index=0,
+            # ~ unit_index=unit_index)
+            # ~ if wfs is not None:
+            # ~ import matplotlib.pyplot as plt
+            # ~ fig, ax = plt.subplots()
+            # ~ ax.plot(wfs[:, 0, :50].T)
+            # ~ plt.show()
+
+            # launch a series of compliance tests
+            compliance.header_is_total(reader)
+            compliance.count_element(reader)
+            compliance.read_analogsignals(reader)
+            compliance.read_spike_times(reader)
+            compliance.read_spike_waveforms(reader)
+            compliance.read_events(reader)
+            compliance.has_annotations(reader)
+
+            # basic benchmark
+            level = logging.getLogger().getEffectiveLevel()
+            logging.getLogger().setLevel(logging.INFO)
+            compliance.benchmark_speed_read_signals(reader)
+            logging.getLogger().setLevel(level)

+ 349 - 0
code/python-neo/neo/rawio/tests/rawio_compliance.py

@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+"""
+Here is a list of checks for testing neo.rawio API compliance.
+They are called automatically by `BaseTestRawIO`.
+
+All rules are listed as functions, so it is easier to:
+  * identify the rawio API
+  * debug
+  * discuss rules
+
+"""
+import time
+
+if not hasattr(time, 'perf_counter'):
+    time.perf_counter = time.time
+import logging
+
+import numpy as np
+
+from neo.rawio.baserawio import (_signal_channel_dtype, _unit_channel_dtype,
+                                 _event_channel_dtype, _common_sig_characteristics)
+
+
+def print_class(reader):
+    return reader.__class__.__name__
+
+
+def header_is_total(reader):
+    """
+    Test if the header contains:
+      * 'signal_channels'
+      * 'unit_channels'
+      * 'event_channels'
+
+    """
+    h = reader.header
+
+    assert 'signal_channels' in h, 'signal_channels missing in header'
+    if h['signal_channels'] is not None:
+        dt = h['signal_channels'].dtype
+        for k, _ in _signal_channel_dtype:
+            assert k in dt.fields, '%s not in signal_channels.dtype' % k
+
+    assert 'unit_channels' in h, 'unit_channels missing in header'
+    if h['unit_channels'] is not None:
+        dt = h['unit_channels'].dtype
+        for k, _ in _unit_channel_dtype:
+            assert k in dt.fields, '%s not in unit_channels.dtype' % k
+
+    assert 'event_channels' in h, 'event_channels missing in header'
+    if h['event_channels'] is not None:
+        dt = h['event_channels'].dtype
+        for k, _ in _event_channel_dtype:
+            assert k in dt.fields, '%s not in event_channels.dtype' % k
+
+
+def count_element(reader):
+    """
+    Count block/segment/signals/spike/events
+
+    """
+
+    nb_sig = reader.signal_channels_count()
+    nb_unit = reader.unit_channels_count()
+    nb_event_channel = reader.event_channels_count()
+
+    nb_block = reader.block_count()
+    assert nb_block > 0, '{} has {} blocks'.format(print_class(reader), nb_block)
+
+    for block_index in range(nb_block):
+        nb_seg = reader.segment_count(block_index)
+
+        for seg_index in range(nb_seg):
+            t_start = reader.segment_t_start(block_index=block_index, seg_index=seg_index)
+            t_stop = reader.segment_t_stop(block_index=block_index, seg_index=seg_index)
+            assert t_stop > t_start
+
+            if nb_sig > 0:
+                if reader._several_channel_groups:
+                    channel_indexes_list = reader.get_group_channel_indexes()
+                    for channel_indexes in channel_indexes_list:
+                        sig_size = reader.get_signal_size(block_index, seg_index,
+                                                          channel_indexes=channel_indexes)
+                else:
+                    sig_size = reader.get_signal_size(block_index, seg_index,
+                                                      channel_indexes=None)
+
+                for unit_index in range(nb_unit):
+                    nb_spike = reader.spike_count(block_index=block_index, seg_index=seg_index,
+                                                  unit_index=unit_index)
+
+                for event_channel_index in range(nb_event_channel):
+                    nb_event = reader.event_count(block_index=block_index, seg_index=seg_index,
+                                                  event_channel_index=event_channel_index)
+
+
+def iter_over_sig_chunks(reader, channel_indexes, chunksize=1024):
+    if channel_indexes is None:
+        nb_sig = reader.signal_channels_count()
+    else:
+        nb_sig = len(channel_indexes)
+    if nb_sig == 0:
+        return
+
+    nb_block = reader.block_count()
+
+    # read all chunks of RAW data
+    for block_index in range(nb_block):
+        nb_seg = reader.segment_count(block_index)
+        for seg_index in range(nb_seg):
+            sig_size = reader.get_signal_size(block_index, seg_index, channel_indexes)
+
+            nb = sig_size // chunksize + 1
+            for i in range(nb):
+                i_start = i * chunksize
+                i_stop = min((i + 1) * chunksize, sig_size)
+                raw_chunk = reader.get_analogsignal_chunk(block_index=block_index,
+                                                          seg_index=seg_index,
+                                                          i_start=i_start, i_stop=i_stop,
+                                                          channel_indexes=channel_indexes)
+                yield raw_chunk
+
+
+def read_analogsignals(reader):
+    """
+    Read and convert some signal chunks.
+
+    Also test the special case where signal_channels do not all share the same
+    sampling_rate (a.k.a. _need_chan_index_check).
+    """
+    nb_sig = reader.signal_channels_count()
+    if nb_sig == 0:
+        return
+
+    if reader._several_channel_groups:
+        channel_indexes_list = reader.get_group_channel_indexes()
+    else:
+        channel_indexes_list = [None]
+
+    # read all chunks for all channels, all blocks and all segments
+    for channel_indexes in channel_indexes_list:
+        for raw_chunk in iter_over_sig_chunks(reader, channel_indexes, chunksize=1024):
+            assert raw_chunk.ndim == 2
+            # ~ pass
+
+    for channel_indexes in channel_indexes_list:
+        sr = reader.get_signal_sampling_rate(channel_indexes=channel_indexes)
+        assert type(sr) == float, 'Type of sampling rate is {}, should be float'.format(type(sr))
+
+    # make other tests on the first chunk of the first segment of the first block
+    block_index = 0
+    seg_index = 0
+    for channel_indexes in channel_indexes_list:
+        i_start = 0
+        sig_size = reader.get_signal_size(block_index, seg_index,
+                                          channel_indexes=channel_indexes)
+        i_stop = min(1024, sig_size)
+
+        if channel_indexes is None:
+            nb_sig = reader.header['signal_channels'].size
+            channel_indexes = np.arange(nb_sig, dtype=int)
+
+        all_signal_channels = reader.header['signal_channels']
+
+        signal_names = all_signal_channels['name'][channel_indexes]
+        signal_ids = all_signal_channels['id'][channel_indexes]
+
+        unique_chan_name = (np.unique(signal_names).size == all_signal_channels.size)
+        unique_chan_id = (np.unique(signal_ids).size == all_signal_channels.size)
+
+        # access by channel indexes/ids/names should give the same chunk
+        channel_indexes2 = channel_indexes[::2]
+        channel_names2 = signal_names[::2]
+        channel_ids2 = signal_ids[::2]
+
+        raw_chunk0 = reader.get_analogsignal_chunk(block_index=block_index, seg_index=seg_index,
+                                                   i_start=i_start, i_stop=i_stop,
+                                                   channel_indexes=channel_indexes2)
+        assert raw_chunk0.ndim == 2
+        assert raw_chunk0.shape[0] == i_stop
+        assert raw_chunk0.shape[1] == len(channel_indexes2)
+
+        if unique_chan_name:
+            raw_chunk1 = reader.get_analogsignal_chunk(block_index=block_index, seg_index=seg_index,
+                                                       i_start=i_start, i_stop=i_stop,
+                                                       channel_names=channel_names2)
+            np.testing.assert_array_equal(raw_chunk0, raw_chunk1)
+
+        if unique_chan_id:
+            raw_chunk2 = reader.get_analogsignal_chunk(block_index=block_index, seg_index=seg_index,
+                                                       i_start=i_start, i_stop=i_stop,
+                                                       channel_ids=channel_ids2)
+            np.testing.assert_array_equal(raw_chunk0, raw_chunk2)
+
+        # convert to float32/float64
+        for dt in ('float32', 'float64'):
+            float_chunk0 = reader.rescale_signal_raw_to_float(raw_chunk0, dtype=dt,
+                                                              channel_indexes=channel_indexes2)
+            if unique_chan_name:
+                float_chunk1 = reader.rescale_signal_raw_to_float(raw_chunk1, dtype=dt,
+                                                                  channel_names=channel_names2)
+            if unique_chan_id:
+                float_chunk2 = reader.rescale_signal_raw_to_float(raw_chunk2, dtype=dt,
+                                                                  channel_ids=channel_ids2)
+
+            assert float_chunk0.dtype == dt
+            if unique_chan_name:
+                np.testing.assert_array_equal(float_chunk0, float_chunk1)
+            if unique_chan_id:
+                np.testing.assert_array_equal(float_chunk0, float_chunk2)
+
+
+def benchmark_speed_read_signals(reader):
+    """
+    A very basic speed measurement that reads all signals
+    in a file.
+    """
+
+    if reader._several_channel_groups:
+        channel_indexes_list = reader.get_group_channel_indexes()
+    else:
+        channel_indexes_list = [None]
+
+    for channel_indexes in channel_indexes_list:
+        if channel_indexes is None:
+            nb_sig = reader.signal_channels_count()
+        else:
+            nb_sig = len(channel_indexes)
+        if nb_sig == 0:
+            continue
+
+        nb_samples = 0
+        t0 = time.perf_counter()
+        for raw_chunk in iter_over_sig_chunks(reader, channel_indexes, chunksize=1024):
+            nb_samples += raw_chunk.shape[0]
+        t1 = time.perf_counter()
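+        # throughput over all channels, expressed in mega-samples per second (MSPS)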
+        speed = (nb_samples * nb_sig) / (t1 - t0) / 1e6
+        logging.info(
+            '{} read ({} signals x {} samples) in {:0.3f} s, i.e. {:0.3f} MSPS, from {}'.format(
+                print_class(reader),
+                nb_sig, nb_samples, t1 - t0, speed, reader.source_name()))
+
+
+def read_spike_times(reader):
+    """
+    Read and convert all spike times.
+    """
+
+    nb_block = reader.block_count()
+    nb_unit = reader.unit_channels_count()
+
+    for block_index in range(nb_block):
+        nb_seg = reader.segment_count(block_index)
+        for seg_index in range(nb_seg):
+            for unit_index in range(nb_unit):
+                nb_spike = reader.spike_count(block_index=block_index,
+                                              seg_index=seg_index, unit_index=unit_index)
+                if nb_spike == 0:
+                    continue
+
+                spike_timestamp = reader.get_spike_timestamps(block_index=block_index,
+                                                              seg_index=seg_index,
+                                                              unit_index=unit_index, t_start=None,
+                                                              t_stop=None)
+                assert spike_timestamp.shape[0] == nb_spike, 'nb_spike {} != {}'.format(
+                    spike_timestamp.shape[0], nb_spike)
+
+                spike_times = reader.rescale_spike_timestamp(spike_timestamp, 'float64')
+                assert spike_times.dtype == 'float64'
+
+                if spike_times.size > 3:
+                    # load only one spike by forcing limits
+                    t_start = spike_times[1] - 0.001
+                    t_stop = spike_times[1] + 0.001
+
+                    spike_timestamp2 = reader.get_spike_timestamps(block_index=block_index,
+                                                                   seg_index=seg_index,
+                                                                   unit_index=unit_index,
+                                                                   t_start=t_start, t_stop=t_stop)
+                    assert spike_timestamp2.shape[0] == 1
+
+                    spike_times2 = reader.rescale_spike_timestamp(spike_timestamp2, 'float64')
+                    assert spike_times2[0] == spike_times[1]
+
+
+def read_spike_waveforms(reader):
+    """
+    Read and convert all spike waveforms.
+    """
+    nb_block = reader.block_count()
+    nb_unit = reader.unit_channels_count()
+
+    for block_index in range(nb_block):
+        nb_seg = reader.segment_count(block_index)
+        for seg_index in range(nb_seg):
+            for unit_index in range(nb_unit):
+                nb_spike = reader.spike_count(block_index=block_index,
+                                              seg_index=seg_index, unit_index=unit_index)
+                if nb_spike == 0:
+                    continue
+
+                raw_waveforms = reader.get_spike_raw_waveforms(block_index=block_index,
+                                                               seg_index=seg_index,
+                                                               unit_index=unit_index,
+                                                               t_start=None, t_stop=None)
+                if raw_waveforms is None:
+                    continue
+                assert raw_waveforms.shape[0] == nb_spike
+                assert raw_waveforms.ndim == 3
+
+                for dt in ('float32', 'float64'):
+                    float_waveforms = reader.rescale_waveforms_to_float(
+                        raw_waveforms, dtype=dt, unit_index=unit_index)
+                    assert float_waveforms.dtype == dt
+                    assert float_waveforms.shape == raw_waveforms.shape
+
+
+def read_events(reader):
+    """
+    Read and convert some events or epochs.
+    """
+    nb_block = reader.block_count()
+    nb_event_channel = reader.event_channels_count()
+
+    for block_index in range(nb_block):
+        nb_seg = reader.segment_count(block_index)
+        for seg_index in range(nb_seg):
+            for ev_chan in range(nb_event_channel):
+                nb_event = reader.event_count(block_index=block_index, seg_index=seg_index,
+                                              event_channel_index=ev_chan)
+                if nb_event == 0:
+                    continue
+
+                ev_timestamps, ev_durations, ev_labels = reader.get_event_timestamps(
+                    block_index=block_index, seg_index=seg_index,
+                    event_channel_index=ev_chan)
+                assert ev_timestamps.shape[0] == nb_event, 'Wrong shape {}, {}'.format(
+                    ev_timestamps.shape[0], nb_event)
+                if ev_durations is not None:
+                    assert ev_durations.shape[0] == nb_event
+                assert ev_labels.shape[0] == nb_event
+
+                ev_times = reader.rescale_event_timestamp(ev_timestamps, dtype='float64')
+                assert ev_times.dtype == 'float64'
+
+
+def has_annotations(reader):
+    assert hasattr(reader, 'raw_annotations'), 'raw_annotations are not set'
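
For reference, `BaseTestRawIO` drives these checks automatically (the same sequence of calls
appears at the top of this diff), but they can also be run by hand against any parsed reader.
A minimal sketch, assuming the bundled `ExampleRawIO`; the entity name 'fake1' is just a
placeholder, since ExampleRawIO generates fake data and never opens a real file:

    from neo.rawio.examplerawio import ExampleRawIO
    from neo.rawio.tests import rawio_compliance as compliance

    reader = ExampleRawIO(filename='fake1')  # placeholder entity, no real file is read
    reader.parse_header()

    compliance.header_is_total(reader)
    compliance.count_element(reader)
    compliance.read_analogsignals(reader)
    compliance.read_spike_times(reader)
    compliance.read_spike_waveforms(reader)
    compliance.read_events(reader)
    compliance.has_annotations(reader)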

+ 34 - 0
code/python-neo/neo/rawio/tests/test_axonrawio.py

@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.axonrawio import AxonRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestAxonRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = AxonRawIO
+    entities_to_test = [
+        'File_axon_1.abf',  # V2.0
+        'File_axon_2.abf',  # V1.8
+        'File_axon_3.abf',  # V1.8
+        'File_axon_4.abf',  # V2.0
+        'File_axon_5.abf',  # V2.0
+        'File_axon_6.abf',  # V2.0
+        'File_axon_7.abf',  # V2.6
+    ]
+    files_to_download = entities_to_test
+
+    def test_read_raw_protocol(self):
+        reader = AxonRawIO(filename=self.get_filename_path('File_axon_7.abf'))
+        reader.parse_header()
+
+        reader.read_raw_protocol()
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 20 - 0
code/python-neo/neo/rawio/tests/test_bci2000rawio.py

@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.rawio.bci2000rawio
+"""
+
+import unittest
+
+from neo.rawio.bci2000rawio import BCI2000RawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestBCI2000RawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = BCI2000RawIO
+
+    files_to_download = ['eeg1_1.dat', 'eeg1_2.dat', 'eeg1_3.dat']
+    entities_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 193 - 0
code/python-neo/neo/rawio/tests/test_blackrockrawio.py

@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.rawio.blackrockrawio
+"""
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.blackrockrawio import BlackrockRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+import numpy as np
+from numpy.testing import assert_equal
+
+try:
+    import scipy.io
+
+    HAVE_SCIPY = True
+except ImportError:
+    HAVE_SCIPY = False
+
+
+class TestBlackrockRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = BlackrockRawIO
+    entities_to_test = ['FileSpec2.3001']
+
+    files_to_download = [
+        'FileSpec2.3001.nev',
+        'FileSpec2.3001.ns5',
+        'FileSpec2.3001.ccf',
+        'FileSpec2.3001.mat',
+        'blackrock_2_1/l101210-001.mat',
+        'blackrock_2_1/l101210-001_nev-02_ns5.mat',
+        'blackrock_2_1/l101210-001.ns2',
+        'blackrock_2_1/l101210-001.ns5',
+        'blackrock_2_1/l101210-001.nev',
+        'blackrock_2_1/l101210-001-02.nev']
+
+    @unittest.skipUnless(HAVE_SCIPY, "requires scipy")
+    def test_compare_blackrockio_with_matlabloader(self):
+        """
+        This test compares the output of ReachGraspIO.read_block() with the
+        output generated by a Matlab implementation of a Blackrock file reader
+        provided by the company. The output for comparison is provided in a
+        .mat file created by the script create_data_matlab_blackrock.m.
+        The function tests LFPs, spike times, and digital events on channels
+        80-83 and spike waveforms on channel 82, unit 1.
+        For details on the file contents, refer to FileSpec2.3.txt
+
+        Ported to the rawio API by Samuel Garcia.
+        """
+
+        # Load data from Matlab generated files
+        ml = scipy.io.loadmat(self.get_filename_path('FileSpec2.3001.mat'))
+
+        lfp_ml = ml['lfp']  # (channel x time) LFP matrix
+        ts_ml = ml['ts']  # spike time stamps
+        elec_ml = ml['el']  # spike electrodes
+        unit_ml = ml['un']  # spike unit IDs
+        wf_ml = ml['wf']  # waveform unit 1 channel 1
+        mts_ml = ml['mts']  # marker time stamps
+        mid_ml = ml['mid']  # marker IDs
+
+        # Load data in channels 1-3 from original data files using the Neo
+        # BlackrockIO
+        reader = BlackrockRawIO(filename=self.get_filename_path('FileSpec2.3001'))
+        reader.parse_header()
+
+        # Check if analog data on channels 1-8 are equal
+        self.assertGreater(reader.signal_channels_count(), 0)
+        for c in range(0, 8):
+            raw_sigs = reader.get_analogsignal_chunk(channel_indexes=[c])
+            raw_sigs = raw_sigs.flatten()
+            assert_equal(raw_sigs[:-1], lfp_ml[c, :])
+
+        # Check if spikes in channels are equal
+        nb_unit = reader.unit_channels_count()
+        for unit_index in range(nb_unit):
+            unit_name = reader.header['unit_channels'][unit_index]['name']
+            # name is chXX#YY where XX is channel_id and YY is unit_id
+            channel_id, unit_id = unit_name.split('#')
+            channel_id = int(channel_id.replace('ch', ''))
+            unit_id = int(unit_id)
+
+            matlab_spikes = ts_ml[(elec_ml == channel_id) & (unit_ml == unit_id)]
+
+            io_spikes = reader.get_spike_timestamps(unit_index=unit_index)
+            assert_equal(io_spikes, matlab_spikes)
+
+            # Check waveforms of channel 1, unit 0
+            if channel_id == 1 and unit_id == 0:
+                io_waveforms = reader.get_spike_raw_waveforms(unit_index=unit_index)
+                io_waveforms = io_waveforms[:, 0, :]  # remove dim 1
+                assert_equal(io_waveforms, wf_ml)
+
+        # Check if digital input port events are equal
+        nb_ev_chan = reader.event_channels_count()
+        # ~ print(reader.header['event_channels'])
+        for ev_chan in range(nb_ev_chan):
+            name = reader.header['event_channels']['name'][ev_chan]
+            # ~ print(name)
+            all_timestamps, _, labels = reader.get_event_timestamps(
+                event_channel_index=ev_chan)
+            if name == 'digital_input_port':
+                for label in np.unique(labels):
+                    python_digievents = all_timestamps[labels == label]
+                    matlab_digievents = mts_ml[mid_ml == int(label)]
+                    assert_equal(python_digievents, matlab_digievents)
+            elif name == 'comments':
+                pass
+                # TODO: Save comments to Matlab file.
+
+    @unittest.skipUnless(HAVE_SCIPY, "requires scipy")
+    def test_compare_blackrockio_with_matlabloader_v21(self):
+        """
+        This test compares the output of ReachGraspIO.read_block() with the
+        output generated by a Matlab implementation of a Blackrock file reader
+        provided by the company. The output for comparison is provided in a
+        .mat file created by the script create_data_matlab_blackrock.m.
+        The function tests LFPs, spike times, and digital events.
+
+        Ported to the rawio API by Samuel Garcia.
+        """
+
+        dirname = self.get_filename_path('blackrock_2_1/l101210-001')
+        # First run with parameters for ns5, then run with correct parameters for ns2
+        parameters = [('blackrock_2_1/l101210-001_nev-02_ns5.mat',
+                       {'nsx_to_load': 5, 'nev_override': '-'.join([dirname, '02'])}, 96),
+                      ('blackrock_2_1/l101210-001.mat', {'nsx_to_load': 2}, 6)]
+        for param in parameters:
+            # Load data from Matlab generated files
+            ml = scipy.io.loadmat(self.get_filename_path(filename=param[0]))
+            lfp_ml = ml['lfp']  # (channel x time) LFP matrix
+            ts_ml = ml['ts']  # spike time stamps
+            elec_ml = ml['el']  # spike electrodes
+            unit_ml = ml['un']  # spike unit IDs
+            wf_ml = ml['wf']  # waveforms
+            mts_ml = ml['mts']  # marker time stamps
+            mid_ml = ml['mid']  # marker IDs
+
+            # Load data from original data files using the Neo BlackrockIO
+            reader = BlackrockRawIO(dirname, **param[1])
+            reader.parse_header()
+
+            # Check if analog data are equal
+            self.assertGreater(reader.signal_channels_count(), 0)
+
+            for c in range(0, param[2]):
+                raw_sigs = reader.get_analogsignal_chunk(channel_indexes=[c])
+                raw_sigs = raw_sigs.flatten()
+                assert_equal(raw_sigs[:], lfp_ml[c, :])
+
+            # Check if spikes in channels are equal
+            nb_unit = reader.unit_channels_count()
+            for unit_index in range(nb_unit):
+                unit_name = reader.header['unit_channels'][unit_index]['name']
+                # name is chXX#YY where XX is channel_id and YY is unit_id
+                channel_id, unit_id = unit_name.split('#')
+                channel_id = int(channel_id.replace('ch', ''))
+                unit_id = int(unit_id)
+
+                matlab_spikes = ts_ml[(elec_ml == channel_id) & (unit_ml == unit_id)]
+
+                io_spikes = reader.get_spike_timestamps(unit_index=unit_index)
+                assert_equal(io_spikes, matlab_spikes)
+
+                # Check all waveforms
+                io_waveforms = reader.get_spike_raw_waveforms(unit_index=unit_index)
+                io_waveforms = io_waveforms[:, 0, :]  # remove dim 1
+                matlab_wf = wf_ml[np.nonzero(
+                    np.logical_and(elec_ml == channel_id, unit_ml == unit_id)), :][0]
+                assert_equal(io_waveforms, matlab_wf)
+
+            # Check if digital input port events are equal
+            nb_ev_chan = reader.event_channels_count()
+            # ~ print(reader.header['event_channels'])
+            for ev_chan in range(nb_ev_chan):
+                name = reader.header['event_channels']['name'][ev_chan]
+                # ~ print(name)
+                if name == 'digital_input_port':
+                    all_timestamps, _, labels = reader.get_event_timestamps(
+                        event_channel_index=ev_chan)
+
+                    for label in np.unique(labels):
+                        python_digievents = all_timestamps[labels == label]
+                        matlab_digievents = mts_ml[mid_ml == int(label)]
+                        assert_equal(python_digievents, matlab_digievents)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 43 - 0
code/python-neo/neo/rawio/tests/test_brainvisionrawio.py

@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.rawio.brainvisionrawio
+"""
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.brainvisionrawio import BrainVisionRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestBrainVisionRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = BrainVisionRawIO
+    entities_to_test = ['File_brainvision_1.vhdr',
+                        'File_brainvision_2.vhdr',
+                        'File_brainvision_3_float32.vhdr',
+                        'File_brainvision_3_int16.vhdr',
+                        'File_brainvision_3_int32.vhdr',
+                        ]
+    files_to_download = ['File_brainvision_1.eeg',
+                         'File_brainvision_1.vhdr',
+                         'File_brainvision_1.vmrk',
+                         'File_brainvision_2.eeg',
+                         'File_brainvision_2.vhdr',
+                         'File_brainvision_2.vmrk',
+                         'File_brainvision_3_float32.eeg',
+                         'File_brainvision_3_float32.vhdr',
+                         'File_brainvision_3_float32.vmrk',
+                         'File_brainvision_3_int16.eeg',
+                         'File_brainvision_3_int16.vhdr',
+                         'File_brainvision_3_int16.vmrk',
+                         'File_brainvision_3_int32.eeg',
+                         'File_brainvision_3_int32.vhdr',
+                         'File_brainvision_3_int32.vmrk',
+                         ]
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 24 - 0
code/python-neo/neo/rawio/tests/test_elanrawio.py

@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.elanrawio import ElanRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestElanRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = ElanRawIO
+    entities_to_test = ['File_elan_1.eeg']
+    files_to_download = [
+        'File_elan_1.eeg',
+        'File_elan_1.eeg.ent',
+        'File_elan_1.eeg.pos',
+    ]
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 48 - 0
code/python-neo/neo/rawio/tests/test_examplerawio.py

@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.rawio.examplerawio
+
+Note for dev:
+If you write a new RawIO class, you need to put some files to be tested
+on the g-node portal; ask the neuralensemble list for that.
+The files need to be small.
+
+Then you have to copy/paste/rename the TestExampleRawIO class, and a full
+test suite will check whether the newly coded IO is compliant with the
+RawIO API.
+
+If you have problems, do not hesitate to ask for help on github (preferred)
+or on the neuralensemble list.
+
+Note that the same mechanism is used for the neo.io API, so files are tested
+several times with neo.rawio (numpy buffer) and neo.io (neo object tree).
+See neo.test.iotest.*
+
+
+Author: Samuel Garcia
+
+"""
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.examplerawio import ExampleRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestExampleRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = ExampleRawIO
+    # here obviously there is nothing to download:
+    files_to_download = []
+    # here we will test 2 fake files
+    # note that for IOs based on a dirname you can put the dirname here.
+    entities_to_test = ['fake1',
+                        'fake2',
+                        ]
+
+
+if __name__ == "__main__":
+    unittest.main()
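
The docstring above describes the intended copy/paste/rename workflow. As a purely illustrative
sketch (MyFormatRawIO, its module and the data file name are hypothetical and not part of this
commit), a test module for a new format would follow the same pattern:

    import unittest

    from neo.rawio.myformatrawio import MyFormatRawIO  # hypothetical new RawIO class
    from neo.rawio.tests.common_rawio_test import BaseTestRawIO


    class TestMyFormatRawIO(BaseTestRawIO, unittest.TestCase):
        rawioclass = MyFormatRawIO
        # small files uploaded to the g-node portal, as explained in the docstring
        files_to_download = ['File_myformat_1.xyz']
        entities_to_test = files_to_download


    if __name__ == "__main__":
        unittest.main()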

+ 23 - 0
code/python-neo/neo/rawio/tests/test_intanrawio.py

@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.intanrawio import IntanRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestIntanRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = IntanRawIO
+    files_to_download = [
+        'intan_rhs_test_1.rhs',
+        'intan_rhd_test_1.rhd',
+    ]
+    entities_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 23 - 0
code/python-neo/neo/rawio/tests/test_micromedrawio.py

@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.rawio.micromedrawio
+"""
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.micromedrawio import MicromedRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestMicromedRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = MicromedRawIO
+    files_to_download = ['File_micromed_1.TRC']
+    entities_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 66 - 0
code/python-neo/neo/rawio/tests/test_neuralynxrawio.py

@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.neuralynxrawio import NeuralynxRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+import logging
+
+logging.getLogger().setLevel(logging.INFO)
+
+
+class TestNeuralynxRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = NeuralynxRawIO
+    entities_to_test = [
+        'Cheetah_v5.5.1/original_data',
+        'Cheetah_v5.6.3/original_data',
+        'Cheetah_v5.7.4/original_data',
+    ]
+    files_to_download = [
+        'Cheetah_v5.5.1/original_data/CheetahLogFile.txt',
+        'Cheetah_v5.5.1/original_data/CheetahLostADRecords.txt',
+        'Cheetah_v5.5.1/original_data/Events.nev',
+        'Cheetah_v5.5.1/original_data/STet3a.nse',
+        'Cheetah_v5.5.1/original_data/STet3b.nse',
+        'Cheetah_v5.5.1/original_data/Tet3a.ncs',
+        'Cheetah_v5.5.1/original_data/Tet3b.ncs',
+        'Cheetah_v5.5.1/plain_data/STet3a.txt',
+        'Cheetah_v5.5.1/plain_data/STet3b.txt',
+        'Cheetah_v5.5.1/plain_data/Tet3a.txt',
+        'Cheetah_v5.5.1/plain_data/Tet3b.txt',
+        'Cheetah_v5.5.1/plain_data/Events.txt',
+        'Cheetah_v5.5.1/README.txt',
+        'Cheetah_v5.6.3/original_data/CheetahLogFile.txt',
+        'Cheetah_v5.6.3/original_data/CheetahLostADRecords.txt',
+        'Cheetah_v5.6.3/original_data/Events.nev',
+        'Cheetah_v5.6.3/original_data/CSC1.ncs',
+        'Cheetah_v5.6.3/original_data/CSC2.ncs',
+        'Cheetah_v5.6.3/original_data/TT1.ntt',
+        'Cheetah_v5.6.3/original_data/TT2.ntt',
+        'Cheetah_v5.6.3/original_data/VT1.nvt',
+        'Cheetah_v5.6.3/plain_data/Events.txt',
+        'Cheetah_v5.6.3/plain_data/CSC1.txt',
+        'Cheetah_v5.6.3/plain_data/CSC2.txt',
+        'Cheetah_v5.6.3/plain_data/TT1.txt',
+        'Cheetah_v5.6.3/plain_data/TT2.txt',
+        'Cheetah_v5.7.4/original_data/CSC1.ncs',
+        'Cheetah_v5.7.4/original_data/CSC2.ncs',
+        'Cheetah_v5.7.4/original_data/CSC3.ncs',
+        'Cheetah_v5.7.4/original_data/CSC4.ncs',
+        'Cheetah_v5.7.4/original_data/CSC5.ncs',
+        'Cheetah_v5.7.4/original_data/Events.nev',
+        'Cheetah_v5.7.4/plain_data/CSC1.txt',
+        'Cheetah_v5.7.4/plain_data/CSC2.txt',
+        'Cheetah_v5.7.4/plain_data/CSC3.txt',
+        'Cheetah_v5.7.4/plain_data/CSC4.txt',
+        'Cheetah_v5.7.4/plain_data/CSC5.txt',
+        'Cheetah_v5.7.4/plain_data/Events.txt',
+        'Cheetah_v5.7.4/README.txt']
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 23 - 0
code/python-neo/neo/rawio/tests/test_neuroexplorerrawio.py

@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.neuroexplorerrawio import NeuroExplorerRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestNeuroExplorerRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = NeuroExplorerRawIO
+    files_to_download = [
+        'File_neuroexplorer_1.nex',
+        'File_neuroexplorer_2.nex',
+    ]
+    entities_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 21 - 0
code/python-neo/neo/rawio/tests/test_neuroscoperawio.py

@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.neuroscoperawio import NeuroScopeRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestNeuroScopeRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = NeuroScopeRawIO
+    files_to_download = ['test1/test1.xml',
+                         'test1/test1.dat',
+                         ]
+    entities_to_test = ['test1/test1']
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 16 - 0
code/python-neo/neo/rawio/tests/test_nixrawio.py

@@ -0,0 +1,16 @@
+import unittest
+from neo.rawio.nixrawio import NIXRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+testfname = "nixrawio-1.5.nix"
+
+
+class TestNixRawIO(BaseTestRawIO, unittest.TestCase):
+    rawioclass = NIXRawIO
+    entities_to_test = [testfname]
+    files_to_download = [testfname]
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 82 - 0
code/python-neo/neo/rawio/tests/test_openephysrawio.py

@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.openephysrawio import OpenEphysRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestOpenEphysRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = OpenEphysRawIO
+    entities_to_test = ['OpenEphys_SampleData_1',
+        # 'OpenEphys_SampleData_2_(multiple_starts)',  # not implemented yet, raises an error
+        # 'OpenEphys_SampleData_3',
+                        ]
+
+    files_to_download = [
+        # One segment
+        'OpenEphys_SampleData_1/101_CH0.continuous',
+        'OpenEphys_SampleData_1/101_CH1.continuous',
+        'OpenEphys_SampleData_1/all_channels.events',
+        'OpenEphys_SampleData_1/Continuous_Data.openephys',
+        'OpenEphys_SampleData_1/messages.events',
+        'OpenEphys_SampleData_1/settings.xml',
+        'OpenEphys_SampleData_1/STp106.0n0.spikes',
+
+        # Multi segment with multiple files
+        # NOT implemented for now in the IO, raises an error
+        'OpenEphys_SampleData_2_(multiple_starts)/101_CH0_2.continuous',
+        'OpenEphys_SampleData_2_(multiple_starts)/101_CH1_2.continuous',
+        'OpenEphys_SampleData_2_(multiple_starts)/all_channels_2.events',
+        'OpenEphys_SampleData_2_(multiple_starts)/Continuous_Data_2.openephys',
+        'OpenEphys_SampleData_2_(multiple_starts)/messages_2.events',
+        'OpenEphys_SampleData_2_(multiple_starts)/settings_2.xml',
+        'OpenEphys_SampleData_2_(multiple_starts)/STp106.0n0_2.spikes',
+        'OpenEphys_SampleData_2_(multiple_starts)/101_CH0.continuous',
+        'OpenEphys_SampleData_2_(multiple_starts)/101_CH1.continuous',
+        'OpenEphys_SampleData_2_(multiple_starts)/all_channels.events',
+        'OpenEphys_SampleData_2_(multiple_starts)/Continuous_Data.openephys',
+        'OpenEphys_SampleData_2_(multiple_starts)/messages.events',
+        'OpenEphys_SampleData_2_(multiple_starts)/settings.xml',
+        'OpenEphys_SampleData_2_(multiple_starts)/STp106.0n0.spikes',
+
+        # Multi segment with a corrupted file (CH32): implemented
+        'OpenEphys_SampleData_3/100_CH1_2.continuous',
+        'OpenEphys_SampleData_3/100_CH2_2.continuous',
+        'OpenEphys_SampleData_3/100_CH32_2.continuous',
+        'OpenEphys_SampleData_3/100_CH32.continuous',
+        'OpenEphys_SampleData_3/all_channels_2.events',
+        'OpenEphys_SampleData_3/Continuous_Data_2.openephys',
+        'OpenEphys_SampleData_3/messages_2.events',
+        'OpenEphys_SampleData_3/settings_2.xml',
+        'OpenEphys_SampleData_3/100_CH1.continuous',
+        'OpenEphys_SampleData_3/100_CH2.continuous',
+        'OpenEphys_SampleData_3/100_CH3_2.continuous',
+        'OpenEphys_SampleData_3/100_CH3.continuous',
+        'OpenEphys_SampleData_3/all_channels.events',
+        'OpenEphys_SampleData_3/Continuous_Data.openephys',
+        'OpenEphys_SampleData_3/messages.events',
+        'OpenEphys_SampleData_3/settings.xml',
+    ]
+
+    def test_raise_error_if_discontinuous_files(self):
+        # the case of discontinuous signals is NOT covered by the IO for the moment
+        # It must raise an error
+        reader = OpenEphysRawIO(dirname=self.get_filename_path(
+            'OpenEphys_SampleData_2_(multiple_starts)'))
+        with self.assertRaises(Exception):
+            reader.parse_header()
+
+    def test_raise_error_if_strange_timestamps(self):
+        # In this dataset CH32 has strange timestamps
+        reader = OpenEphysRawIO(dirname=self.get_filename_path('OpenEphys_SampleData_3'))
+        with self.assertRaises(Exception):
+            reader.parse_header()
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 24 - 0
code/python-neo/neo/rawio/tests/test_plexonrawio.py

@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.plexonrawio import PlexonRawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestPlexonRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = PlexonRawIO
+    files_to_download = [
+        'File_plexon_1.plx',
+        'File_plexon_2.plx',
+        'File_plexon_3.plx',
+    ]
+    entities_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 19 - 0
code/python-neo/neo/rawio/tests/test_rawbinarysignalrawio.py

@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.rawbinarysignalrawio import RawBinarySignalRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestRawBinarySignalRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = RawBinarySignalRawIO
+    entities_to_test = ['File_rawbinary_10kHz_2channels_16bit.raw']
+    files_to_download = entities_to_test
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 19 - 0
code/python-neo/neo/rawio/tests/test_rawmcsrawio.py

@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.rawmcsrawio import RawMCSRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestRawMCSRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = RawMCSRawIO
+    entities_to_test = ['raw_mcs_with_header_1.raw']
+    files_to_download = entities_to_test
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 26 - 0
code/python-neo/neo/rawio/tests/test_spike2rawio.py

@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.spike2rawio import Spike2RawIO
+
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestSpike2RawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = Spike2RawIO
+    files_to_download = [
+        'File_spike2_1.smr',
+        'File_spike2_2.smr',
+        'File_spike2_3.smr',
+        '130322-1LY.smr',  # this is for bug 182
+        'multi_sampling.smr',  # this is for bug 466
+    ]
+    entities_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 31 - 0
code/python-neo/neo/rawio/tests/test_tdtrawio.py

@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.tdtrawio import TdtRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestTdtRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = TdtRawIO
+    entities_to_test = ['aep_05']
+
+    files_to_download = [
+        'aep_05/Block-1/aep_05_Block-1.Tbk',
+        'aep_05/Block-1/aep_05_Block-1.Tdx',
+        'aep_05/Block-1/aep_05_Block-1.tev',
+        'aep_05/Block-1/aep_05_Block-1.tsq',
+
+        'aep_05/Block-2/aep_05_Block-2.Tbk',
+        'aep_05/Block-2/aep_05_Block-2.Tdx',
+        'aep_05/Block-2/aep_05_Block-2.tev',
+        'aep_05/Block-2/aep_05_Block-2.tsq',
+
+    ]
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 23 - 0
code/python-neo/neo/rawio/tests/test_winedrrawio.py

@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.winedrrawio import WinEdrRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestWinEdrRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = WinEdrRawIO
+    files_to_download = [
+        'File_WinEDR_1.EDR',
+        'File_WinEDR_2.EDR',
+        'File_WinEDR_3.EDR',
+    ]
+    entities_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 19 - 0
code/python-neo/neo/rawio/tests/test_winwcprawio.py

@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+import unittest
+
+from neo.rawio.winwcprawio import WinWcpRawIO
+from neo.rawio.tests.common_rawio_test import BaseTestRawIO
+
+
+class TestWinWcpRawIO(BaseTestRawIO, unittest.TestCase, ):
+    rawioclass = WinWcpRawIO
+    entities_to_test = ['File_winwcp_1.wcp']
+    files_to_download = entities_to_test
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 92 - 0
code/python-neo/neo/rawio/tests/tools.py

@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+"""
+Common tools that are useful for neo.rawio tests
+"""
+
+# needed for python 3 compatibility
+from __future__ import absolute_import
+
+import logging
+import os
+import shutil
+import tempfile
+
+try:
+    from urllib2 import urlopen
+except ImportError:
+    from urllib.request import urlopen
+
+
+def can_use_network():
+    """
+    Return True if network access is allowed
+    """
+    if os.environ.get('NOSETESTS_NO_NETWORK', False):
+        return False
+    if os.environ.get('TRAVIS') == 'true':
+        return False
+    return True
+
+
+def make_all_directories(filename, localdir):
+    """
+    Make the directories needed to store test files
+    """
+    # handle case of multiple filenames
+    if not hasattr(filename, 'lower'):
+        for ifilename in filename:
+            make_all_directories(ifilename, localdir)
+        return
+
+    fullpath = os.path.join(localdir, os.path.dirname(filename))
+    if os.path.dirname(filename) != '' and not os.path.exists(fullpath):
+        if not os.path.exists(os.path.dirname(fullpath)):
+            make_all_directories(os.path.dirname(filename), localdir)
+        os.mkdir(fullpath)
+
+
+def download_test_file(filename, localdir, url):
+    """
+    Download a test file from a server if it isn't already available.
+
+    filename is the name of the file.
+
+    localdir is the local directory to store the file in.
+
+    url is the remote url that the file should be downloaded from.
+    """
+    # handle case of multiple filenames
+    if not hasattr(filename, 'lower'):
+        for ifilename in filename:
+            download_test_file(ifilename, localdir, url)
+        return
+
+    localfile = os.path.join(localdir, filename)
+    distantfile = url + '/' + filename
+
+    if not os.path.exists(localfile):
+        logging.info('Downloading %s here %s', distantfile, localfile)
+        dist = urlopen(distantfile)
+        with open(localfile, 'wb') as f:
+            f.write(dist.read())
+
+
+def create_local_temp_dir(name, directory=None):
+    """
+    Create a directory for storing temporary files needed for testing neo
+
+    If directory is None or not specified, automatically create the directory
+    in {tempdir}/files_for_testing_neo on linux/unix/mac or
+    {tempdir}\files_for_testing_neo on windows, where {tempdir} is the system
+    temporary directory returned by tempfile.gettempdir().
+    """
+    if directory is None:
+        directory = os.path.join(tempfile.gettempdir(),
+                                 'files_for_testing_neo')
+
+    if not os.path.exists(directory):
+        os.mkdir(directory)
+    directory = os.path.join(directory, name)
+    if not os.path.exists(directory):
+        os.mkdir(directory)
+    return directory
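
Together these helpers implement the download-on-demand scheme used by the rawio test base
class: create a local cache directory, mirror the remote directory layout, then fetch each
missing file. A rough sketch of combining them; the cache name, URL and filename below are
placeholders, not real test data:

    localdir = create_local_temp_dir('myformatrawio')      # hypothetical cache name
    if can_use_network():
        filename = 'myformat/File_myformat_1.xyz'          # hypothetical remote file
        url = 'https://example.org/neo-test-files'         # hypothetical base URL
        make_all_directories(filename, localdir)
        download_test_file(filename, localdir, url)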

+ 122 - 0
code/python-neo/neo/rawio/winedrrawio.py

@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from WinEdr, a software tool written by
+John Dempster.
+
+WinEdr is free:
+http://spider.science.strath.ac.uk/sipbs/software.htm
+
+Author: Samuel Garcia
+
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import os
+import sys
+
+
+class WinEdrRawIO(BaseRawIO):
+    extensions = ['EDR', 'edr']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+        with open(self.filename, 'rb') as fid:
+            headertext = fid.read(2048)
+            headertext = headertext.decode('ascii')
+            header = {}
+            for line in headertext.split('\r\n'):
+                if '=' not in line:
+                    continue
+                # print '#' , line , '#'
+                key, val = line.split('=')
+                if key in ['NC', 'NR', 'NBH', 'NBA', 'NBD', 'ADCMAX', 'NP', 'NZ']:
+                    val = int(val)
+                elif key in ['AD', 'DT', ]:
+                    val = val.replace(',', '.')
+                    val = float(val)
+                header[key] = val
+
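+        # raw data: NP int16 samples interleaved over NC channels, stored after an NBH-byte header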
+        self._raw_signals = np.memmap(self.filename, dtype='int16', mode='r',
+                                      shape=(header['NP'] // header['NC'], header['NC'],),
+                                      offset=header['NBH'])
+
+        DT = header['DT']
+        if 'TU' in header:
+            if header['TU'] == 'ms':
+                DT *= .001
+        self._sampling_rate = 1. / DT
+
+        sig_channels = []
+        for c in range(header['NC']):
+            YCF = float(header['YCF%d' % c].replace(',', '.'))
+            YAG = float(header['YAG%d' % c].replace(',', '.'))
+            YZ = float(header['YZ%d' % c].replace(',', '.'))
+            ADCMAX = header['ADCMAX']
+            AD = header['AD']
+
+            name = header['YN%d' % c]
+            chan_id = header['YO%d' % c]
+            units = header['YU%d' % c]
+            gain = AD / (YCF * YAG * (ADCMAX + 1))
+            offset = -YZ * gain
+            group_id = 0
+            sig_channels.append((name, chan_id, self._sampling_rate, 'int16',
+                                 units, gain, offset, group_id))
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # No events
+        event_channels = []
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill into the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [1]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # insert some annotation at some place
+        self._generate_minimal_annotations()
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals.shape[0] / self._sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._raw_signals.shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        # WARNING check if id or index for signals (in the old IO it was ids)
+        # ~ raw_signals = self._raw_signals[slice(i_start, i_stop), channel_indexes]
+        if channel_indexes is None:
+            channel_indexes = np.arange(self.header['signal_channels'].size)
+
+        ids = self.header['signal_channels']['id'].tolist()
+        channel_ids = [ids.index(c) for c in channel_indexes]
+        raw_signals = self._raw_signals[slice(i_start, i_stop), channel_ids]
+
+        return raw_signals
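
Like the other rawio classes in this commit, WinEdrRawIO only exposes the raw int16 buffer plus
per-channel gain/offset; conversion to physical units goes through the generic BaseRawIO helpers.
A minimal usage sketch (the filename is a placeholder):

    from neo.rawio.winedrrawio import WinEdrRawIO

    reader = WinEdrRawIO(filename='my_recording.EDR')  # placeholder path to a WinEDR file
    reader.parse_header()
    size = reader.get_signal_size(0, 0, channel_indexes=None)
    raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
                                        i_start=0, i_stop=min(1024, size),
                                        channel_indexes=None)
    sigs = reader.rescale_signal_raw_to_float(raw, dtype='float64', channel_indexes=None)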

+ 171 - 0
code/python-neo/neo/rawio/winwcprawio.py

@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+"""
+Class for reading data from WinWCP, a software tool written by
+John Dempster.
+
+WinWCP is free:
+http://spider.science.strath.ac.uk/sipbs/software.htm
+
+Author: Samuel Garcia
+"""
+from __future__ import unicode_literals, print_function, division, absolute_import
+
+from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
+                        _event_channel_dtype)
+
+import numpy as np
+
+import os
+import sys
+import struct
+
+
+class WinWcpRawIO(BaseRawIO):
+    extensions = ['wcp']
+    rawmode = 'one-file'
+
+    def __init__(self, filename=''):
+        BaseRawIO.__init__(self)
+        self.filename = filename
+
+    def _source_name(self):
+        return self.filename
+
+    def _parse_header(self):
+        SECTORSIZE = 512
+
+        # only one memmap for all segment to avoid
+        # "error: [Errno 24] Too many open files"
+        self._memmap = np.memmap(self.filename, dtype='uint8', mode='r')
+
+        with open(self.filename, 'rb') as fid:
+
+            headertext = fid.read(1024)
+            headertext = headertext.decode('ascii')
+            header = {}
+            for line in headertext.split('\r\n'):
+                if '=' not in line:
+                    continue
+                # print '#' , line , '#'
+                key, val = line.split('=')
+                if key in ['NC', 'NR', 'NBH', 'NBA', 'NBD', 'ADCMAX', 'NP', 'NZ', ]:
+                    val = int(val)
+                elif key in ['AD', 'DT', ]:
+                    val = val.replace(',', '.')
+                    val = float(val)
+                header[key] = val
+
+            nb_segment = header['NR']
+            self._raw_signals = {}
+            all_sampling_interval = []
+            # loop for record number
+            for seg_index in range(header['NR']):
+                offset = 1024 + seg_index * (SECTORSIZE * header['NBD'] + 1024)
+
+                # read analysis zone
+                analysisHeader = HeaderReader(fid, AnalysisDescription).read_f(offset=offset)
+
+                # read data
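+                # the data zone holds SECTORSIZE * NBD bytes of int16 samples;
+                # keep only whole frames of NC interleaved channels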
+                NP = (SECTORSIZE * header['NBD']) // 2
+                NP = NP - NP % header['NC']
+                NP = NP // header['NC']
+                NC = header['NC']
+                ind0 = offset + header['NBA'] * SECTORSIZE
+                ind1 = ind0 + NP * NC * 2
+                sigs = self._memmap[ind0:ind1].view('int16').reshape(NP, NC)
+                self._raw_signals[seg_index] = sigs
+
+                all_sampling_interval.append(analysisHeader['SamplingInterval'])
+
+        assert np.unique(all_sampling_interval).size == 1
+
+        self._sampling_rate = 1. / all_sampling_interval[0]
+
+        sig_channels = []
+        for c in range(header['NC']):
+            YG = float(header['YG%d' % c].replace(',', '.'))
+            ADCMAX = header['ADCMAX']
+            VMax = analysisHeader['VMax'][c]
+
+            name = header['YN%d' % c]
+            chan_id = header['YO%d' % c]
+            units = header['YU%d' % c]
+            gain = VMax / ADCMAX / YG
+            offset = 0.
+            group_id = 0
+            sig_channels.append((name, chan_id, self._sampling_rate, 'int16',
+                                 units, gain, offset, group_id))
+
+        sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
+
+        # No events
+        event_channels = []
+        event_channels = np.array(event_channels, dtype=_event_channel_dtype)
+
+        # No spikes
+        unit_channels = []
+        unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
+
+        # fill into the header dict
+        self.header = {}
+        self.header['nb_block'] = 1
+        self.header['nb_segment'] = [nb_segment]
+        self.header['signal_channels'] = sig_channels
+        self.header['unit_channels'] = unit_channels
+        self.header['event_channels'] = event_channels
+
+        # insert some annotation at some place
+        self._generate_minimal_annotations()
+
+    def _segment_t_start(self, block_index, seg_index):
+        return 0.
+
+    def _segment_t_stop(self, block_index, seg_index):
+        t_stop = self._raw_signals[seg_index].shape[0] / self._sampling_rate
+        return t_stop
+
+    def _get_signal_size(self, block_index, seg_index, channel_indexes):
+        return self._raw_signals[seg_index].shape[0]
+
+    def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
+        return 0.
+
+    def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
+        # WARNING check if id or index for signals (in the old IO it was ids)
+        # ~ raw_signals = self._raw_signals[seg_index][slice(i_start, i_stop), channel_indexes]
+        if channel_indexes is None:
+            channel_indexes = np.arange(self.header['signal_channels'].size)
+
+        ids = self.header['signal_channels']['id'].tolist()
+        channel_ids = [ids.index(c) for c in channel_indexes]
+        raw_signals = self._raw_signals[seg_index][slice(i_start, i_stop), channel_ids]
+        return raw_signals
+
+
+AnalysisDescription = [
+    ('RecordStatus', '8s'),
+    ('RecordType', '4s'),
+    ('GroupNumber', 'f'),
+    ('TimeRecorded', 'f'),
+    ('SamplingInterval', 'f'),
+    ('VMax', '8f'),
+]
+
+
+class HeaderReader():
+    def __init__(self, fid, description):
+        self.fid = fid
+        self.description = description
+
+    def read_f(self, offset=0):
+        self.fid.seek(offset)
+        d = {}
+        for key, fmt in self.description:
+            val = struct.unpack(fmt, self.fid.read(struct.calcsize(fmt)))
+            if len(val) == 1:
+                val = val[0]
+            else:
+                val = list(val)
+            d[key] = val
+        return d
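
WinWCP stores one record per file section, and each record becomes one neo segment inside a
single block. A brief sketch of walking through the records via the rawio API (the filename is
a placeholder):

    from neo.rawio.winwcprawio import WinWcpRawIO

    reader = WinWcpRawIO(filename='my_recording.wcp')  # placeholder path to a WinWCP file
    reader.parse_header()
    for seg_index in range(reader.segment_count(0)):
        size = reader.get_signal_size(0, seg_index, channel_indexes=None)
        raw = reader.get_analogsignal_chunk(block_index=0, seg_index=seg_index,
                                            i_start=0, i_stop=size,
                                            channel_indexes=None)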

+ 188 - 0
code/python-neo/neo/test/coretest/test_dataobject.py

@@ -0,0 +1,188 @@
+import copy
+
+import numpy as np
+import unittest
+
+from neo.core.dataobject import DataObject, _normalize_array_annotations, ArrayDict
+
+
+class Test_DataObject(unittest.TestCase):
+    def test(self):
+        pass
+
+
+class Test_array_annotations(unittest.TestCase):
+    def test_check_arr_ann(self):
+        # DataObject instance that handles checking
+        datobj = DataObject([1, 2])  # Inherits from Quantity, so some data is required
+
+        # Correct annotations
+        arr1 = np.asarray(["ABC", "DEF"])
+        arr2 = np.asarray([3, 6])
+        corr_ann = {'anno1': arr1, 'anno2': arr2}
+
+        corr_ann_copy = copy.deepcopy(corr_ann)
+
+        # Checking correct annotations should work fine
+        corr_ann = _normalize_array_annotations(corr_ann, datobj._get_arr_ann_length())
+
+        # Make sure the annotations have not been altered
+        self.assertSequenceEqual(corr_ann.keys(), corr_ann_copy.keys())
+        self.assertTrue((corr_ann['anno1'] == corr_ann_copy['anno1']).all())
+        self.assertTrue((corr_ann['anno2'] == corr_ann_copy['anno2']).all())
+
+        # Now creating incorrect inputs:
+
+        # Nested dict
+        nested_ann = {'anno1': {'val1': arr1}, 'anno2': {'val2': arr2}}
+        with self.assertRaises(ValueError):
+            nested_ann = _normalize_array_annotations(nested_ann, datobj._get_arr_ann_length())
+
+        # Containing None
+        none_ann = corr_ann_copy
+        # noinspection PyTypeChecker
+        none_ann['anno2'] = None
+        with self.assertRaises(ValueError):
+            none_ann = _normalize_array_annotations(none_ann, datobj._get_arr_ann_length())
+
+        # Multi-dimensional arrays in annotations
+        multi_dim_ann = copy.deepcopy(corr_ann)
+        multi_dim_ann['anno2'] = multi_dim_ann['anno2'].reshape(1, 2)
+        with self.assertRaises(ValueError):
+            multi_dim_ann = _normalize_array_annotations(multi_dim_ann,
+                                                         datobj._get_arr_ann_length())
+
+        # Wrong length of annotations
+        len_ann = corr_ann
+        len_ann['anno1'] = np.asarray(['ABC', 'DEF', 'GHI'])
+        with self.assertRaises(ValueError):
+            len_ann = _normalize_array_annotations(len_ann, datobj._get_arr_ann_length())
+
+        # Scalar as array annotation raises Error if len(datobj)!=1
+        scalar_ann = copy.deepcopy(corr_ann)
+        # noinspection PyTypeChecker
+        scalar_ann['anno2'] = 3
+        with self.assertRaises(ValueError):
+            scalar_ann = _normalize_array_annotations(scalar_ann, datobj._get_arr_ann_length())
+
+        # But not if len(datobj) == 1, then it's wrapped into an array
+        # noinspection PyTypeChecker
+        scalar_ann['anno1'] = 'ABC'
+        datobj2 = DataObject([1])
+        scalar_ann = _normalize_array_annotations(scalar_ann, datobj2._get_arr_ann_length())
+        self.assertIsInstance(scalar_ann['anno1'], np.ndarray)
+        self.assertIsInstance(scalar_ann['anno2'], np.ndarray)
+
+        # Lists are also made to np.ndarrays
+        list_ann = {'anno1': [3, 6], 'anno2': ['ABC', 'DEF']}
+        list_ann = _normalize_array_annotations(list_ann, datobj._get_arr_ann_length())
+        self.assertIsInstance(list_ann['anno1'], np.ndarray)
+        self.assertIsInstance(list_ann['anno2'], np.ndarray)
+
+    def test_implicit_dict_check(self):
+        # DataObject instance that handles checking
+        datobj = DataObject([1, 2])  # Inherits from Quantity, so some data is required
+
+        # Correct annotations
+        arr1 = np.asarray(["ABC", "DEF"])
+        arr2 = np.asarray([3, 6])
+        corr_ann = {'anno1': arr1, 'anno2': arr2}
+
+        corr_ann_copy = copy.deepcopy(corr_ann)
+
+        # Implicit checks when setting item in dict directly
+        # Checking correct annotations should work fine
+        datobj.array_annotations['anno1'] = arr1
+        datobj.array_annotations.update({'anno2': arr2})
+
+        # Make sure the annotations have not been altered
+        self.assertTrue((datobj.array_annotations['anno1'] == corr_ann_copy['anno1']).all())
+        self.assertTrue((datobj.array_annotations['anno2'] == corr_ann_copy['anno2']).all())
+
+        # Now creating incorrect inputs:
+
+        # Nested dict
+        nested_ann = {'anno1': {'val1': arr1}, 'anno2': {'val2': arr2}}
+        with self.assertRaises(ValueError):
+            datobj.array_annotations['anno1'] = {'val1': arr1}
+
+        # Containing None
+        none_ann = corr_ann_copy
+        # noinspection PyTypeChecker
+        none_ann['anno2'] = None
+        with self.assertRaises(ValueError):
+            datobj.array_annotations['anno1'] = None
+
+        # Multi-dimensional arrays in annotations
+        multi_dim_ann = copy.deepcopy(corr_ann)
+        multi_dim_ann['anno2'] = multi_dim_ann['anno2'].reshape(1, 2)
+        with self.assertRaises(ValueError):
+            datobj.array_annotations.update(multi_dim_ann)
+
+        # Wrong length of annotations
+        len_ann = corr_ann
+        len_ann['anno1'] = np.asarray(['ABC', 'DEF', 'GHI'])
+        with self.assertRaises(ValueError):
+            datobj.array_annotations.update(len_ann)
+
+        # Scalar as array annotation raises Error if len(datobj)!=1
+        scalar_ann = copy.deepcopy(corr_ann)
+        # noinspection PyTypeChecker
+        scalar_ann['anno2'] = 3
+        with self.assertRaises(ValueError):
+            datobj.array_annotations.update(scalar_ann)
+
+        # But not if len(datobj) == 1, then it's wrapped into an array
+        # noinspection PyTypeChecker
+        scalar_ann['anno1'] = 'ABC'
+        datobj2 = DataObject([1])
+        datobj2.array_annotations.update(scalar_ann)
+        self.assertIsInstance(datobj2.array_annotations['anno1'], np.ndarray)
+        self.assertIsInstance(datobj2.array_annotations['anno2'], np.ndarray)
+
+        # Lists are also made to np.ndarrays
+        list_ann = {'anno1': [3, 6], 'anno2': ['ABC', 'DEF']}
+        datobj.array_annotations.update(list_ann)
+        self.assertIsInstance(datobj.array_annotations['anno1'], np.ndarray)
+        self.assertIsInstance(datobj.array_annotations['anno2'], np.ndarray)
+
+    def test_array_annotate(self):
+        # Calls _normalize_array_annotations, so no need to test for those errors here
+        datobj = DataObject([2, 3, 4])
+        arr_ann = {'anno1': [3, 4, 5], 'anno2': ['ABC', 'DEF', 'GHI']}
+
+        # Pass annotations
+        datobj.array_annotate(**arr_ann)
+
+        # Make sure they are correct
+        self.assertTrue((datobj.array_annotations['anno1'] == np.array([3, 4, 5])).all())
+        self.assertTrue(
+            (datobj.array_annotations['anno2'] == np.array(['ABC', 'DEF', 'GHI'])).all())
+        self.assertIsInstance(datobj.array_annotations, ArrayDict)
+
+    def test_arr_anns_at_index(self):
+        # Get them, test for desired type and size, content
+        datobj = DataObject([1, 2, 3, 4])
+        arr_ann = {'anno1': [3, 4, 5, 6], 'anno2': ['ABC', 'DEF', 'GHI', 'JKL']}
+        datobj.array_annotate(**arr_ann)
+
+        # Integer as index
+        ann_int = datobj.array_annotations_at_index(1)
+        self.assertEqual(ann_int, {'anno1': 4, 'anno2': 'DEF'})
+        # Negative integer as index
+        ann_int_back = datobj.array_annotations_at_index(-2)
+        self.assertEqual(ann_int_back, {'anno1': 5, 'anno2': 'GHI'})
+
+        # Slice as index
+        ann_slice = datobj.array_annotations_at_index(slice(1, 3))
+        self.assertTrue((ann_slice['anno1'] == np.array([4, 5])).all())
+        self.assertTrue((ann_slice['anno2'] == np.array(['DEF', 'GHI'])).all())
+
+        # Slice from beginning to end
+        ann_slice_all = datobj.array_annotations_at_index(slice(0, None))
+        self.assertTrue((ann_slice_all['anno1'] == np.array([3, 4, 5, 6])).all())
+        self.assertTrue((ann_slice_all['anno2'] == np.array(['ABC', 'DEF', 'GHI', 'JKL'])).all())
+
+        # Make sure that original object is edited when editing extracted array_annotations
+        ann_slice_all['anno1'][2] = 10
+        self.assertEqual(datobj.array_annotations_at_index(2)['anno1'], 10)

+ 28 - 0
code/python-neo/neo/test/iotest/test_axographio.py

@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.io.axographio
+"""
+
+# needed for python 3 compatibility
+from __future__ import absolute_import
+
+import sys
+
+import unittest
+
+from neo.io import AxographIO
+from neo.io.axographio import HAS_AXOGRAPHIO
+from neo.test.iotest.common_io_test import BaseTestIO
+
+
+@unittest.skipUnless(HAS_AXOGRAPHIO, "requires axographio")
+class TestAxographIO(BaseTestIO, unittest.TestCase):
+    files_to_test = [
+        'File_axograph.axgd'
+    ]
+    files_to_download = files_to_test
+    ioclass = AxographIO
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 26 - 0
code/python-neo/neo/test/iotest/test_bci2000.py

@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.io.bci2000io
+"""
+
+# needed for python 3 compatibility
+from __future__ import absolute_import, division
+
+import unittest
+
+from neo.io import BCI2000IO
+from neo.test.iotest.common_io_test import BaseTestIO
+
+
+class TestBCI2000IO(BaseTestIO, unittest.TestCase, ):
+    ioclass = BCI2000IO
+    files_to_test = [
+        'eeg1_1.dat',
+        'eeg1_2.dat',
+        'eeg1_3.dat',
+    ]
+    files_to_download = files_to_test
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 27 - 0
code/python-neo/neo/test/iotest/test_intanio.py

@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.io.intanio
+"""
+
+# needed for python 3 compatibility
+from __future__ import absolute_import, division
+
+import sys
+
+import unittest
+
+from neo.io import IntanIO
+from neo.test.iotest.common_io_test import BaseTestIO
+
+
+class TestIntanIO(BaseTestIO, unittest.TestCase, ):
+    ioclass = IntanIO
+    files_to_download = [
+        'intan_rhs_test_1.rhs',
+        'intan_rhd_test_1.rhd',
+    ]
+    files_to_test = files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 104 - 0
code/python-neo/neo/test/iotest/test_nixio_fr.py

@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+"""
+Tests of neo.io.nixio_fr
+"""
+from __future__ import absolute_import
+import numpy as np
+import unittest
+from neo.io.nixio_fr import NixIO as NixIOfr
+import quantities as pq
+from neo.io.nixio import NixIO
+from neo.test.iotest.common_io_test import BaseTestIO
+from neo.test.iotest.tools import get_test_file_full_path
+try:
+    import nixio as nix
+
+    HAVE_NIX = True
+except ImportError:
+    HAVE_NIX = False
+
+
+@unittest.skipUnless(HAVE_NIX, "Requires NIX")
+class TestNixfr(BaseTestIO, unittest.TestCase, ):
+    ioclass = NixIOfr
+
+    files_to_test = ['nixio_fr.nix']
+
+    files_to_download = ['nixio_fr.nix']
+
+    def setUp(self):
+        super(TestNixfr, self).setUp()
+        self.testfilename = self.get_filename_path('nixio_fr.nix')
+        self.reader_fr = NixIOfr(filename=self.testfilename)
+        self.reader_norm = NixIO(filename=self.testfilename, mode='ro')
+        self.blk = self.reader_fr.read_block(block_index=1, load_waveforms=True)
+        # read block with NixIOfr
+        self.blk1 = self.reader_norm.read_block(index=1)  # read same block with NixIO
+
+    def tearDown(self):
+        self.reader_fr.file.close()
+        self.reader_norm.close()
+
+    def test_check_same_neo_structure(self):
+        self.assertEqual(len(self.blk.segments), len(self.blk1.segments))
+        for seg1, seg2 in zip(self.blk.segments, self.blk1.segments):
+            self.assertEqual(len(seg1.analogsignals), len(seg2.analogsignals))
+            self.assertEqual(len(seg1.spiketrains), len(seg2.spiketrains))
+            self.assertEqual(len(seg1.events), len(seg2.events))
+            self.assertEqual(len(seg1.epochs), len(seg2.epochs))
+
+    def test_check_same_data_content(self):
+        for seg1, seg2 in zip(self.blk.segments, self.blk1.segments):
+            for asig1, asig2 in zip(seg1.analogsignals, seg2.analogsignals):
+                np.testing.assert_almost_equal(asig1.magnitude, asig2.magnitude)
+                # not completely equal
+            for st1, st2 in zip(seg1.spiketrains, seg2.spiketrains):
+                np.testing.assert_array_equal(st1.magnitude, st2.times)
+                for wf1, wf2 in zip(st1.waveforms, st2.waveforms):
+                    np.testing.assert_array_equal(wf1.shape, wf2.shape)
+                    np.testing.assert_almost_equal(wf1.magnitude, wf2.magnitude)
+            for ev1, ev2 in zip(seg1.events, seg2.events):
+                np.testing.assert_almost_equal(ev1.times, ev2.times)
+                assert np.all(ev1.labels == ev2.labels)
+            for ep1, ep2 in zip(seg1.epochs, seg2.epochs):
+                assert len(ep1.durations) == len(ep2.times)
+                np.testing.assert_almost_equal(ep1.times, ep2.times)
+                np.testing.assert_array_equal(ep1.durations, ep2.durations)
+                np.testing.assert_array_equal(ep1.labels, ep2.labels)
+
+        # Not testing for channel_index as rawio always reads from seg
+        for chid1, chid2 in zip(self.blk.channel_indexes, self.blk1.channel_indexes):
+            for asig1, asig2 in zip(chid1.analogsignals, chid2.analogsignals):
+                np.testing.assert_almost_equal(asig1.magnitude, asig2.magnitude)
+
+    def test_analog_signal(self):
+        seg1 = self.blk.segments[0]
+        an_sig1 = seg1.analogsignals[0]
+        assert len(an_sig1) == 30
+        an_sig2 = seg1.analogsignals[1]
+        assert an_sig2.shape == (50, 3)
+
+    def test_spike_train(self):
+        st1 = self.blk.segments[0].spiketrains[0]
+        assert np.all(st1.times == np.cumsum(np.arange(0, 1, 0.1)).tolist() * pq.s + 10 * pq.s)
+
+    def test_event(self):
+        seg1 = self.blk.segments[0]
+        event1 = seg1.events[0]
+        raw_time = 10 + np.cumsum(np.array([0, 1, 2, 3, 4]))
+        assert np.all(event1.times == np.array(raw_time * pq.s / 1000))
+        assert np.all(event1.labels == np.array([b'A', b'B', b'C', b'D', b'E']))
+        assert len(seg1.events) == 1
+
+    def test_epoch(self):
+        seg1 = self.blk.segments[1]
+        seg2 = self.blk1.segments[1]
+        epoch1 = seg1.epochs[0]
+        epoch2 = seg2.epochs[0]
+        assert len(epoch1.durations) == len(epoch1.times)
+        assert np.all(epoch1.durations == epoch2.durations)
+        assert np.all(epoch1.labels == epoch2.labels)
+
+
+if __name__ == '__main__':
+    unittest.main()

+ 29 - 0
code/python-neo/neo/test/iotest/test_openephysio.py

@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""
+
+"""
+
+# needed for python 3 compatibility
+from __future__ import absolute_import, division
+
+import unittest
+
+import quantities as pq
+
+from neo.io import OpenEphysIO
+from neo.test.iotest.common_io_test import BaseTestIO
+from neo.rawio.tests.test_openephysrawio import TestOpenEphysRawIO
+
+
+class TestOpenEphysIO(BaseTestIO, unittest.TestCase, ):
+    ioclass = OpenEphysIO
+    files_to_test = ['OpenEphys_SampleData_1',
+        # 'OpenEphys_SampleData_2_(multiple_starts)',  # not implemented yet; raises an error
+        # 'OpenEphys_SampleData_3',
+                     ]
+
+    files_to_download = TestOpenEphysRawIO.files_to_download
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 21 - 0
code/python-neo/neo/test/iotest/test_rawmcsio.py

@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+# needed for python 3 compatibility
+from __future__ import absolute_import, division
+
+import sys
+
+import unittest
+
+from neo.io import RawMCSIO
+from neo.test.iotest.common_io_test import BaseTestIO
+
+
+class TestRawMcsIO(BaseTestIO, unittest.TestCase, ):
+    ioclass = RawMCSIO
+    files_to_test = ['raw_mcs_with_header_1.raw']
+    files_to_download = files_to_test
+
+
+if __name__ == "__main__":
+    unittest.main()

+ 35 - 0
code/python-odml/.gitignore

@@ -0,0 +1,35 @@
+# python files and dirs
+*.pyc
+*.pyo
+*.egg
+*.egg-info
+dist
+build
+eggs
+.eggs
+parts
+
+# odml files
+# *.odml
+
+# Include a sample file
+!THGTTG.odml
+
+# installer logs
+pip-log.txt
+
+# Unit test / coverage reports
+.coverage
+.tox
+.cache
+
+# temp files
+*~
+*.log
+
+# idea / pycharm files
+.idea
+*.iml
+
+# doc/_build files
+doc/_build/

+ 70 - 0
code/python-odml/.travis.yml

@@ -0,0 +1,70 @@
+sudo: required
+dist: trusty
+
+language: python
+
+matrix:
+  include:
+    - os: linux
+      python: "2.7"
+      env: COVERALLS=1
+    - os: linux
+      python: "3.4"
+    - os: linux
+      python: "3.5"
+    - os: linux
+      python: "3.6"
+
+    - os: osx
+      language: generic
+      env:
+        - OSXENV=3.6.0
+    - os: osx
+      language: generic
+      env:
+        - OSXENV=2.7.14
+
+install:
+  - export PYVER=${TRAVIS_PYTHON_VERSION:0:1}
+  - if [ $PYVER = 3 ]; then
+      export PYCMD=python3;
+      export PIPCMD=pip3;
+    else
+      export PYCMD=python;
+      export PIPCMD=pip;
+    fi;
+
+  - if [ $COVERALLS = 1 ]; then
+        $PIPCMD install --upgrade coveralls;
+    fi;
+
+  - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
+      brew update;
+      brew install pyenv;
+      brew upgrade pyenv;
+      brew install pyenv-virtualenv;
+      eval "$(pyenv init -)";
+      eval "$(pyenv virtualenv-init -)";
+      pyenv install $OSXENV;
+      pyenv virtualenv $OSXENV venv;
+      pyenv activate venv;
+      which python;
+      python --version;
+      which pip;
+      export PYCMD=python;
+      export PIPCMD=pip;
+    fi;
+
+  - $PIPCMD install lxml enum34 pyyaml rdflib
+
+script:
+  - which $PYCMD
+  - $PYCMD setup.py build
+  - if [ $COVERALLS = 1 ]; then
+        coverage${PYVER} run --source=odml setup.py test && coverage${PYVER} report -m;
+    else
+        $PYCMD setup.py test;
+    fi;
+
+after_success:
+- if [ $COVERALLS = 1 ]; then coveralls; fi;

+ 326 - 0
code/python-odml/CHANGELOG.md

@@ -0,0 +1,326 @@
+# Changelog
+
+Used to document all changes from previous releases and collect changes 
+until the next release.
+
+# Latest changes in master
+
+...
+
+# Version 1.4.2
+
+## Print methods
+
+`pprint` methods have been added to both `Section` and `Property`
+to print whole Section trees with their child sections and properties.
+The `__repr__` style of `Section` and `Property` has been changed to
+be more similar to the [nixpy](https://github.com/G-Node/nixpy) `__repr__` style.
+Printing a `Section` now also features the immediate `Property` child count
+in addition to the immediate `Section` child count. See #309.
+
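+A minimal sketch of the new printing behaviour (the example document and the exact constructor
+signatures are illustrative, not taken from the release notes):
+
+```python
+import odml
+
+doc = odml.Document(author="demo")
+sec = odml.Section(name="recording", type="dataset", parent=doc)
+prop = odml.Property(name="subject_id", parent=sec)
+prop.values = ["mouse_17"]
+
+sec.pprint()   # prints the whole Section tree with child Sections and Properties
+print(sec)     # nixpy-style repr, now including the immediate Property child count
+```
+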
+## Deprecation of 'Property.value' in favor of 'Property.values'
+
+To make working with odML more similar to working with the 
+metadata part of [nixpy](https://github.com/G-Node/nixpy), the `Property.value` 
+attribute has been marked deprecated and the `Property.values` 
+attribute has been added. See #308. 
+
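+As a hedged illustration of the renamed accessor (attribute names as described above; the
+exact deprecation behaviour is assumed):
+
+```python
+import odml
+
+prop = odml.Property(name="temperature")
+prop.values = [36.5, 36.8]
+
+print(prop.values)   # preferred accessor
+print(prop.value)    # still available, but deprecated in favour of `values`
+```
+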
+## Uncertainty changes
+
+Uncertainty is now limited to float only. See #294.
+
+## Version converter changes
+
+The VersionConverter used to handle an edge case of XML test files containing opening `<B0>`
+tags without a matching closing tag, which rendered them invalid. Catching this one edge case
+bypassed opening XML files via lxml and left the resulting document open to various
+encoding problems.
+
+Support to resolve the specific tag edge cases is dropped in favour of properly opening 
+XML files via lxml. See #301.
+
+## Additional console script
+
+The `odmlconversion` convenience console script has been added to convert multiple
+files of previous odML versions to the latest odML version.
+
+## Changes in cloning behaviour
+
+When cloning a `Section` or a `Property`, the id of any cloned object is by default changed
+to a new UUID. The cloning methods now feature a new `keep_id` argument. If set to
+`True`, the cloned object and any cloned children retain their original id. This
+is meant to create exact copies of Section-Property trees in different documents.
+
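+A short sketch of the two cloning modes (the section and the assertions are illustrative):
+
+```python
+import odml
+
+sec = odml.Section(name="stimulus", type="protocol")
+
+fresh = sec.clone()              # clone and all cloned children get new UUIDs
+exact = sec.clone(keep_id=True)  # clone retains the original ids
+
+assert fresh.id != sec.id
+assert exact.id == sec.id
+```
+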
+## Additional validation
+
+When a document is saved, a new validation check makes sure that it contains only
+unique UUIDs. This is required since clones with identical ids can now be created.
+
+
+# Version 1.4.1
+
+## Dependency changes
+
+- `pyyaml` was version fixed on 3.13 to circumvent introduced breaking changes in the library. See #291, #292, #296, #298.
+- `docopt` was added to support console scripts
+
+## Converter and Parser fixes
+
+- Fixes an issue where an XML file with a UTF-8 encoding file header was not properly parsed by the `VersionConverter` XML parser. See #288, #296.
+- Fixes the `XMLParser` so that a single string value read from csv that contains commas remains a single value and is no longer split at each comma. See #295, #296.
+- In the `XMLParser`, leading and trailing whitespace is removed from string values when they are written to csv. Along the same lines, multiple values saved to file via the `VersionConverter` no longer contain leading whitespace. See #296.
+- Thorough encoding and usage of `unicode` has been introduced to all Parsers and Converters to avoid encoding errors with Python 2 and Python 3. See #297.
+
+## Changes in `Section` and `Property` SmartList
+
+- Adds `SmartList.sort()`. By default `Document` and `Section` child lists retain the order in which child elements were added, but a sort by name can now be triggered manually (see the sketch after this list). See #290.
+- Adds `SmartList` comparison magic methods to partially address #265. The introduction of the RDF backend led to an issue when comparing odML entities: since the RDF library `rdflib` does not respect child order when loading a file, odML entities with children cannot be compared without sorting the child elements. The added magic methods sort child elements by name before comparison without changing the actual order of the child elements. This only addresses the issue for `Section` and `Property` child lists, but does not solve the problem for the order of `Property.values`. See #290.
+
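+A minimal sketch of the manual sort (section names are illustrative):
+
+```python
+import odml
+
+doc = odml.Document()
+for name in ("zeta", "alpha", "mid"):
+    odml.Section(name=name, type="misc", parent=doc)
+
+doc.sections.sort()                        # in-place sort of the child list by name
+print([sec.name for sec in doc.sections])  # ['alpha', 'mid', 'zeta']
+```
+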
+## Document format update
+
+- A new private attribute `_origin_file_name` is added to the `Document` entity. When an odML document is loaded from file, this attribute is now set with the file name from whence the document was loaded. See #297.
+
+## RDF format changes
+
+- The RDF class `Seq` is now used instead of `Bag` to store `odml.Property` values to respect the order of values. See #292.
+- Since `rdflib` currently does not support proper `Seq` behaviour with RDF `li` items, for now the index of the value items will be manually written as RDF properties, which `rdflib` supports when reading an RDF file. See #292.
+- When writing an RDF file from an odML document that features an `_origin_file_name`, the value is exported as `odml:hasFileName`. See #297.
+- `xml` is now the default `ODMLWriter` format when writing a document to RDF since the XML format of RDF is still the format with the broadest acceptance. See #297.
+
+## Addition of console scripts
+
+- The `odmltordf` convenience console script has been added to convert multiple odML files to the RDF format from any odML format or version. See #298.
+
+
+# Version 1.4.0
+## Breaking changes
+
+The switch from odML version 1.3 to 1.4 contains many cool updates which should make work more comfortable, but also includes some breaking changes.
+
+### Update of the odML file format version
+- The odML format version number in odML files has changed from "1" to "1.1".
+
+### Changes in odML classes
+- The odML class hierarchy has been flattened:
+  - removing `base._baseobj` class, leaving `BaseObject` as the root odML class.
+  - removing `doc.Document` class, leaving `BaseDocument` as the only odML Document class.
+  - removing `section.Section` class, leaving `BaseSection` as the only odML Section class.
+  - removing `property.Property` class leaving `BaseProperty` as the only odML Property class.
+- `baseobject` and `sectionable` are renamed to `BaseObject` and `Sectionable` respectively.
+- `base.SafeList` and `base.SmartList` have been merged; `base.SafeList` has been removed.
+- `base.SmartList` can now only contain Sections or Properties. See #272.
+- The `reorder` method is moved from the `base` to the `Section` class. See #267.
+
+### Changes in Value handling: 
+- The `Value` class has been removed.
+- `Property.value` now always holds a list of uniform values. `Property.value` always 
+    returns a copy of the actual value list. See #227.
+- Values can only be changed directly via the `__setitem__` method of a `Property` (see the sketch after this list).
+- `Value` attributes `uncertainty`, `unit`, `dtype` and `reference` have been moved to 
+    `Property` and now apply to all values of the `Property.value` list.
+- The `Value` attributes `filename`, `encoder` and `checksum` have been removed.
+
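+A hedged sketch of the copy-on-read behaviour and `__setitem__` (the example values and the
+exact constructor signature are assumptions):
+
+```python
+import odml
+
+prop = odml.Property(name="weights", value=[1.0, 2.0, 3.0])
+
+vals = prop.value     # returns a copy of the value list
+vals[0] = 99.0        # modifying the copy does not touch the Property
+
+prop[0] = 0.5         # __setitem__ changes the stored value directly
+print(prop.value)     # [0.5, 2.0, 3.0]
+```
+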
+### DType changes:
+- The `binary` dtype has been removed. Providing binary content via odML files is 
+    discouraged in favor of providing a reference to the origin files using the `URL` 
+    dtype instead.
+
+### Mapping
+- Any `mapping` functionality has been removed.
+
+### Minor breaking changes
+- `XMLReader.fromFile()` and `.fromString()` have been renamed to `.from_file()` and `.from_string()` respectively.
+
+
+## Features and changes
+
+### Required odML entity attributes handling
+- Required attributes of odML entities in `odml.format` were changed: `Section.name`, 
+    `Section.type` and `Property.name` are the only attributes set to be required for 
+    their respective odML entities. See #240.
+- `Section.name` and `Property.name` can now be `None` on init. If this is the case, the 
+    entities' `id` value is used as the `name` value (see the sketch after this list).
+- Hardcoded checks for existing `name` attributes in the XML Parser are removed. Only 
+    attributes set as required in `format` are now used to check for missing required odML 
+    entity attributes. See #241.
+- The `name` attribute of a `Section` or a `Property` can now only be rewritten if there 
+    is no sibling with the same name on the same hierarchical level. See #283.
+
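+A small sketch of the fallback described above (assuming a Section may be created without an
+explicit name):
+
+```python
+import odml
+
+sec = odml.Section(type="dataset")   # no name given on init
+print(sec.name == sec.id)            # True: the generated id doubles as the name
+```
+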
+### Addition of the 'id' attribute
+- `Document`, `Section` and `Property` now have an `id` attribute to uniquely identify any 
+    entity. If no valid id is provided when an entity is initialized, an id is 
+    automatically generated and assigned.
+- Adding the `new_id()` method to `Document`, `Section` and `Property` which generates 
+    and sets a new valid id for any entity. See #262.
+
+### Changes in DType handling
+- Setting a dtype now also supports odML style tuple types. See #254.
+- DTypes now always return the defined default values if a value is `None` or `""`.
+- Any boolean dtype value other than `"false", "f", 0, False, "true", "t", 1` or `True` 
+    will now raise a `ValueError`. See #224
+
+### 'base.Sectionable' (Document and Section) changes
+- Adds a `base.Sectionable.extend` method for child Sections and Properties. See #237.
+- Refactors the `base.Sectionable.insert` and `.append` methods. Only proper 
+    `BaseSections` with a unique name can be added to the Section child list of a 
+    `Sectionable`.
+- Appending multiple Sections or Properties has been removed from the `append` method to 
+    mirror Property `append` functionality and since `extend` now serves this need.
+
+### 'Section' and 'Property' merge 
+- `Property` now provides a `merge` method to merge two properties. This will sync all but 
+    the dependency and dependencyValue attributes. ValueErrors are raised if information 
+    is set in both properties but is in conflict. See #221.
+- Adds a `Section.merge_check()` method which validates whether a Section including all 
+    its sub-sections and sub-properties can properly be merged. A `ValueError` is raised 
+    if any potential merge problem arises. This is necessary since a recursive Section 
+    merge cannot be easily rolled back once begun.
+- A Section merge imports `reference` and `definition` from the "source" Section if they 
+    were `None` in the "destination" Section. See #273.
+- Adds a `strict` flag to any `merge` method. Now all `Section` and `Property` attribute 
+    checks during a merge will only be done, if `strict=True`. On `strict=False` a 
+    `Section` or `Property` attribute will only be replaced with the "source" value, if 
+    the "destination" value is `None`. Otherwise the "destination" value will be kept and 
+    the "source" value lost. See #270.
+
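+A hedged sketch of a non-strict merge (section contents are illustrative and the default of
+`strict` is not stated above, so it is passed explicitly):
+
+```python
+import odml
+
+destination = odml.Section(name="subject", type="subject")
+source = odml.Section(name="subject", type="subject",
+                      definition="Animal the data was recorded from")
+
+# With strict=False only attributes that are None in the destination are filled in;
+# conflicting destination values are kept and the source values are lost.
+destination.merge(source, strict=False)
+print(destination.definition)
+```
+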
+### Changes of 'Section' and 'Property' clone
+- When a `Section` or a `Property` is cloned, a new id is set for the clone and of any 
+    cloned children. See #259.
+
+### 'Document' changes
+- Tuples of Sections can no longer be used with `Document.append` since 
+    `Document.extend` should be used to add multiple new Sections to a Document.
+
+### 'Section' changes
+- Adds a `Section.extend` method.
+
+### 'Property' changes
+- `Property` has the new attribute `value_origin` which may contain the origin of the 
+    property's value e.g. a filename.
+- `Property` init now supports setting all attributes as well as its parent.
+- `Property` now provides `append`, `extend` and `remove` methods to change the actual 
+    value list. This approach is required to ensure DType checks when adding new values 
+    to an existing list (see the sketch after this list). See #223.
+- Only valid dtypes can now be set on `Property` init. See #253.
+
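+A minimal sketch of the new value-list methods (values are illustrative; `remove` is assumed
+here to remove by value):
+
+```python
+import odml
+
+prop = odml.Property(name="trial_ids", value=[1, 2])
+
+prop.append(3)        # dtype-checked against the existing values
+prop.extend([4, 5])
+prop.remove(1)        # assumed to remove the value 1 from the list
+
+print(prop.value)     # expected under these assumptions: [2, 3, 4, 5]
+```
+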
+### Terminology changes
+- The default odML terminology repository is set to `http://portal.g-node.org/odml/terminologies/v1.1/terminologies.xml`.
+
+### Changes in Tools and IO
+- The `XMLParser` can now be run in warning mode: any errors encountered during parsing 
+    will just print a warning, but will not stop and exit during the parsing process.
+- An odML document can now only be saved if the validation does not show any errors. 
+    Saving an invalid document will stop the process before saving and print all 
+    encountered errors.
+- All parsers are now more relaxed when encountering unsupported or missing tags and only 
+    print warnings instead of ending with an exception. Warnings are collected and can be 
+    accessed via the parser object.
+- When trying to open a file with any of the odML parsers, the document format version 
+    number is checked. If the version number does not match the supported one, file 
+    loading will fail with an exception. 
+
+## New tools
+- Added the `tools.RDFWriter` and `tools.RDFReader` classes, which enable the export of 
+    odML documents to RDF and also provide the used ontology OWL file at `doc/odml_terminology/`.
+- Added the `tools.ODMLWriter` and `tools.ODMLReader` classes which serve as an easy 
+    entry point to saving and loading for all the supported file formats `XML`, `YAML`, 
+    `JSON` and `RDF`.
+- Added the `tools.DictWriter` and `tools.DictReader` classes which convert Python 
+    dictionary data to odML data and vice versa, which in turn is required for both YAML 
+    and JSON format loading and saving.
+- Removed the `tools.jsonparser` file which is no longer required due to the classes in 
+    `tools.odmlparser` and `tools.dict_parser`. 
+- Added the `tools.FormatConverter` class which enables batch conversion of one odML 
+    format into another.
+- Added the `tools.VersionConverter` class which enables conversion of pre-v1.4 odML files 
+    into valid v1.4 odML.
+  - The `VersionConverter` converts `XML`, `JSON` and `YAML` based odML files of odML file 
+        version 1.0 to odML file version 1.1.
+  - Only attributes supported by `Document`, `Section` and `Property` are exported. Any 
+        unsupported attribute will produce a warning message and its content will be 
+        discarded.
+  - The value content is moved from a `Value` object to its parent `Property` value list.
+  - The first encountered `unit` or `uncertainty` of values of a `Property` will be moved 
+        to its parent `Property`. Any differing subsequent `unit` or `uncertainty` of 
+        values of the same `Property` will produce a warning message and the content will be 
+        discarded.
+  - The first `filename` attribute content of a `Value` is moved to the `value_origin` 
+        attribute of its parent `Property`.
+  - Any g-node terminology URL in `repository` or `link` is updated from v1.0 to their 
+        v1.1 counterparts if available. 
+  - A `VersionConverter` object provides a `.conversion_log` list attribute to access all 
+        info and warning messages after a conversion has taken place. See #234.
+
+## Fixes
+- Various installation issues have been resolved for Linux and MacOS.
+- `False` as well as `F` are now properly converted to bool values in both 
+    Python 2 and 3. See #222.
+- Fixes saving datetime related values to JSON. See #248.
+- odML style custom tuples can now properly be saved using the `XMLParser`.
+- `Document` now properly uses the dtypes date setter on init. See #249.
+- Fixes load errors on Empty and `None` boolean and datetime related values. See #245.
+- Excludes `id` when comparing odML entities for equality. See #260.
+- When a `Property` is cloned, the parent of the clone is now properly set to `None`.
+- Avoids an `AttributeError` on `get_path()` when a `Property` has no parent. See #256.
+- Avoids an `AttributeError` on `get_merged_equivalent()` when a `Property` 
+    has no parent. See #257.
+- Avoids an error on `Property.append()`, if the dtype was not set. See #266.
+- Makes sure that `Property.append()` exits on empty values but accepts `0` and `False`.
+- Sets `Property.uncertainty` to `None` if an empty string is passed to it.
+- Changes the order in which `Property.__init__` sets its attributes: previously, the 
+    repository attribute was overwritten with `None` by the `super().__init__` call 
+    after it had been set.
+- Fixes set `Property.parent = None` bugs in `remove()` and `insert()` methods.
+- Consistently use relative imports to address circular imports and remove code that 
+    circumvents previous circular import errors in the `ODMLParser` class. See #199.
+- Consistently uses `BaseSection` or `BaseDocument` for isinstance checks throughout 
+    `base` instead of a mixture of `BaseSection` and `Section`.
+
+
+# Version 1.3.4
+
+## Fixes
+- Potential installation issues due to import from `info.py`.
+
+
+# Version 1.3.3
+## Features
+
+- Terminology caching and loading update.
+- Terminology section access and type listing functions.
+- Define and use common format version number for all parsers.
+- Supported format version check: When trying to open a file with any of the odml parsers, 
+    the document format version number is checked first. If the found version number does 
+    not match the supported one, file loading will fail with an exception, since this is the 
+    oldest format version. Anyone trying to open a newer format should first 
+    update their odML package rather than use this one.
+- Document saving: An odML document can now only be saved if the validation does not show 
+    any errors. Saving an invalid document will exit while printing all encountered 
+    errors.
+- Parser: All parsers are now more relaxed when encountering unsupported tags or missing 
+    tags and only print warnings instead of ending with an exception. Warnings are 
+    collected and can be accessed via the parser object (required for display in 
+    [odml-ui](https://github.com/G-Node/odml-ui) to avoid potential loss of information).
+- Package and format information added or updated: `Version`, `Format version`, `Contact`, 
+    `Homepage`, `Author`, PyPI `Classifiers`, `Copyright`.
+- Removes the license text from `setup.py`. The license text interfered with the PyPI 
+    process in a way, that the description was not displayed on PyPI.
+- Removes the image folder from the project, since they are exclusively used in the 
+    outsourced [odml-ui](https://github.com/G-Node/odml-ui) project.
+
+## Fixes
+- Fixes a bug that prevented the parsing of `json` or `yaml` files; #191.
+- Fixes a bug that caused parsing of `json` or `yaml` files to fail when `Section.repository`, `Section.link` or `Section.include` are present; #194.
+
+
+# Version 1.3.2
+- Expose load, save, and display functions to top level module
+    - These functions accept a `backend` argument that specifies the parser or writer. 
+        Can be one of `XML`, `JSON`, or `YAML` (see the sketch below).
+
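+A hedged sketch of the top-level functions (file names are placeholders; the exact argument
+order and the casing of the backend string are assumptions):
+
+```python
+import odml
+
+doc = odml.load("metadata.xml", backend="xml")
+odml.save(doc, "metadata.yaml", backend="yaml")
+```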
+
+# Version 1.3.1
+- move ui to a separate repository https://github.com/g-node/odml-ui
+- python3 compatibility
+- add json and yaml storage backends

+ 77 - 0
code/python-odml/CONTRIBUTING.md

@@ -0,0 +1,77 @@
+How to contribute to python-odml
+================================
+
+This document gives some information about how to contribute to the odML project.
+
+
+Contributing
+------------
+
+If you want to contribute to the project please first create a fork of the repository on GitHub.
+When you are done with implementing a new feature or with fixing a bug, please send
+us a pull request.
+
+If you contribute to the project regularly, it would be very much appreciated if you
+would stick to the following development workflow:
+
+1. Select an *issue* from the issue tracker that you want to work on and assign the issue to your account.
+   If the *issue* is about a relatively complex matter or requires larger API changes the description of the
+   *issue* or its respective discussion should contain a brief concept of what the solution will look like.
+
+2. During the implementation of the feature or bug-fix add your changes in small atomic commits.
+   Commit messages should be short but expressive.
+   The first line of the message should not exceed **50** characters and the 2nd line should be empty.
+   If you want to add further text you can do so from the 3rd line on without limitations.
+   If possible reference fixed issues in the commit message (e.g. "fixes #101").
+
+3. When done with the implementation, compile and test the code.
+   If your work includes a new function or class please write a small unit test for it.
+
+4. Send us a pull request with your changes.
+   The pull request message should explain the changes and reference the *issue* addressed by your code.
+   Your pull request will be reviewed by one of our team members.
+   Pull requests should never be merged by the author of the contribution, but by another team member.
+   Merge conflicts or errors reported by travis should be resolved by the original author before the request is merged.
+
+
+Google Summer of Code contributors
+---------------------
+
+Please see the corresponding [Google Summer of Code](GSoC.md) file if you are interested in contributing as part of the GSoC programme.
+
+
+The issue tracker
+-----------------
+
+Please try to avoid duplicates of issues. If you encounter duplicated issues, please close all of them except
+one, reference the closed issues in the one that is left open and add missing information from the closed issues
+(if necessary) to the remaining issue.
+
+Assign meaningful tags to newly created issues and, if possible, assign them to milestones.
+
+
+Reviewing pull requests
+-----------------------
+
+All code (even small contributions from core developers) should be added to the project via pull requests.
+Before reviewing a pull request it should pass all builds and tests on travis-ci.
+Each pull request that passes all builds and tests should be reviewed by at least one of the core developers.
+If a contribution is rather complex or leads to significant API changes, the respective pull request should be
+reviewed by two other developers.
+In such cases the first reviewer or the contributor should request a second review in a comment.
+
+
+Testing
+-------
+
+* Unit tests can be found in the test subdirectory. Currently, the test coverage is a bit low, but we are working on improving it.
+
+* Provide a unit test for every class, method or function.
+
+* Please make sure that all tests pass before merging/sending pull requests.
+
+
+Style guide
+-----------
+
+Always keep your code PEP8 compliant.

+ 31 - 0
code/python-odml/GSoC.md

@@ -0,0 +1,31 @@
+Google Summer of Code contributions
+===================================
+
+
+General guidelines
+------------------
+
+Google Summer of Code candidates should follow the [general contribution guidelines](CONTRIBUTING.md) before beginning work on an issue and submitting pull requests.
+Students interested in working on python-odml as part of GSoC 2017 should read the guidelines described in the [GSoC student guide](http://write.flossmanuals.net/gsocstudentguide/making-first-contact/) regarding making first contact.
+They're quite useful for general open source contributions as well.
+
+
+Open communication
+------------------
+
+The GSoC programme encourages open communication and so do we.
+While directly contacting the mentors may get a response, please refrain from doing so unless discussing personal matters.
+For all topics regarding the project, issues, patches, preparing proposals, please use the [discussion thread on Trellis](https://www.trelliscience.com/#/discussions-about/13798/0), or comment directly on a relevant issue or pull request, whichever is more appropriate.
+
+There is a #gnode IRC channel on Freenode which you may join for more casual discussions with the team.
+
+
+Discussion venues
+-----------------
+
+Please keep discussion topics in their relevant venue.
+Thoughts and concerns regarding python-odml should be discussed in GitHub issues.
+Project ideas should be discussed on Trellis.
+Less formal discussions can be had in the IRC chatroom.
+If you are new to IRC, this [etiquette guide](https://github.com/fizerkhan/irc-etiquette) may be useful.
+

+ 3 - 0
code/python-odml/MANIFEST.in

@@ -0,0 +1,3 @@
+include LICENSE
+include README.rst
+include odml/info.json

+ 0 - 0
code/python-odml/README.rst


Some files were not shown because too many files changed in this diff