
gin commit from nit-ope-de04

New files: 1
Modified files: 1
sprenger committed 3 years ago
commit fbcfa0db21

+ 41 - 46
code/data_overview_1.py

@@ -128,9 +128,6 @@ def get_arraygrid(blackrock_elid_list, chosen_el, rej_el=None):
 # monkey = 'Nikos2'
 monkey = 'Lilou'
 
-nsx_none = {'Lilou': None, 'Nikos2': None}
-nsx_lfp = {'Lilou': 2, 'Nikos2': 2}
-nsx_raw = {'Lilou': 5, 'Nikos2': 6}
 chosen_el = {'Lilou': 71, 'Nikos2': 63}
 chosen_units = {'Lilou': range(1, 5), 'Nikos2': range(1, 5)}
 
@@ -141,19 +138,24 @@ session = reachgraspio.ReachGraspIO(
     odml_directory=datasetdir,
     verbose=False)
 
-bl = session.read_block(lazy=False, load_waveforms=False, scaling='voltage')
+# loads only ns2 data of all channels and chosen units
+#bl_lfp = session.read_block(
 
-seg = bl.segments[0]
+# loads raw data of chosen electrode and chosen units
+#bl_raw = session.read_block(
+
+block = session.read_block(lazy=True)
+segment = block.segments[0]
 
 # Displaying loaded data structure as string output
 print("\nBlock")
-print('Attributes ', bl.__dict__.keys())
-print('Annotations', bl_raw.annotations)
+print('Attributes ', block.__dict__.keys())
+print('Annotations', block.annotations)
 print("\nSegment")
-print('Attributes ', seg_raw.__dict__.keys())
-print('Annotations', seg_raw.annotations)
+print('Attributes ', segment.__dict__.keys())
+print('Annotations', segment.annotations)
 print("\nEvents")
-for x in seg_raw.events:
+for x in segment.events:
     print('\tEvent with name', x.name)
     print('\t\tAttributes ', x.__dict__.keys())
     print('\t\tAnnotation keys', x.annotations.keys())
@@ -162,43 +164,39 @@ for x in seg_raw.events:
                      'trial_reject_IFC']:
         print('\t\t'+anno_key, x.annotations[anno_key][:20])
 
-print("\nChannels")
-for x in bl_raw.channel_indexes:
-    print('\tChannel with name', x.name)
-    print('\t\tAttributes ', x.__dict__.keys())
-    print('\t\tchannel_ids', x.channel_ids)
-    print('\t\tchannel_names', x.channel_names)
-    print('\t\tAnnotations', x.annotations)
-print("\nUnits")
-for x in bl_raw.list_units:
-    print('\tUnit with name', x.name)
+print("\nGroups")
+for x in block.groups:
+    print('\tGroup with name', x.name)
     print('\t\tAttributes ', x.__dict__.keys())
     print('\t\tAnnotations', x.annotations)
-    print('\t\tchannel_id', x.annotations['channel_id'])
-    assert(x.annotations['channel_id'] == x.channel_index.channel_ids[0])
+    # TODO: Add more here
+
 print("\nSpikeTrains")
-for x in seg_raw.spiketrains:
+for x in segment.spiketrains:
     print('\tSpiketrain with name', x.name)
     print('\t\tAttributes ', x.__dict__.keys())
     print('\t\tAnnotations', x.annotations)
     print('\t\tchannel_id', x.annotations['channel_id'])
+    print('\t\tunit_id', x.annotations['unit_id'])
+    print('\t\tis sua', x.annotations['sua'])
+    print('\t\tis mua', x.annotations['mua'])
     print('\t\tspike times', x.times[0:20])
 print("\nAnalogSignals")
-for x in seg_raw.analogsignals:
+for x in segment.analogsignals:
     print('\tAnalogSignal with name', x.name)
     print('\t\tAttributes ', x.__dict__.keys())
     print('\t\tAnnotations', x.annotations)
-    print('\t\tchannel_id', x.annotations['channel_id'])
+    print('\t\tchannel_ids', x.array_annotations['channel_ids'])
 
 # get start and stop events of trials
 start_events = neo_utils.get_events(
-    seg_raw,
+    segment,
     properties={
         'name': 'TrialEvents',
         'trial_event_labels': 'TS-ON',
         'performance_in_trial': 255})
 stop_events = neo_utils.get_events(
-    seg_raw,
+    segment,
     properties={
         'name': 'TrialEvents',
         'trial_event_labels': 'STOP',
@@ -210,7 +208,7 @@ assert len(stop_events) == 1
 
 # insert epochs from 10ms before TS to 50ms after RW corresponding to trials
 neo_utils.add_epoch(
-    seg_raw,
+    segment,
     start_events[0],
     stop_events[0],
     pre=-250 * pq.ms,
@@ -220,30 +218,26 @@ neo_utils.add_epoch(
     trial_performance=start_events[0].annotations['performance_in_trial'])
 
 # access single epoch of this data_segment
-epochs = neo_utils.get_epochs(seg_raw,
+epochs = neo_utils.get_epochs(segment,
                               properties={'trial_status': 'complete_trials'})
 assert len(epochs) == 1
 
 # cut segments according to inserted 'complete_trials' epochs and reset trial
 #  times
-cut_segments_raw = neo_utils.cut_segment_by_epoch(
-    seg_raw, epochs[0], reset_time=True)
-
-cut_segments_lfp = neo_utils.cut_segment_by_epoch(
-    seg_lfp, epochs[0], reset_time=True)
+cut_segments = neo_utils.cut_segment_by_epoch(
+    segment, epochs[0], reset_time=True)
 
 # =============================================================================
 # Define data for overview plots
 # =============================================================================
 trial_index = {'Lilou': 0, 'Nikos2': 6}
 
-trial_seg_raw = cut_segments_raw[trial_index[monkey]]
-trial_seg_lfp = cut_segments_lfp[trial_index[monkey]]
+trial_segment = cut_segments[trial_index[monkey]]
 
-blackrock_elid_list = bl_lfp.annotations['avail_electrode_ids']
+blackrock_elid_list = block.annotations['avail_electrode_ids']
 
 # get 'TrialEvents'
-event = trial_seg_lfp.events[2]
+event = trial_segment.events[2]
 start = event.annotations['trial_event_labels'].index('TS-ON')
 trialx_trty = event.annotations['belongs_to_trialtype'][start]
 trialx_trtimeid = event.annotations['trial_timestamp_id'][start]
@@ -255,7 +249,7 @@ if 'LF' in trialx_trty:
 else:
     trialz_trty = trialx_trty.replace('HF', 'LF')
 
-for i, tr in enumerate(cut_segments_lfp):
+for i, tr in enumerate(cut_segments):
     eventz = tr.events[2]
     nextft = eventz.annotations['trial_event_labels'].index('TS-ON')
     if eventz.annotations['belongs_to_trialtype'][nextft] == trialz_trty:
@@ -428,7 +422,7 @@ unit_type = {1: '', 2: '', 3: ''}
 
 wf_lim = []
 # plotting waveform for all spiketrains available
-for spiketrain in trial_seg_raw.spiketrains:
+for spiketrain in trial_segment.spiketrains:
     unit_id = spiketrain.annotations['unit_id']
     # get unit type
     if spiketrain.annotations['sua']:
@@ -479,7 +473,7 @@ ax2d.yaxis.set_label_position("right")
 plotted_unit_ids = []
 
 # plotting all available spiketrains
-for st in trial_seg_raw.spiketrains:
+for st in trial_segment.spiketrains:
     unit_id = st.annotations['unit_id']
     plotted_unit_ids.append(unit_id)
     ax3.plot(st.times.rescale(plotting_time_unit),
@@ -500,8 +494,8 @@ ax3.set_title('spiketrains', fontdict_titles)
 # PLOT "raw" SIGNAL of chosen trial of chosen electrode
 # =============================================================================
 # get "raw" data from chosen electrode
-assert len(trial_seg_raw.analogsignals) == 1
-el_raw_sig = trial_seg_raw.analogsignals[0]
+assert len(trial_segment.analogsignals) == 1
+el_raw_sig = trial_segment.analogsignals[0]
 
 # plotting raw signal trace
 ax4.plot(el_raw_sig.times.rescale(plotting_time_unit),
@@ -515,8 +509,8 @@ ax4.tick_params(axis='y', direction='in', length=3, labelsize='xx-small',
                 labelleft='off', labelright='on')
 ax4.set_title('"raw" signal', fontdict_titles)
 
-ax4.set_xlim(trial_seg_raw.t_start.rescale(plotting_time_unit),
-             trial_seg_raw.t_stop.rescale(plotting_time_unit))
+ax4.set_xlim(trial_segment.t_start.rescale(plotting_time_unit),
+             trial_segment.t_stop.rescale(plotting_time_unit))
 ax4.xaxis.set_major_locator(ticker.MultipleLocator(base=1))
 
 
@@ -565,7 +559,8 @@ ax4.text(timebar_xmin + 0.25 * pq.s, timebar_ypos + timebar_labeloffset,
 # PLOT BEHAVIORAL SIGNALS of chosen trial
 # =============================================================================
 # get behavioral signals
-ainp_signals = [nsig for nsig in trial_seg_lfp.analogsignals if
+# TODO: Adjust this to merged analogsignals
+ainp_signals = [nsig for nsig in trial_segment.analogsignals if
                 nsig.annotations['channel_id'] > 96]
 
 ainp_trialz = [nsig for nsig in trialz_seg_lfp.analogsignals if
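
For context, the loop over block.groups above replaces the removed ChannelIndex/Unit printout and still carries a "TODO: Add more here". Below is a minimal sketch (not part of the commit) of what such a printout could look like with Neo >= 0.9 Group objects, assuming `block` has been read lazily as in the hunk above; whether the groups actually contain SpikeTrains depends on how reachgraspio populates them.

# Hedged sketch: inspect Neo >= 0.9 Group objects, which replace ChannelIndex/Unit.
for group in block.groups:
    print('\tGroup with name', group.name)
    print('\t\tAnnotations', group.annotations)
    # list the types of data objects collected in this group
    print('\t\tcontains', [type(child).__name__ for child in group.data_children])
    # SpikeTrains grouped here, if the IO attaches them to the group
    for st in getattr(group, 'spiketrains', []):
        print('\t\tSpikeTrain', st.name, 'channel_id', st.annotations.get('channel_id'))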

+ 79 - 71
code/example.py

@@ -50,7 +50,7 @@ from neo import Block, Segment
 from elephant.signal_processing import butter
 from reachgraspio import reachgraspio
 from neo.utils import cut_segment_by_epoch, add_epoch, get_events
-# from neo.utils import add_epoch, cut_segment_by_epoch, get_events
+from neo_utils import load_segment
 
 
 # =============================================================================
@@ -61,21 +61,29 @@ from neo.utils import cut_segment_by_epoch, add_epoch, get_events
 
 # Specify the path to the recording session to load, eg,
 # '/home/user/l101210-001'
-session_name = os.path.join('..', 'datasets', 'i140703-001')
-# session_name = os.path.join('..', 'datasets', 'l101210-001')
+# session_name = os.path.join('..', 'datasets', 'i140703-001')
+session_name = os.path.join('..', 'datasets', 'l101210-001')
 odml_dir = os.path.join('..', 'datasets')
 
 # Open the session for reading
 session = reachgraspio.ReachGraspIO(session_name, odml_directory=odml_dir)
 
-# Read the complete dataset in lazy mode. Neo object will be created, but
-# data are not loaded in to memory.
-data_block = session.read_block(correct_filter_shifts=True, lazy=True)
+# Read the complete dataset in lazy mode, generating all neo objects
+# but not loading data into memory. The lazy neo structure contains proxy
+# objects for all recorded data types (time series at 1000 Hz (ns2) and
+# 30 kHz (ns5/ns6) scaled to units of voltage, sorted spike trains, spike
+# waveforms and events) of the recording session, returned as a Neo Block. The
+# time shift of the ns2 signal (LFP) induced by the online filter is
+# automatically corrected for by a heuristic factor stored in the metadata
+# (correct_filter_shifts=True).
 
-# Access the single Segment of the data block
-assert len(data_block.segments) == 1
-data_segment = data_block.segments[0]
+block = session.read_block(lazy=True, correct_filter_shifts=True)
 
+# Access the single Segment of the data block
+assert len(block.segments) == 1
+
+# Load the data content of all child data objects for the first 300 seconds
+data_segment = load_segment(block.segments[0], time_slice=(None, 300*pq.s))
 
 # =============================================================================
 # Create offline filtered LFP
@@ -87,49 +95,44 @@ data_segment = data_block.segments[0]
 # Neo AnalogSignal, which is used for plotting later on in this script.
 # =============================================================================
 
-# Iterate through all analog signals and replace these lazy object by new
-# analog signals containing only data of channel 62 (target_channel_id) and
-# provide human readable name for the analog signal (LFP / raw signal type)
-
 target_channel_id = 62
 nsx_to_anasig_name = {2: 'LFP signal (online filtered)',
                       5: 'raw signal',
                       6: 'raw signal'}
 
-idx = 0
-while idx < len(data_segment.analogsignals):
-    # remove analog signals, that don't contain target channel
-    channel_ids = data_segment.analogsignals[idx].array_annotations['channel_ids']
-    if target_channel_id not in channel_ids:
-        data_segment.analogsignals.pop(idx)
+filtered_anasig = None
+raw_anasig = None
+# identify neuronal signals and provide labels for plotting
+for anasig in data_segment.analogsignals:
+    # skip non-neuronal signals
+    if not anasig.annotations['neural_signal']:
         continue
 
-    # replace analog signal with analog signal containing data
-    target_channel_index = np.where(channel_ids == target_channel_id)[0][0]
-    anasig = data_segment.analogsignals[idx].load(
-        channel_indexes=[target_channel_index])
-    data_segment.analogsignals[idx] = anasig
-    idx += 1
-
-    # replace name by label of contained signal type
-    anasig.name = nsx_to_anasig_name[anasig.array_annotations['nsx'][0]]
-
-# load spiketrains of same channel
-channel_spiketrains = data_segment.filter({'channel_id': target_channel_id})
-data_segment.spiketrains = [st.load(load_waveforms=True) for st in channel_spiketrains]
-
-# The LFP is not present in the data fils of both recording. Here, we
-# generate the LFP signal from the raw signal if it's not present already.
-if not data_segment.filter({'name': 'LFP signal (online filtered)'}):
-    raw_signal = data_segment.filter({'name': 'raw signal'})[0]
-
-    # Use the Elephant library to filter the raw analog signal
-    f_anasig = butter(raw_signal,  highpass_freq=None, lowpass_freq=250 * pq.Hz, order=4)
-    print('filtering done.')
-
-    f_anasig.name = 'LFP signal (offline filtered)'
-    # Attach offline filtered LFP to the segment of data
-    data_segment.analogsignals.extend(f_anasig)
+    # identify nsx source of signals in this AnalogSignal object
+    nsx = np.unique(anasig.array_annotations['nsx'])
+    assert len(nsx) == 1, 'Different nsx sources in AnalogSignal'
+    nsx = nsx[0]
+
+    if nsx == 2:
+        # AnalogSignal is LFP from ns2
+        anasig.name = 'LFP (online filter, ns2)'
+        filtered_anasig = anasig
+    elif nsx in [5, 6]:
+        # AnalogSignal is raw signal from ns5 or ns6
+        anasig.name = f'raw (ns{nsx})'
+        raw_anasig = anasig
+
+# Create LFP signal by filtering raw signal if not present already
+if filtered_anasig is None:
+    # Use the Elephant library to filter the signal
+    f_anasig = butter(
+        raw_anasig,
+        highpass_freq=None,
+        lowpass_freq=250 * pq.Hz,
+        order=4)
+    f_anasig.name = f'LFP (offline filtered, ns{raw_anasig.array_annotations["nsx"][0]})'
+    # Attach the offline filtered LFP to the segment of data
+    data_segment.analogsignals.append(f_anasig)
 
 
 # =============================================================================
@@ -213,38 +216,44 @@ trial_segment = trial_segments[0]
 fig = plt.figure(facecolor='w')
 time_unit = pq.CompoundUnit('1./30000*s')
 amplitude_unit = pq.microvolt
-nsx_colors = ['b', 'k', 'r']
+nsx_colors = {2: 'k', 5: 'r', 6: 'b'}
 
-# Loop through all analog signals and plot the signal in a color corresponding
-# to its sampling frequency (i.e., originating from the ns2/ns5 or ns2/ns6).
+# Loop through all AnalogSignal objects and plot the signal of the target channel
+# in a color corresponding to its sampling frequency (i.e., originating from the ns2/ns5 or ns2/ns6).
 for i, anasig in enumerate(trial_segment.analogsignals):
-    plt.plot(
-        anasig.times.rescale(time_unit),
-        anasig.squeeze().rescale(amplitude_unit),
-        label=anasig.name,
-        color=nsx_colors[i])
+    # only visualize neural data
+    if anasig.annotations['neural_signal']:
+        nsx = anasig.array_annotations['nsx'][0]
+        target_channel_index = np.where(anasig.array_annotations['channel_ids'] == target_channel_id)[0]
+        target_signal = anasig[:, target_channel_index]
+        plt.plot(
+            target_signal.times.rescale(time_unit),
+            target_signal.squeeze().rescale(amplitude_unit),
+            label=target_signal.name,
+            color=nsx_colors[nsx])
 
 # Loop through all spike trains and plot the spike time, and overlapping the
 # wave form of the spike used for spike sorting stored separately in the nev
 # file.
 for st in trial_segment.spiketrains:
     color = np.random.rand(3,)
-    for spike_id, spike in enumerate(st):
-        # Plot spike times
-        plt.axvline(
-            spike.rescale(time_unit).magnitude,
-            color=color,
-            label='Unit ID %i' % st.annotations['unit_id'])
-        # Plot waveforms
-        waveform = st.waveforms[spike_id, 0, :]
-        waveform_times = np.arange(len(waveform))*time_unit + spike
-        plt.plot(
-            waveform_times.rescale(time_unit).magnitude,
-            waveform.rescale(amplitude_unit),
-            '--',
-            linewidth=2,
-            color=color,
-            zorder=0)
+    if st.annotations['channel_id'] == target_channel_id:
+        for spike_id, spike in enumerate(st):
+            # Plot spike times
+            plt.axvline(
+                spike.rescale(time_unit).magnitude,
+                color=color,
+                label='Unit ID %i' % st.annotations['unit_id'])
+            # Plot waveforms
+            waveform = st.waveforms[spike_id, 0, :]
+            waveform_times = np.arange(len(waveform))*time_unit + spike
+            plt.plot(
+                waveform_times.rescale(time_unit).magnitude,
+                waveform.rescale(amplitude_unit),
+                '--',
+                linewidth=2,
+                color=color,
+                zorder=0)
 
 # Loop through all events
 for event in trial_segment.events:
@@ -255,8 +264,7 @@ for event in trial_segment.events:
                     alpha=0.2,
                     linewidth=3,
                     linestyle='dashed',
-                    label='event ' + event.array_annotations[
-                        'trial_event_labels'][ev_id])
+                    label=f'event {event.array_annotations["trial_event_labels"][ev_id]}')
 
 # Finishing touches on the plot
 plt.autoscale(enable=True, axis='x', tight=True)
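
The offline LFP branch in the hunk above relies on elephant.signal_processing.butter(). Here is a small self-contained sketch of that call, using a synthetic 30 kHz AnalogSignal instead of the recorded raw signal; the random test data and the signal names are assumptions, while the 250 Hz low-pass and filter order mirror the diff.

# Sketch: low-pass filter a synthetic "raw" signal to obtain an LFP-like signal.
import numpy as np
import quantities as pq
from neo import AnalogSignal
from elephant.signal_processing import butter

raw = AnalogSignal(np.random.randn(30000, 1), units='uV',
                   sampling_rate=30 * pq.kHz, name='raw (ns6)')
lfp = butter(raw, highpass_freq=None, lowpass_freq=250 * pq.Hz, order=4)
lfp.name = 'LFP (offline filtered, ns6)'
# filtering preserves shape and sampling rate
print(lfp.shape, lfp.sampling_rate)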

+ 29 - 0
code/neo_utils.py

@@ -0,0 +1,29 @@
+from neo.io.proxyobjects import BaseProxy
+from neo import Segment
+import copy
+
+def load_segment(segment, *args, **kwargs):
+    """
+    Utility function to load the data of all child data objects of a segment; extra arguments are passed to load().
+    """
+
+    new_segment = Segment(segment.name)
+
+    def load_data(obj, *args, **kwargs):
+        if isinstance(obj, BaseProxy):
+            return obj.load(*args, **kwargs)
+        else:
+            return copy.deepcopy(obj)
+
+    for st in segment.spiketrains:
+        new_segment.spiketrains.append(load_data(st, *args, load_waveforms=True, **kwargs))
+    for event in segment.events:
+        new_segment.events.append(load_data(event, *args, **kwargs))
+    for epoch in segment.epochs:
+        new_segment.epochs.append(load_data(epoch, *args, **kwargs))
+    for anasig in segment.analogsignals:
+        new_segment.analogsignals.append(load_data(anasig, *args, **kwargs))
+    for irrsig in segment.irregularlysampledsignals:
+        new_segment.irregularlysampledsignals.append(load_data(irrsig, *args, **kwargs))
+
+    return new_segment
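
A hedged usage sketch of the new load_segment() helper, mirroring the call in code/example.py; it assumes the datasets are available locally and that extra keyword arguments are forwarded to the proxies' load() method as above (neo proxy objects accept a time_slice argument).

import os
import quantities as pq
from reachgraspio import reachgraspio
from neo_utils import load_segment

session = reachgraspio.ReachGraspIO(
    os.path.join('..', 'datasets', 'l101210-001'),
    odml_directory=os.path.join('..', 'datasets'))
block = session.read_block(lazy=True, correct_filter_shifts=True)

# materialize the proxy objects of the single segment, restricted to the first 300 s
segment = load_segment(block.segments[0], time_slice=(None, 300 * pq.s))
print(len(segment.analogsignals), 'AnalogSignals and',
      len(segment.spiketrains), 'SpikeTrains loaded')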

+ 0 - 1
datasets/i140703-001-03.nev

@@ -1 +0,0 @@
-../.git/annex/objects/5Z/4g/MD5-s168323228--2c19fedac036b789e3a1192541a6f338/MD5-s168323228--2c19fedac036b789e3a1192541a6f338

+ 1 - 0
datasets/i140703-001-03.nev

@@ -0,0 +1 @@
+/annex/objects/MD5-s168323228--2c19fedac036b789e3a1192541a6f338

+ 0 - 1
datasets/i140703-001.ccf

@@ -1 +0,0 @@
-../.git/annex/objects/2F/5M/MD5-s187076--10d198df25a921669f01b40c9eb3a8c8/MD5-s187076--10d198df25a921669f01b40c9eb3a8c8

+ 1 - 0
datasets/i140703-001.ccf

@@ -0,0 +1 @@
+/annex/objects/MD5-s187076--10d198df25a921669f01b40c9eb3a8c8

+ 0 - 1
datasets/i140703-001.nev

@@ -1 +0,0 @@
-../.git/annex/objects/xZ/Z9/MD5-s168323228--a9f5d017dacfffe571070a3157de37d9/MD5-s168323228--a9f5d017dacfffe571070a3157de37d9

+ 1 - 0
datasets/i140703-001.nev

@@ -0,0 +1 @@
+/annex/objects/MD5-s168323228--a9f5d017dacfffe571070a3157de37d9

+ 0 - 1
datasets/i140703-001.ns2

@@ -1 +0,0 @@
-../.git/annex/objects/QZ/gP/MD5-s204661895--b1a081853671a5c1e74c0d3ba9e62e84/MD5-s204661895--b1a081853671a5c1e74c0d3ba9e62e84

+ 1 - 0
datasets/i140703-001.ns2

@@ -0,0 +1 @@
+/annex/objects/MD5-s204661895--b1a081853671a5c1e74c0d3ba9e62e84

+ 0 - 1
datasets/i140703-001.ns6

@@ -1 +0,0 @@
-../.git/annex/objects/Xp/1q/MD5-s5778493379--3724a99d37c1e61e1c5c2a3e0236c807/MD5-s5778493379--3724a99d37c1e61e1c5c2a3e0236c807

+ 1 - 0
datasets/i140703-001.ns6

@@ -0,0 +1 @@
+/annex/objects/MD5-s5778493379--3724a99d37c1e61e1c5c2a3e0236c807

+ 0 - 1
datasets/i140703-001.xls

@@ -1 +0,0 @@
-../.git/annex/objects/Z0/ZJ/MD5-s950784--d207115d297a6ade70377cc4aec03a76/MD5-s950784--d207115d297a6ade70377cc4aec03a76

+ 1 - 0
datasets/i140703-001.xls

@@ -0,0 +1 @@
+/annex/objects/MD5-s950784--d207115d297a6ade70377cc4aec03a76

+ 0 - 1
datasets/l101210-001-02.nev

@@ -1 +0,0 @@
-../.git/annex/objects/jf/2Z/MD5-s287712104--50e8b4ddbd88d02145e8fd27edd2c89a/MD5-s287712104--50e8b4ddbd88d02145e8fd27edd2c89a

+ 1 - 0
datasets/l101210-001-02.nev

@@ -0,0 +1 @@
+/annex/objects/MD5-s287712104--50e8b4ddbd88d02145e8fd27edd2c89a

+ 0 - 1
datasets/l101210-001.ccf

@@ -1 +0,0 @@
-../.git/annex/objects/FV/3G/MD5-s108204--dd2be8e8417f21a1e61da5e90e65f386/MD5-s108204--dd2be8e8417f21a1e61da5e90e65f386

+ 1 - 0
datasets/l101210-001.ccf

@@ -0,0 +1 @@
+/annex/objects/MD5-s108204--dd2be8e8417f21a1e61da5e90e65f386

+ 0 - 1
datasets/l101210-001.nev

@@ -1 +0,0 @@
-../.git/annex/objects/xV/3g/MD5-s287712104--498b5d5cdd51b5b7a2ba05481c185c8f/MD5-s287712104--498b5d5cdd51b5b7a2ba05481c185c8f

+ 1 - 0
datasets/l101210-001.nev

@@ -0,0 +1 @@
+/annex/objects/MD5-s287712104--498b5d5cdd51b5b7a2ba05481c185c8f

+ 0 - 1
datasets/l101210-001.ns2

@@ -1 +0,0 @@
-../.git/annex/objects/X1/38/MD5-s8511032--66d7cca0ec1479056b561fa85c6d222b/MD5-s8511032--66d7cca0ec1479056b561fa85c6d222b

+ 1 - 0
datasets/l101210-001.ns2

@@ -0,0 +1 @@
+/annex/objects/MD5-s8511032--66d7cca0ec1479056b561fa85c6d222b

+ 0 - 1
datasets/l101210-001.ns5

@@ -1 +0,0 @@
-../.git/annex/objects/Jv/Qj/MD5-s4085268896--c1fe85b78d2e56b6ada79a583661fe88/MD5-s4085268896--c1fe85b78d2e56b6ada79a583661fe88

+ 1 - 0
datasets/l101210-001.ns5

@@ -0,0 +1 @@
+/annex/objects/MD5-s4085268896--c1fe85b78d2e56b6ada79a583661fe88

+ 0 - 1
datasets/l101210-001.xls

@@ -1 +0,0 @@
-../.git/annex/objects/Gw/kq/MD5-s1091072--cebc0a7fe6333c44699c6d47e1c8e17b/MD5-s1091072--cebc0a7fe6333c44699c6d47e1c8e17b

+ 1 - 0
datasets/l101210-001.xls

@@ -0,0 +1 @@
+/annex/objects/MD5-s1091072--cebc0a7fe6333c44699c6d47e1c8e17b