
allow speaking of minutes (and not just TRs)

Christian O. Häusler 1 year ago
parent commit 11ffb9fdc4
3 files changed with 109 additions and 80 deletions
  1. code/plot_corr-emp-vs-estimation.py (+73, -9)
  2. code/predict_ppa.py (+36, -69)
  3. code/predict_ppa.submit (+0, -2)

+ 73 - 9
code/plot_corr-emp-vs-estimation.py

@@ -27,38 +27,84 @@ def parse_arguments():
 
     parser.add_argument('-invis',
                         required=False,
-                        default='test/corr_vis-ppa-vs-estimation_srm-ao-av-vis_feat10.csv',
+                        default='test/results/corr_vis-ppa-vs-estimation_srm-ao-av-vis_feat10.csv',
                         help='csv file with correlations VIS vs. estimation')
 
     parser.add_argument('-inav',
                         required=False,
-                        default='test/corr_av-ppa-vs-estimation_srm-ao-av-vis_feat10.csv',
+                        default='test/results/corr_av-ppa-vs-estimation_srm-ao-av-vis_feat10.csv',
                         help='csv file with correlations AV vs. estimation')
 
     parser.add_argument('-inao',
                         required=False,
-                        default='test/corr_ao-ppa-vs-estimation_srm-ao-av-vis_feat10.csv',
+                        default='test/results/corr_ao-ppa-vs-estimation_srm-ao-av-vis_feat10.csv',
                         help='csv file with correlations AO vs. estimation')
 
     parser.add_argument('-incronb',
                         required=False,
-                        default='test/statistics_cronbachs.csv',
+                        default='test/results/statistics_cronbachs.csv',
                         help='csv file with Cronbachs Alpha of PPA from VIS, AV & AO')
 
+    parser.add_argument('-useMin',
+                        required=False,
+                        default='False',
+                        help='plot minutes instead of TRs?')
+
     parser.add_argument('-outdir',
                         required=False,
-                        default='test',
+                        default='test/results',
                         help='output directory')
 
+
+
     args = parser.parse_args()
 
     inVisResults = args.invis
     inAvResults = args.inav
     inAoResults = args.inao
     inCronbachs = args.incronb
+    useMin = args.useMin
     outdir = args.outdir
 
-    return inVisResults, inAvResults, inAoResults, inCronbachs, outdir
+    if useMin == 'False':
+        useMin = False
+    elif useMin == 'True':
+        useMin = True
+    else:
+        raise TypeError("useMin must be Boolean (e.g. 'True' or 'False')")
+
+    return inVisResults, inAvResults, inAoResults, inCronbachs, useMin, outdir
+
+
+def convert_trs_to_min(df):
+    '''just a quick and dirty solution
+    '''
+    #df['number of runs'] = np.where((df['prediction via'] == 'anatomical alignment') & (df['number of runs'] == 0), 0, df['number of runs'])
+
+    df['number of runs'] = np.where((df['prediction via'] == 'visual localizer') & (df['number of runs'] == 1), 5, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'visual localizer') & (df['number of runs'] == 2), 10, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'visual localizer') & (df['number of runs'] == 3), 15, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'visual localizer') & (df['number of runs'] == 4), 20, df['number of runs'])
+
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 1), 15, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 2), 30, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 3), 45, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 4), 60, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 5), 75, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 6), 90, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 7), 105, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'movie') & (df['number of runs'] == 8), 120, df['number of runs'])
+
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 1), 15, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 2), 30, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 3), 45, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 4), 60, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 5), 75, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 6), 90, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 7), 105, df['number of runs'])
+    df['number of runs'] = np.where((df['prediction via'] == 'audio-description') & (df['number of runs'] == 8), 120, df['number of runs'])
+
+    return df
 
 
 def plot_boxplot(axis, df):
@@ -140,7 +186,10 @@ def plot_subplot(axis, title, df, boxplot=False, legend=True, xlabel=True, ylabe
 
     # set a custom label for the x axis
     if xlabel == True:
-        axis.set_xlabel('number of runs / segments')
+        if useMin == True:
+            axis.set_xlabel('minutes of functional scanning')
+        else:
+            axis.set_xlabel('number of runs / segments')
     else:
         x_axis = axis.axes.get_xaxis()
         x_label = x_axis.get_label()
@@ -170,7 +219,7 @@ def plot_subplot(axis, title, df, boxplot=False, legend=True, xlabel=True, ylabe
 
 if __name__ == "__main__":
     # read command line arguments
-    visResults, avResults, aoResults, cronbachs, outDir = parse_arguments()
+    visResults, avResults, aoResults, cronbachs, useMin, outDir = parse_arguments()
 
     # some preparations for plotting
     # close figure
@@ -187,7 +236,7 @@ if __name__ == "__main__":
     # fig.suptitle('Correlations between empirical and predicted Z-maps')
 
     # adjust some spacings
-    plt.subplots_adjust(hspace=0.35,
+    plt.subplots_adjust(hspace=0.55,
                         wspace=None,
                         top=None,
                         bottom=None,
@@ -229,6 +278,11 @@ if __name__ == "__main__":
     # read the first
     visDf = pd.read_csv(visResults)
     axNr = 0
+
+    # convert TRs to Minutes
+    if useMin is True:
+        visDf = convert_trs_to_min(visDf)
+
     # call the plotting function
     axes[axNr] = plot_subplot(
         axes[axNr],
@@ -243,6 +297,11 @@ if __name__ == "__main__":
     # plot middle subplot
     avDf = pd.read_csv(avResults)
     axNr = 1
+
+    # convert TRs to Minutes
+    if useMin is True:
+        avDf = convert_trs_to_min(avDf)
+
     # call the plotting function
     axes[axNr] = plot_subplot(
         axes[axNr],
@@ -256,6 +315,11 @@ if __name__ == "__main__":
     # plot lower subplot
     aoDf = pd.read_csv(aoResults)
     axNr = 2
+
+    # convert TRs to Minutes
+    if useMin is True:
+        aoDf = convert_trs_to_min(aoDf)
+
     # call the plotting function
     axes[axNr] = plot_subplot(
         axes[axNr],

+ 36 - 69
code/predict_ppa.py

@@ -394,13 +394,16 @@ def predict_from_cms(left_out_subj, subjs, zmap_fpathes, start, end):
 
     # load the left-out subject's transformation matrix
     # that will be used to tranform the data from CMS into brain volume
-    in_fpath = os.path.join(in_dir, left_out_subj,
+    in_fpath = os.path.join(in_dir,
+                            left_out_subj,
+                            'matrices',
                             f'wmatrix_{model}_feat{n_feat}_{start}-{end}.npy')
     wmatrix = np.load(in_fpath)
 
     # load the SRM (based on non-left-out subjects' data)
     srm_fpath = os.path.join(in_dir,
                              left_out_subj,
+                             'models',
                              f'{model}_feat{n_feat}-iter{n_iter}.npz')
     # load it
     srm = load_srm(srm_fpath)
@@ -446,15 +449,21 @@ def predict_from_cms(left_out_subj, subjs, zmap_fpathes, start, end):
     # adjust the name of the output file according to the input:
     if 'studyforrest-data-visualrois' in zmap_fpathes[0]:
         out_fpath = os.path.join(
-            in_dir, left_out_subj,
+            in_dir,
+            left_out_subj,
+            'predictions',
             f'predicted-VIS-PPA_from_{model}_feat{n_feat}_{start}-{end}.nii.gz')
     elif '2nd-lvl_audio-ppa-ind' in zmap_fpathes[0]:
         out_fpath = os.path.join(
-            in_dir, left_out_subj,
+            in_dir,
+            left_out_subj,
+            'predictions',
             f'predicted-AO-PPA_from_{model}_feat{n_feat}_{start}-{end}.nii.gz')
     elif '2nd-lvl_movie-ppa-ind' in zmap_fpathes[0]:
         out_fpath = os.path.join(
-            in_dir, left_out_subj,
+            in_dir,
+            left_out_subj,
+            'predictions',
             f'predicted-AV-PPA_from_{model}_feat{n_feat}_{start}-{end}.nii.gz')
     else:
         print('unkown source for PPA (must be from VIS, AV or AO)')
@@ -507,15 +516,19 @@ def predict_from_ana(left_out_subj, nifti_masker, subjs, zmap_fpathes):
 
     out_fpath = os.path.join(in_dir,
                              left_out_subj,
+                             'predictions',
                              f'predicted-{which_PPA}-PPA_from_anatomy.nii.gz')
 
+    # create (sub)directories
+    os.makedirs(os.path.dirname(out_fpath), exist_ok=True)
+
     nib.save(mean_anat_img, out_fpath)
 
     return mean_anat_arr
 
 
 def get_array_from_fsl_results(left_out_subj, zmap_fpath):
-    '''loads a zmap (FSL outpunt)
+    '''loads Z-map (FSL output)
     '''
     # load the subject's zmap of the PPA contrast
     zmap_img = nib.load(zmap_fpath)
@@ -588,7 +601,7 @@ def run_the_predictions(zmap_fpathes, subjs, model):
     # but also increase the number of fMRI runs used for functional alignment
     print('Running prediction via functional alignment')
     for start, end, stim, runs in starts_ends[:]:  # cf. constants at the top
-        print(f'\nTRs:\t{start}-{end}')
+        print(f'TRs:\t{start}-{end}')
 
         # initialize a list that will, for every subject, contain
         # the array of predicted z-value
@@ -658,61 +671,15 @@ def run_the_predictions(zmap_fpathes, subjs, model):
         which_PPA = 'AO'
 
     # save the dataframe for the currently used CMS
-    df.to_csv(f'{in_dir}/corr_{which_PPA.lower()}-ppa-vs-estimation_{model}_feat{n_feat}.csv', index=False)
+    outFpath = os.path.join(in_dir,
+                            'results',
+                            f'corr_{which_PPA.lower()}-ppa-vs-estimation_{model}_feat{n_feat}.csv')
 
-    return None
+    os.makedirs(os.path.dirname(outFpath), exist_ok=True)
 
+    df.to_csv(outFpath, index=False)
 
-def run_predictions_for_denoised(zmap_fpathes, subjs, model):
-    '''Following is a mess and more a scaffold
-    '''
-#     ### DENOISED TIME-SERIES
-#     # using the contrast calculated from denoised time-series
-#     denoised_contr_arrays = []
-#
-#     for left_out_subj in subjs[:]:
-#         zmap_fpath = [x for x in zmap_fpathes if left_out_subj in x][0]
-#         # following is super hard-coded
-#         new_contrast = zmap_fpath.replace(
-#             'inputs/studyforrest-data-visualrois',
-#             'test')
-#
-#         nifti_masker, func_contrast_array = get_array_from_fsl_results(
-#             left_out_subj,
-#             new_contrast)
-#
-#         denoised_contr_arrays.append(func_contrast_array)
-#
-#     # compute the correlations of empirical vs. contrast from denoised TRs
-#     emp_vs_func_contr = [stats.pearsonr(empirical_arrays[idx],
-#                                         denoised_contr_arrays[idx]) for idx
-#                                         in range(len(empirical_arrays))]
-#
-#     # print the result of the currently used runs / stimulus
-#     print('subject\temp. vs. anat\temp. vs. from cleand TRs')
-#
-#     for idx, subj in enumerate(subjs):
-#         print(f'{subj}\t{round(emp_vs_anat[idx][0], 2)}\t'
-#         f'{round(emp_vs_func_contr[idx][0], 2)}')
-#
-#
-#     funcVsFuncLines = [[subj, predictor+'(contr)', runs, corr[0]]
-#                        for subj, corr in zip(subjs, func_vs_func_contr)]
-#
-#     for_dataframe.extend(funcVsFuncLines)
-#
-#     # compute the correlations of empirical vs. prediction from
-#     # functional alignment (with currently used no. of runs)
-#     func_vs_func_contr = [stats.pearsonr(
-#         denoised_contr_arrays[idx],
-#         func_pred_arrays[idx]) for idx
-#         in range(len(denoised_contr_arrays))]
-#
-#     # print the result of the currently used runs / stimulus
-#     print('subject\temp. vs. cms\tfunc.contr. vs. cms')
-#
-#     for idx, subj in enumerate(subjs):
-#         print(f'{subj}\t{round(emp_vs_func[idx][0], 2)}\t{round(func_vs_func_contr[idx][0], 2)}')
+    return None
 
 
 if __name__ == "__main__":
@@ -725,35 +692,35 @@ if __name__ == "__main__":
     subjs = [re.search(r'sub-..', string).group() for string in subjs_pathes]
     subjs = sorted(list(set(subjs)))
 
-    # for later prediction from anatomy, we need to transform the
-    # subject-specific z-maps from the localizer into MNI space
-    # (and later transform then into the subject-space of the left-out subject
-
     # VIS PPA
     # create the list of all subjects' VIS zmaps by substituting the
     # subject's string and the correct cope that was used in Sengupta et al.
+    print('Estimating Z-maps of visual localizer')
     zmap_fpathes = [
         (VIS_ZMAP_PATTERN.replace('sub-*', x[0]).replace('cope*', x[1]))
         for x in VIS_VPN_COPES.items()]
 
-    print('\nTransforming VIS z-maps into MNI and into other subjects\' space')
+    print('Transforming VIS z-maps into MNI and into other subjects\' space')
     transform_ind_ppas(zmap_fpathes, subjs)
     run_the_predictions(zmap_fpathes, subjs, model)
 
-    # AO PPA
+    # AV PPA
+    print('\tEstimating Z-maps of movie')
     zmap_fpathes = [
-        (AO_ZMAP_PATTERN.replace('sub-*', x[0]))
+        (AV_ZMAP_PATTERN.replace('sub-*', x[0]))
         for x in VIS_VPN_COPES.items()]
 
-    print('\nTransforming AO z-maps into MNI and into other subjects\' space')
+    print('Transforming AV z-maps into MNI and into other subjects\' space')
     transform_ind_ppas(zmap_fpathes, subjs)
     run_the_predictions(zmap_fpathes, subjs, model)
 
-    # AV PPA
+    # AO PPA
+    print('\tEstimating Z-maps of audio-description')
+
     zmap_fpathes = [
-        (AV_ZMAP_PATTERN.replace('sub-*', x[0]))
+        (AO_ZMAP_PATTERN.replace('sub-*', x[0]))
         for x in VIS_VPN_COPES.items()]
 
-    print('\nTransforming AV z-maps into MNI and into other subjects\' space')
+    print('Transforming AO z-maps into MNI and into other subjects\' space')
     transform_ind_ppas(zmap_fpathes, subjs)
     run_the_predictions(zmap_fpathes, subjs, model)
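Note: several hunks in `predict_ppa.py` move outputs into new subdirectories ('matrices', 'models', 'predictions', 'results') and create them on demand with `os.makedirs(..., exist_ok=True)` before saving. A small sketch of that pattern, using a hypothetical helper name and only the standard library:

```python
import os


def ensure_parent_dir(out_fpath):
    '''Create the output file's parent directory tree if it does not
    exist yet, then return the path unchanged for chaining.'''
    os.makedirs(os.path.dirname(out_fpath), exist_ok=True)
    return out_fpath

# e.g. before saving a predicted z-map or a results CSV:
# nib.save(predicted_img, ensure_parent_dir(out_fpath))
# df.to_csv(ensure_parent_dir(outFpath), index=False)
```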

+ 0 - 2
code/predict_ppa.submit

@@ -16,5 +16,3 @@ should_transfer_files = NO
 
 arguments = ./code/predict_ppa.py -model 'srm-ao-av-vis' -indir 'test' -nfeat '10' -niter '30'
 queue
-arguments = ./code/predict_ppa.py -model 'srm-ao-av' -indir 'test' -nfeat '10' -niter '30'
-queue