
gin commit from rMBP-15-Malthe.local

New files: 1
Modified files: 3
malthe.nielsen committed 4 years ago
commit 50cb0b2ac8
4 changed files with 123 additions and 87 deletions
  1. preprocess.py    +9   -40
  2. registration.py  +3   -1
  3. runner.py        +110 -0
  4. tools.py         +1   -46

+ 9 - 40
preprocess.py

@@ -1,9 +1,5 @@
 from pathlib import Path
-import xmltodict
 import argparse
-
-import javabridge
-import bioformats
 import numpy as np
 import nibabel as nib
 from PIL import Image, ImageSequence
@@ -11,6 +7,9 @@ from scipy import ndimage
 from skimage import morphology, filters
 from nipype.interfaces.ants.segmentation import N4BiasFieldCorrection
 
+import warnings
+warnings.simplefilter(action='ignore', category=FutureWarning)
+
 
 def get_out_paths(out_dir, stem):
     out_path_scaled = Path(out_dir, f'{stem}.nii')
@@ -27,12 +26,6 @@ def bias_field_correction(image, output, dim=2):
 
 
 def image_data_to_nii(pixdim, image_data, shrink, out_dir, file_path, save_unscaled=False):
-    """
-    Saves an array of image data as a nifti file, with correct pixel dimensions in
-    :param save_unscaled: boolean whether to save unscaled, as well as scaled nifti file
-    :param shrink: factor by which to shrink image dimensions by
-    :return: location of scaled and possibly unscaled nifti files
-    """
     image_dim = (image_data.shape[0], image_data.shape[1])
     scale = 1/shrink
     new_dim = (round(image_dim[1] * scale), round(image_dim[0] * scale))
@@ -57,22 +50,8 @@ def image_data_to_nii(pixdim, image_data, shrink, out_dir, file_path, save_unsca
     return path_scaled, path_unscaled
 
 
-def nd2_to_nii(nd2_path, out_dir, series, shrink=10):
-    """
-    Wrapper function for image_data_to_nii, for converting nd2 files to nifti
-    """
-    javabridge.start_vm(bioformats.JARS)
-    image = np.array(bioformats.load_image(str(nd2_path), series=series-1, rescale=False))
-    meta_dict = xmltodict.parse(bioformats.get_omexml_metadata(str(nd2_path)))
-    vox_size_x = float(meta_dict['OME']['Image'][series-1]['Pixels']['@PhysicalSizeX'])
-    javabridge.kill_vm()
-    return image_data_to_nii(vox_size_x, image, shrink, out_dir, nd2_path)
-
-
 def tiff_to_nii(tif_path, out_dir, pixdim=None, shrink=10):
-    """
-    Wrapper function for image_data_to_nii, for converting tiff files to nifti
-    """
+    Image.MAX_IMAGE_PIXELS = None
     tif_image = Image.open(tif_path)
     tif_header = dict(tif_image.tag)
     output = np.empty(np.array(tif_image).shape + (0,))
@@ -85,13 +64,6 @@ def tiff_to_nii(tif_path, out_dir, pixdim=None, shrink=10):
 
 
 def split_nii_channels(nii_path, out_dir=None, flip=False, mask_index=-1, bias=False):
-    """
-    Converts a single multi-channel nifti file to multiple single-channel nifti files, and masks foreground
-    :param flip: Whether to vertically flip the image, in order to properly align nifti file with template
-    :param mask_index: index of DAPI stained channel, on which to mask other channels on
-    :param bias: whether to bias-field correct the image
-    :return: Location of multiple single-channel nifti files
-    """
     if out_dir is None:
         out_dir = nii_path.parent
     nii = nib.load(str(nii_path))
@@ -108,10 +80,8 @@ def split_nii_channels(nii_path, out_dir=None, flip=False, mask_index=-1, bias=F
 
         if flip:
             channel_data = np.flip(channel_data, 1)
-
         if i == mask_index:
             channel_data = mask_foreground(channel_data)
-            channel_data = channel_data/np.mean(channel_data)
 
         new_header = nii_header
         new_header['dim'][0] = 2
@@ -130,9 +100,6 @@ def split_nii_channels(nii_path, out_dir=None, flip=False, mask_index=-1, bias=F
 
 
 def mask_foreground(raw_data):
-    """
-        Mask the foreground of an image, using otsu threshold and connected components to remove background noise
-    """
     raw_max = raw_data.max()
     raw_data = raw_data / raw_max
     blurred_data = ndimage.gaussian_filter(raw_data, 4)
@@ -152,13 +119,13 @@ def mask_foreground(raw_data):
     closed = morphology.binary_closing(foreground_mask, selem=morphology.square(30))
     raw_data = np.where(closed, raw_data, 0)
     return raw_data * raw_max
-
-
+    
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument("file", help="Location of file to process")
     parser.add_argument("dir", help="Directory for preprocessed Files")
-    parser.add_argument("-s", type=int, help="Series to extract")
+    parser.add_argument("-s", "--series", type=int, help="Series to extract")
+    parser.add_argument('-b', type=bool, default= True, help='Bias field correct image')
     parser.add_argument('--pdim', type=float, help='Pixel dimensions (Retrieved from Tiff file if not set)')
     args = parser.parse_args()
 
@@ -167,3 +134,5 @@ if __name__ == '__main__':
         n_path = tiff_to_nii(Path(args.file), Path(args.dir))
     elif filetype == '.nd2':
         n_path = nd2_to_nii(Path(args.file), Path(args.dir), args.series)
+    elif filetype == '.nii':
+        n_path = Path(args.file)

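For orientation, a minimal sketch of how the updated preprocess.py entry points can be driven from Python; the file paths are hypothetical, and the keyword values simply mirror the defaults visible in the diff (shrink=10, mask on the last channel).

    from pathlib import Path
    import preprocess

    # Hypothetical inputs: a large slide scan and an output directory.
    tif_path = Path('slide_01.tif')
    out_dir = Path('preprocessed')

    # tiff_to_nii now lifts PIL's decompression-bomb limit (Image.MAX_IMAGE_PIXELS = None)
    # before opening the TIFF, and returns the scaled (and possibly unscaled) NIfTI paths.
    scaled_nii, _ = preprocess.tiff_to_nii(tif_path, out_dir, shrink=10)

    # Split the multi-channel NIfTI into single-channel files, flipping vertically and
    # masking the foreground on the last channel (mask_index=-1).
    channel_paths = preprocess.split_nii_channels(scaled_nii, out_dir, flip=True, mask_index=-1)
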
+ 3 - 1
registration.py

@@ -1,7 +1,9 @@
 from nipype.interfaces import ants
+import warnings
+warnings.simplefilter(action='ignore', category=FutureWarning)
 
 
-# Parameters for a full registration (Affine,Syn)  --- SLOW ---
+# Parameters for a full registration (Affine,Syn)  --- SLOW --- 
 def full_registration(fixed, moving, output='full_trans.nii', transform_pre=''):
     ants.Registration(
         dimension=2,

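The registration.py change itself only adds the FutureWarning filter; for context, the slow full (Affine + SyN) registration is invoked from runner.py roughly as sketched below. The file names and the 'Final-' prefix are illustrative, taken from the call site in runner.py.

    import registration

    # Fixed image: the template slice extracted for the chosen index.
    # Moving image: the preprocessed DAPI channel of the experimental slice.
    # 'Final-' prefixes the transform files that ANTs writes.
    registration.full_registration(
        'tmp/t_slice80.nii',               # fixed
        'output/slice_dapi.nii',           # moving
        'output/slice_dapi_template.nii',  # output
        'output/Final-')                   # transform_pre
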
+ 110 - 0
runner.py

@@ -0,0 +1,110 @@
+from pathlib import Path
+from pprint import pprint
+import argparse
+import sys
+import os
+import numpy as np
+import scipy.stats
+import nibabel as nib
+from nipype.interfaces import ants
+import registration
+import tools
+import preprocess
+
+from PIL import Image, ImageSequence
+import warnings
+warnings.simplefilter(action='ignore', category=FutureWarning)
+
+def find_optimal_index(slice_path, template_path, approx, temporary_directory, dist = 2):
+
+    weights = []
+    for i in range(approx-dist,approx+dist+1):
+        print(f'Slice index: {i}')
+        _slice = nib.load(str(slice_path))
+        template_slice_loc = str(temporary_directory / f't_slice{i}.nii')
+        registered_loc = str(temporary_directory / f'reg_slice{i}.nii')
+        transform_prefix = str(temporary_directory / f'{i}-')
+
+        tools.save_slice(str(template_path), template_slice_loc, i, np.eye(4), _slice.header)
+        registration.rigid_registration(template_slice_loc, str(slice_path), registered_loc, transform_prefix)
+
+        template_data = nib.load(str(template_slice_loc)).get_data()
+        registered_data = nib.load(registered_loc).get_data()
+        registered_data = tools.resize_im(registered_data, template_data)
+        nib.save(nib.Nifti1Image(registered_data, np.eye(4)), registered_loc)
+
+        mutualinfo = tools.mutual_info_mask(template_data, registered_data)
+        norm_factor = scipy.stats.norm.pdf(i-approx+dist, dist, dist*2) / scipy.stats.norm.pdf(dist, dist, dist*2)
+        dice_coef = tools.dice_coef(template_data, registered_data)
+        weights.append((i, norm_factor * (0.7 * mutualinfo + 0.3 * dice_coef)))
+
+    pprint(weights)
+    optimal = max(weights, key=lambda a: a[1])
+    print(optimal[0])
+    return optimal[0]
+
+def apply_transform(segmentation, fixed, index, out_dir, temporary_directory):
+
+    seg_slice_loc = out_dir / "segment_slice.nii" 
+    tools.save_slice(str(segmentation), str(seg_slice_loc), index, np.eye(4), nib.load(str(fixed)).header)
+    post_transform_loc = out_dir / f'{seg_slice_loc.stem}_t.nii'
+
+    template_slice_loc = str(temporary_directory / f't_slice{index}.nii')
+    registration.full_registration(str(template_slice_loc), str(fixed),
+                                   str(Path(out_dir, f'{fixed.stem}_template.nii')), str(out_dir / f'Final-'))
+                                                             
+    transform = [str(out_dir / f"Final-InverseComposite.h5")]
+    reverse_transform = ants.ApplyTransforms(
+            input_image=str(seg_slice_loc),
+            reference_image=str(fixed),
+            transforms=transform,
+            invert_transform_flags=[False],
+            interpolation='MultiLabel',
+            dimension=2,
+            output_image=str(post_transform_loc))
+    reverse_transform.run()
+
+    return post_transform_loc
+
+def generate_segmentation(slice_path, segment_path, template_path, approx_index, dapi, output_directory):     
+        
+    if slice_path.endswith('.tiff') or slice_path.endswith('.tif'):
+        Image.MAX_IMAGE_PIXELS = None
+        #  tif_image = Image.open(slice_path)
+        #  ch_nm = (list(tif_image.tag_v2[270].split('='))[2])
+        #  if int(ch_nm[0]) < dapi:
+        #      print(f'DAPI channel out of range, max range is {ch_nm[0]} channels')
+        #      sys.exit()
+        tif_path = preprocess.tiff_to_nii(slice_path, output_directory)
+        slice_path = tif_path[0]
+    elif not slice_path.endswith(".nii"):
+        print('Selected slice file was neither a nifti or a TIFF file. Please check slice file format')
+        sys.exit(2)
+        
+    out_subdir = Path(output_directory) / Path(slice_path).stem.replace(' ', '_').replace(',','_').replace('.','')
+    tools.create_dir(out_subdir)
+    temporary_directory = Path(output_directory) / Path(out_subdir) / 'tmp'
+    tools.create_dir(temporary_directory)
+    channel_paths = preprocess.split_nii_channels(slice_path, out_subdir, True, dapi-1)
+    optimal_index = find_optimal_index(channel_paths[dapi-1], template_path, approx_index, temporary_directory)
+    seg_loc = apply_transform(segment_path, channel_paths[dapi-1], optimal_index, out_subdir, temporary_directory)
+    return seg_loc
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("sliceloc", metavar="Slice Location", help="Location of preprocessed slice")
+    parser.add_argument("segloc", metavar="Segmentation Location", help="Location of segmentation")
+    parser.add_argument("tloc", metavar="Template Location", help="Location of template file")
+    parser.add_argument("--approx", type = int, default=-1, help="Approximate index of slice relative to template file")
+    parser.add_argument("bregma", metavar = 'Bregma index', type = float, help="Approx bregma coordinates")
+    parser.add_argument("out", metavar = 'Output directory', default="output", help="Output directory")
+    parser.add_argument("--dapi", type=int, default=0, help="DAPI channel number, default is last channel")
+
+    args = parser.parse_args()
+    if args.approx == -1 and not args.bregma:
+        print("Error: Please specify an approximate location in the template, or a bregma coordinate of the slice")
+        sys.exit(2)
+    if args.bregma:
+        args.approx = tools.bregma_to_slice_index(args.bregma)
+    generate_segmentation(args.sliceloc, args.segloc, args.tloc, args.approx, args.dapi, args.out)
+    

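A hedged sketch of driving the new runner.py pipeline programmatically rather than through its CLI; the input paths are hypothetical, and the argument order follows generate_segmentation() as added above.

    import tools
    from runner import generate_segmentation

    # Hypothetical inputs mirroring the CLI arguments (sliceloc, segloc, tloc).
    slice_loc = 'data/slide_01.tif'      # raw slice, TIFF or NIfTI
    seg_loc = 'atlas/segmentation.nii'   # atlas segmentation volume
    template_loc = 'atlas/template.nii'  # atlas template volume

    # Convert a bregma coordinate to an approximate template index,
    # as __main__ does when --approx is not given.
    approx = tools.bregma_to_slice_index(-2.0)

    # dapi=1 masks on the first channel; the return value is the path of the
    # back-transformed segmentation slice.
    seg_slice = generate_segmentation(slice_loc, seg_loc, template_loc,
                                      approx, dapi=1, output_directory='output')
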
+ 1 - 46
tools.py

@@ -8,8 +8,6 @@ from sklearn.metrics import mutual_info_score
 
 from preprocess import mask_foreground
 from pprint import pprint
-
-
 def save_slice(template, filename, index, affine=np.eye(4), header=None):
     _slice = nib.load(str(template))
     _slice_data = _slice.get_data()
@@ -22,30 +20,10 @@ def save_slice(template, filename, index, affine=np.eye(4), header=None):
     newimg.to_filename(str(filename))
     return filename
 
-
-# Functions for temporary directory
 def create_dir(name):
     if not os.path.exists(name):
         os.mkdir(name)
 
-
-# Functions for calculating mutual info of two slices
-def remove_dir(loc):
-    shutil.rmtree(loc)
-
-
-def remove_nan(input_array):
-    output_list = input_array.ravel()
-    output_list[np.isnan(output_list)] = 0
-    return output_list
-
-
-def mutual_info(slice1, slice2, bins=32):
-    slice1, slice2 = remove_nan(slice1), remove_nan(slice2)
-    hist = np.histogram2d(slice1, slice2, bins=bins)[0]
-    return mutual_info_score(None, None, contingency=hist)
-
-
 def mutual_info_mask(slice1,slice2, bins=32):
     slice1_masked = mask_foreground(slice1)
     slice2_masked = mask_foreground(slice2)
@@ -67,28 +45,5 @@ def resize_im(image_data, image_with_dimensions):
     resized = np.array(Image.fromarray(image_data).resize(dimensions))
     return resized
 
-
-def slice_num_to_index(slice_num):
-    index_dict = {
-        1: 175,
-        2: 154,
-        3: 123,
-        4: 112,
-        5: 93,
-        6: 75,
-        7: 31,
-        8: 7,
-        9: 119,
-        10: 100,
-        11: 83,
-        12: 71
-    }
-    return index_dict[slice_num]
-
-
 def bregma_to_slice_index(bregma):
-    return round(27.908*bregma + 116.831)
-
-
-def slice_index_to_bregma(slice_index):
-    return round(0.03564*slice_index - 4.168)
+   return round(27.908*bregma + 116.831)
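
As a quick numeric check of the retained bregma helper: a bregma coordinate of -2.0 mm maps to round(27.908 * -2.0 + 116.831) = round(61.015) = 61, i.e. template slice index 61. A minimal usage sketch:

    import tools

    # -2.0 mm bregma -> template slice index 61 (round(61.015)).
    index = tools.bregma_to_slice_index(-2.0)
    print(index)  # 61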