| 0 | 1 #!/usr/bin/env python | 
|  | 2 import argparse | 
| 10 | 3 import os | 
| 3 | 4 import shutil | 
|  | 5 | 
| 0 | 6 import dipy.core.optimize as opt | 
|  | 7 import dipy.tracking.life as life | 
| 3 | 8 from dipy.data import fetch_stanford_t1, read_stanford_labels, read_stanford_t1 | 
|  | 9 from dipy.viz import fvtk | 
| 0 | 10 from dipy.viz.colormap import line_colors | 
| 3 | 11 | 
|  | 12 import matplotlib | 
|  | 13 import matplotlib.pyplot as plt | 
|  | 14 | 
| 0 | 15 from mpl_toolkits.axes_grid1 import AxesGrid | 
| 3 | 16 | 
|  | 17 import nibabel as nib | 
|  | 18 | 
|  | 19 import numpy as np | 
| 0 | 20 | 
# Command-line interface: the two nifti datasets (and their extra-files
# directories), the TrackVis candidate streamlines, and the output image path.
parser = argparse.ArgumentParser()
for flag, help_text in (
    ('--input_nifti1', 'Input nifti1 dataset'),
    ('--input_nifti1_files_path', 'Input nifti1 extra files path'),
    ('--input_nifti2', 'Input nifti2 dataset'),
    ('--input_nifti2_files_path', 'Input nifti2 extra files path'),
    ('--input_trackvis', 'Track Visualization Header dataset'),
    ('--output_life_candidates', 'Output life candidates'),
):
    # dest is the flag name without its leading dashes, matching the
    # explicit dest= values the options were originally declared with.
    parser.add_argument(flag, dest=flag.lstrip('-'), help=help_text)

args = parser.parse_args()
|  | 30 | 
# Stage the input files where dipy's stanford_hardi readers expect them.
# TODO: do not hard-code 'stanford_hardi'
input_dir = 'stanford_hardi'
# exist_ok avoids a crash when the directory is left over from a prior run
# (plain os.mkdir raises FileExistsError in that case).
os.makedirs(input_dir, exist_ok=True)


def _copy_dir_contents(src_dir, dst_dir):
    """Copy every file from src_dir into dst_dir (flat, non-recursive)."""
    for name in os.listdir(src_dir):
        shutil.copy(os.path.join(src_dir, name), dst_dir)


# Copy the dMRI dataset (stanford_t1) files.
_copy_dir_contents(args.input_nifti1_files_path, input_dir)
# Copy the dMRI dataset and label map (stanford_hardi) files.
_copy_dir_contents(args.input_nifti2_files_path, input_dir)
|  | 41 | 
# We'll need to know where the corpus callosum is from these variables.
# Label value 2 in the Stanford label map marks the corpus callosum.
hardi_img, gtab, labels_img = read_stanford_labels()
labels = labels_img.get_data()
cc_slice = labels == 2  # boolean mask of corpus-callosum voxels
# Anatomical T1 volume used as the visual backdrop for the rendering below.
t1 = read_stanford_t1()
t1_data = t1.get_data()
# NOTE(review): the original also ran `data = hardi_img.get_data()`, but
# `data` was never used anywhere in this script; the call is dropped so the
# whole 4D dMRI array is not loaded into memory for nothing.
| 0 | 49 | 
# Read the candidates from file in voxel space: trackvis.read returns
# (streams, header); each stream is a (points, scalars, properties) tuple,
# so s[0] keeps only the Nx3 point array of each streamline.
candidate_sl = [s[0] for s in nib.trackvis.read(args.input_trackvis, points_space='voxel')[0]]
# Visualize the initial candidate group of streamlines
# in 3D, relative to the anatomical structure of this brain.
candidate_streamlines_actor = fvtk.streamtube(candidate_sl, line_colors(candidate_sl))
# Yellow, fully opaque contour surface around the corpus-callosum mask.
cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)], opacities=[1.])
# Two orthogonal T1 slices for anatomical context: one at index 40 along the
# first axis and a copy at index 35 along the third axis
# (presumably sagittal and axial — TODO confirm against the T1 orientation).
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)
# Add display objects to canvas.
ren = fvtk.ren()
fvtk.add(ren, candidate_streamlines_actor)
fvtk.add(ren, cc_ROI_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)
# Render a single off-screen frame to PNG, then move it to the
# caller-specified output location.
fvtk.record(ren, n_frames=1, out_path="life_candidates.png", size=(800, 800))
shutil.move("life_candidates.png", args.output_life_candidates)