comparison: linear_fascile_evaluation.py @ 10:2de70534993d (draft)

description | Uploaded
author      | greg
date        | Thu, 30 Nov 2017 11:28:22 -0500
parents     | eb03934e044f
children    | (none)
--- linear_fascile_evaluation.py  (9:6ad10978f14d)
+++ linear_fascile_evaluation.py  (10:2de70534993d)
 #!/usr/bin/env python
 import argparse
+import os
 import shutil

 import dipy.core.optimize as opt
 import dipy.tracking.life as life
 from dipy.data import fetch_stanford_t1, read_stanford_labels, read_stanford_t1
[... unchanged lines omitted ...]
 import nibabel as nib

 import numpy as np

 parser = argparse.ArgumentParser()
-parser.add_argument('--input', dest='input', help='Track Visualization Header dataset')
+parser.add_argument('--input_nifti1', dest='input_nifti1', help='Input nifti1 dataset')
+parser.add_argument('--input_nifti1_files_path', dest='input_nifti1_files_path', help='Input nifti1 extra files path')
+parser.add_argument('--input_nifti2', dest='input_nifti2', help='Input nifti2 dataset')
+parser.add_argument('--input_nifti2_files_path', dest='input_nifti2_files_path', help='Input nifti2 extra files path')
+parser.add_argument('--input_trackvis', dest='input_trackvis', help='Track Visualization Header dataset')
 parser.add_argument('--output_life_candidates', dest='output_life_candidates', help='Output life candidates')

 args = parser.parse_args()
+
+# Get input data.
+# TODO: do not hard-code 'stanford_hardi'
+input_dir = 'stanford_hardi'
+os.mkdir(input_dir)
+# Copy the T1-weighted anatomical dataset (stanford_t1) files.
+for f in os.listdir(args.input_nifti1_files_path):
+    shutil.copy(os.path.join(args.input_nifti1_files_path, f), input_dir)
+# Copy the dMRI dataset and label map (stanford_hardi) files.
+for f in os.listdir(args.input_nifti2_files_path):
+    shutil.copy(os.path.join(args.input_nifti2_files_path, f), input_dir)

 # We'll need to know where the corpus callosum is from these variables.
 hardi_img, gtab, labels_img = read_stanford_labels()
 labels = labels_img.get_data()
 cc_slice = labels == 2
-fetch_stanford_t1()
 t1 = read_stanford_t1()
 t1_data = t1.get_data()
 data = hardi_img.get_data()

 # Read the candidates from file in voxel space:
-candidate_sl = [s[0] for s in nib.trackvis.read(args.input, points_space='voxel')[0]]
+candidate_sl = [s[0] for s in nib.trackvis.read(args.input_trackvis, points_space='voxel')[0]]
 # Visualize the initial candidate group of streamlines
 # in 3D, relative to the anatomical structure of this brain.
 candidate_streamlines_actor = fvtk.streamtube(candidate_sl, line_colors(candidate_sl))
 cc_ROI_actor = fvtk.contour(cc_slice, levels=[1], colors=[(1., 1., 0.)], opacities=[1.])
 vol_actor = fvtk.slicer(t1_data)
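
For orientation, here is a hypothetical invocation of the updated script with its new arguments, sketched in Python via subprocess; every file and directory name below is a placeholder rather than anything defined by this changeset.

    import subprocess

    # Hypothetical paths; in practice the tool wrapper supplies the real datasets.
    subprocess.check_call([
        'python', 'linear_fascile_evaluation.py',
        '--input_nifti1', 't1.nii',
        '--input_nifti1_files_path', 't1_extra_files',
        '--input_nifti2', 'hardi.nii',
        '--input_nifti2_files_path', 'hardi_extra_files',
        '--input_trackvis', 'candidate_streamlines.trk',
        '--output_life_candidates', 'life_candidates.trk',
    ])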
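
The nib.trackvis.read call relies on the 2017-era nibabel API; later nibabel releases deprecated and eventually removed the trackvis module in favor of nibabel.streamlines. A minimal sketch of an equivalent voxel-space read with the newer API, assuming a TRK input whose header carries the standard voxel_to_rasmm affine (the path is a placeholder):

    import numpy as np
    import nibabel as nib

    trk = nib.streamlines.load('candidate_streamlines.trk')   # streamlines come back in RAS+ mm
    to_voxel = np.linalg.inv(trk.header['voxel_to_rasmm'])    # map back into voxel coordinates
    candidate_sl = [nib.affines.apply_affine(to_voxel, s) for s in trk.streamlines]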
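
Likewise, dipy.viz.fvtk is DIPY's legacy visualization module; newer releases expose comparable actors through dipy.viz.actor. A rough equivalent of the three actors built above, using placeholder arrays in place of the script's candidate_sl, cc_slice and t1_data:

    import numpy as np
    from dipy.viz import actor, colormap

    candidate_sl = [np.random.rand(20, 3) * 50]    # one fake streamline in voxel coordinates
    cc_slice = np.zeros((50, 50, 50), dtype=bool)
    cc_slice[20:30, 20:30, 20:30] = True           # fake corpus-callosum ROI mask
    t1_data = np.random.rand(50, 50, 50)           # fake T1 volume

    candidate_streamlines_actor = actor.streamtube(candidate_sl, colormap.line_colors(candidate_sl))
    cc_ROI_actor = actor.contour_from_roi(cc_slice, color=(1., 1., 0.), opacity=1.)
    vol_actor = actor.slicer(t1_data)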