# HG changeset patch
# User greg
# Date 1488292794 18000
# Node ID a0c347192b081e05a14d5a10e4b2978963bdf2bf
# Parent  c2d2ca6cf94b8e9c77fc2f7bcdac519cc48252e1
Uploaded

diff -r c2d2ca6cf94b -r a0c347192b08 phylogenomics_analysis.py
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/phylogenomics_analysis.py Tue Feb 28 09:39:54 2017 -0500
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+import argparse
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+BUFF_SIZE = 1048576
+OUTPUT_DIR = 'phylogenomicsAnalysis_dir'
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument('--orthogroup_faa', dest='orthogroup_faa', help="Input dataset files_path")
+parser.add_argument('--scaffold', dest='scaffold', default='mode', help='Orthogroups or gene families proteins scaffold')
+parser.add_argument('--method', dest='method', help='Protein clustering method')
+parser.add_argument('--config_dir', dest='config_dir', help='Directory containing default configuration files')
+parser.add_argument('--num_threads', dest='num_threads', type=int, help='Number of threads to use for execution')
+parser.add_argument('--orthogroup_fna', dest='orthogroup_fna', default=None, help="Flag for coding sequences associated with orthogroups")
+parser.add_argument('--sequence_type', dest='sequence_type', default=None, help="Sequence type used in the phylogenetic inference")
+parser.add_argument('--alignments_method', dest='alignments_method', default=None, help='Multiple sequence alignments method')
+parser.add_argument('--pasta_script_path', dest='pasta_script_path', default=None, help='Path to script for executing pasta')
+parser.add_argument('--pasta_iter_limit', dest='pasta_iter_limit', type=int, default=None, help='Maximum number of iterations that the PASTA algorithm will execute')
+parser.add_argument('--tree_inference', dest='single_copy_custom', default=None, help='Phylogenetic tree inference method')
+parser.add_argument('--rooting_order', dest='rooting_order', default=None, help='Rooting order configuration for rooting trees')
+parser.add_argument('--bootstrap_replicates', dest='bootstrap_replicates', type=int, default=None, help='Number of replicates for rapid bootstrap analysis')
+parser.add_argument('--max_orthogroup_size', dest='max_orthogroup_size', type=int, default=None, help='Maximum number of sequences in orthogroup alignments')
+parser.add_argument('--min_orthogroup_size', dest='min_orthogroup_size', type=int, default=None, help='Minimum number of sequences in orthogroup alignments')
+parser.add_argument('--remove_sequences', dest='remove_sequences', default=None, type=float, help='Remove sequences with gaps of')
+parser.add_argument('--trim_type', dest='trim_type', default=None, help='Process used for gap trimming')
+parser.add_argument('--gap_trimming', dest='gap_trimming', default=None, type=float, help='Remove sites in alignments with gaps of')
+parser.add_argument('--output_ptortho', dest='output_ptortho', default=None, help='Output for orthogroups')
+parser.add_argument('--output_ptortho_dir', dest='output_ptortho_dir', default=None, help='output_ptortho.files_path')
+parser.add_argument('--output_ptorthocs', dest='output_ptorthocs', default=None, help='Output for orthogroups with corresponding coding sequences')
+parser.add_argument('--output_ptorthocs_dir', dest='output_ptorthocs_dir', default=None, help='output_ptorthocs.files_path')
+parser.add_argument('--output_aln', dest='output_aln', default=None, help='Output for orthogroups alignments')
+parser.add_argument('--output_aln_dir', dest='output_aln_dir', default=None, help='output_aln.files_path')
+parser.add_argument('--output_tree', dest='output_tree', default=None, help='Output for phylogenetic trees')
+parser.add_argument('--output_tree_dir', dest='output_tree_dir', default=None, help='output_tree.files_path')
+
+args = parser.parse_args()
+
+
+def get_stderr_exception(tmp_err, tmp_stderr, tmp_out, tmp_stdout, include_stdout=False):
+    tmp_stderr.close()
+    # Get stderr, allowing for case where it's very large.
+    tmp_stderr = open(tmp_err, 'rb')
+    stderr_str = ''
+    buffsize = BUFF_SIZE
+    try:
+        while True:
+            stderr_str += tmp_stderr.read(buffsize)
+            if not stderr_str or len(stderr_str) % buffsize != 0:
+                break
+    except OverflowError:
+        pass
+    tmp_stderr.close()
+    if include_stdout:
+        tmp_stdout = open(tmp_out, 'rb')
+        stdout_str = ''
+        buffsize = BUFF_SIZE
+        try:
+            while True:
+                stdout_str += tmp_stdout.read(buffsize)
+                if not stdout_str or len(stdout_str) % buffsize != 0:
+                    break
+        except OverflowError:
+            pass
+        tmp_stdout.close()
+    if include_stdout:
+        return 'STDOUT\n%s\n\nSTDERR\n%s\n' % (stdout_str, stderr_str)
+    return stderr_str
+
+
+def move_directory_files(source_dir, destination_dir):
+    source_directory = os.path.abspath(source_dir)
+    destination_directory = os.path.abspath(destination_dir)
+    if not os.path.isdir(destination_directory):
+        os.makedirs(destination_directory)
+    for dir_entry in os.listdir(source_directory):
+        source_entry = os.path.join(source_directory, dir_entry)
+        shutil.move(source_entry, destination_directory)
+
+
+def stop_err(msg):
+    sys.stderr.write(msg)
+    sys.exit(1)
+
+
+def write_html_output(output, title, dir):
+    with open(output, 'w') as fh:
+        # Page header, then a table listing the size and name of each file in dir.
+        fh.write('<html><head><h3>%s</h3></head><body><p/>\n' % title)
+        fh.write('<table cellpadding="2">\n')
+        fh.write('<tr><th>Size</th><th>Name</th></tr>\n')
+        for dir_entry in sorted(os.listdir(dir)):
+            entry_size = os.path.getsize(os.path.join(dir, dir_entry))
+            fh.write('<tr><td>%s</td><td>%s</td></tr>\n' % (entry_size, dir_entry))
+        fh.write('</table></body></html>\n')
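+
+# Example invocation of this wrapper's command line interface, using the options
+# defined above. Illustration only: the paths and option values shown here are
+# hypothetical and are not taken from any particular Galaxy job.
+#
+#   python phylogenomics_analysis.py \
+#       --orthogroup_faa /path/to/orthogroup_faa_files \
+#       --scaffold 22Gv1.1 \
+#       --method orthomcl \
+#       --config_dir /path/to/config \
+#       --num_threads 4 \
+#       --alignments_method mafft \
+#       --output_aln output_aln.html \
+#       --output_aln_dir output_aln_files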