# HG changeset patch
# User dave
# Date 1527210689 14400
# Node ID a2a04db1bbf3dcb50c3bea1f6b7324d08a75a09a
Uploaded

diff -r 000000000000 -r a2a04db1bbf3 .shed.yml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/.shed.yml Thu May 24 21:11:29 2018 -0400
@@ -0,0 +1,10 @@
+categories:
+- Data Managers
+description: Data Manager for building BWA (0.6+) indexes
+homepage_url: ''
+long_description: |
+  Data Manager for building BWA (0.6+) indexes.
+name: data_manager_bwa_mem_index_builder
+owner: devteam
+remote_repository_url: https://github.com/galaxyproject/tools-iuc/tree/master/data_managers/data_manager_bwa_mem_index_builder
+type: unrestricted
diff -r 000000000000 -r a2a04db1bbf3 data_manager/mummer4_index_builder.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/mummer4_index_builder.py Thu May 24 21:11:29 2018 -0400
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# Dan Blankenberg
+from __future__ import print_function
+
+import optparse
+import os
+import subprocess
+import sys
+from json import dumps, loads
+
+CHUNK_SIZE = 2**20
+TWO_GB = 2**30 * 2
+DEFAULT_DATA_TABLE_NAME = "mummer4_indexes"
+
+
+def get_id_name( params, dbkey, fasta_description=None):
+    # TODO: ensure sequence_id is unique and does not already appear in location file
+    sequence_id = params['param_dict']['sequence_id']
+    if not sequence_id:
+        sequence_id = dbkey
+
+    sequence_name = params['param_dict']['sequence_name']
+    if not sequence_name:
+        sequence_name = fasta_description
+        if not sequence_name:
+            sequence_name = dbkey
+    return sequence_id, sequence_name
+
+
+def build_mummer4_index( data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME ):
+    # TODO: allow multiple FASTA input files
+    fasta_base_name = os.path.split( fasta_filename )[-1]
+    sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
+    os.symlink( fasta_filename, sym_linked_fasta_filename )
+    if params['param_dict']['index_algorithm'] == 'automatic':
+        if os.stat( fasta_filename ).st_size < TWO_GB:  # use 2 GB as cut off for memory vs. max of 2gb database size; this is somewhat arbitrary
+            index_algorithm = 'is'
+        else:
+            index_algorithm = 'bwtsw'
+    else:
+        index_algorithm = params['param_dict']['index_algorithm']
+
+    args = [ 'mummer4', '-save', dbkey, sym_linked_fasta_filename, sym_linked_fasta_filename ]
+    args.append( sym_linked_fasta_filename )
+    proc = subprocess.Popen( args=args, shell=False, cwd=target_directory )
+    return_code = proc.wait()
+    if return_code:
+        print("Error building index.", file=sys.stderr)
+        sys.exit( return_code )
+    data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
+    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
+
+
+def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
+    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
+    data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
+    data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
+    return data_manager_dict
+
+
+def main():
+    parser = optparse.OptionParser()
+    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
+    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
+    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
+    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
+    (options, args) = parser.parse_args()
+
+    filename = args[0]
+
+    params = loads( open( filename ).read() )
+    target_directory = params[ 'output_data' ][0]['extra_files_path']
+    os.mkdir( target_directory )
+    data_manager_dict = {}
+
+    dbkey = options.fasta_dbkey
+
+    if dbkey in [ None, '', '?' ]:
+        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
+
+    sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
+
+    # build the index
+    build_mummer4_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME )
+
+    # save info to json file
+    open( filename, 'w' ).write( dumps( data_manager_dict ) )
+
+
+if __name__ == "__main__":
+    main()
diff -r 000000000000 -r a2a04db1bbf3 data_manager/mummer4_index_builder.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/mummer4_index_builder.xml Thu May 24 21:11:29 2018 -0400
@@ -0,0 +1,30 @@
+
+ builder
+
+ mummer4
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+.. class:: infomark
+
+**Notice:** If you leave name, description, or id blank, it will be generated automatically.
+
+
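For reference, a minimal sketch of the calling convention the Python data manager above expects: Galaxy hands the script a JSON file as its positional argument, containing "param_dict" and "output_data", and the script overwrites that same file with the data-table entries it built. Everything below (the dbkey, FASTA path, and target directory) is an invented example for illustration, not part of the changeset.

# Illustrative only: build a Galaxy-style params file and invoke the script above.
# All paths and the dbkey are made-up examples.
import json
import subprocess
import tempfile

params = {
    "param_dict": {
        "sequence_id": "",            # blank: get_id_name() falls back to the dbkey
        "sequence_name": "",          # blank: falls back to the description, then the dbkey
        "index_algorithm": "automatic",
    },
    "output_data": [
        # extra_files_path becomes the target directory the script creates with os.mkdir()
        {"extra_files_path": "/tmp/mummer4_index_example"},
    ],
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as handle:
    json.dump(params, handle)

# The options mirror the optparse flags defined in main(); the positional
# argument is the JSON params file, which the script overwrites with its results.
subprocess.call([
    "python", "data_manager/mummer4_index_builder.py",
    "--fasta_filename", "/path/to/genome.fa",
    "--fasta_dbkey", "hg38",
    "--fasta_description", "Human (hg38)",
    "--data_table_name", "mummer4_indexes",
    handle.name,
])

After a successful run the JSON file would hold something along the lines of {"data_tables": {"mummer4_indexes": [{"value": "hg38", "dbkey": "hg38", "name": "Human (hg38)", "path": "genome.fa"}]}}, which Galaxy then turns into a loc-file entry.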
diff -r 000000000000 -r a2a04db1bbf3 data_manager_conf.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager_conf.xml Thu May 24 21:11:29 2018 -0400
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ ${dbkey}/mummer4_index/${value}
+
+ ${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/mummer4_index/${value}/${path}
+ abspath
+
+
+
+
+
+
diff -r 000000000000 -r a2a04db1bbf3 tool-data/all_fasta.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/all_fasta.loc.sample Thu May 24 21:11:29 2018 -0400
@@ -0,0 +1,18 @@
+#This file lists the locations and dbkeys of all the fasta files
+#under the "genome" directory (a directory that contains a directory
+#for each build). The script extract_fasta.py will generate the file
+#all_fasta.loc. This file has the format (white space characters are
+#TAB characters):
+#
+#<unique_build_id>   <dbkey>   <display_name>   <file_path>
+#
+#So, all_fasta.loc could look something like this:
+#
+#apiMel3   apiMel3   Honeybee (Apis mellifera): apiMel3   /path/to/genome/apiMel3/apiMel3.fa
+#hg19canon   hg19   Human (Homo sapiens): hg19 Canonical   /path/to/genome/hg19/hg19canon.fa
+#hg19full   hg19   Human (Homo sapiens): hg19 Full   /path/to/genome/hg19/hg19full.fa
+#
+#Your all_fasta.loc file should contain an entry for each individual
+#fasta file. So there will be multiple fasta files for each build,
+#such as with hg19 above.
+#
diff -r 000000000000 -r a2a04db1bbf3 tool-data/mummer4_index.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/mummer4_index.loc.sample Thu May 24 21:11:29 2018 -0400
@@ -0,0 +1,38 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of MUMmer4 indexed sequences data files. You will need
+#to create these data files and then create a mummer4_index.loc file
+#similar to this one (store it in this directory) that points to
+#the directories in which those files are stored. The mummer4_index.loc
+#file has this format (longer white space characters are TAB characters):
+#
+#<unique_build_id>   <dbkey>   <display_name>   <file_path>
+#
+#So, for example, if you had phiX indexed stored in
+#/depot/data2/galaxy/phiX/base/,
+#then the mummer4_index.loc entry would look like this:
+#
+#phiX174   phiX   phiX Pretty   /depot/data2/galaxy/phiX/base/phiX.fa
+#
+#and your /depot/data2/galaxy/phiX/base/ directory
+#would contain phiX.fa.* files:
+#
+#-rw-r--r-- 1 james universe 830134 2005-09-13 10:12 phiX.fa.amb
+#-rw-r--r-- 1 james universe 527388 2005-09-13 10:12 phiX.fa.ann
+#-rw-r--r-- 1 james universe 269808 2005-09-13 10:12 phiX.fa.bwt
+#...etc...
+#
+#Your mummer4_index.loc file should include an entry per line for each
+#index set you have stored. The "file" in the path does not actually
+#exist, but it is the prefix for the actual index files. For example:
+#
+#phiX174   phiX   phiX174   /depot/data2/galaxy/phiX/base/phiX.fa
+#hg18canon   hg18   hg18 Canonical   /depot/data2/galaxy/hg18/base/hg18canon.fa
+#hg18full   hg18   hg18 Full   /depot/data2/galaxy/hg18/base/hg18full.fa
+#/orig/path/hg19.fa   hg19   hg19   /depot/data2/galaxy/hg19/base/hg19.fa
+#...etc...
+#
+#Note that for backwards compatibility with workflows, the unique ID of
+#an entry must be the path that was in the original loc file, because that
+#is the value stored in the workflow for that parameter. That is why the
+#hg19 entry above looks odd. New genomes can be better-looking.
+#
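Tying the samples together: once this data manager has built an index, a populated tool-data/mummer4_index.loc gains one TAB-separated line whose four columns correspond to value, dbkey, name, and path, with the path following the ${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/mummer4_index/${value}/${path} layout referenced in data_manager_conf.xml. A hypothetical entry (dbkey and paths are invented for illustration) might look like:

hg38   hg38   Human (Homo sapiens): hg38   /galaxy/data_manager_data/hg38/mummer4_index/hg38/hg38.fa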
diff -r 000000000000 -r a2a04db1bbf3 tool_data_table_conf.xml.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_data_table_conf.xml.sample Thu May 24 21:11:29 2018 -0400
@@ -0,0 +1,12 @@
+<tables>
+    <!-- Locations of all fasta files under genome directory -->
+    <table name="all_fasta" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/all_fasta.loc" />
+    </table>
+    <!-- Locations of MUMmer4 indexes -->
+    <table name="mummer4_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/mummer4_index.loc" />
+    </table>
+</tables>
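Both tables above declare the same value, dbkey, name, path columns that build_mummer4_index() writes into its data_table_entry, so every entry the data manager emits becomes one line of the corresponding .loc file. A rough, self-contained sketch of reading such a loc file back against those columns (the file name is an example; Galaxy performs the equivalent parsing itself, this only illustrates the column layout):

# Sketch: parse a loc file into dicts keyed by the columns declared in
# tool_data_table_conf.xml.sample ("value, dbkey, name, path").
COLUMNS = ["value", "dbkey", "name", "path"]

def read_loc(path):
    entries = []
    with open(path) as handle:
        for line in handle:
            line = line.rstrip("\n")
            if not line or line.startswith("#"):
                continue  # skip blanks and comment lines, as the samples describe
            fields = line.split("\t")
            if len(fields) != len(COLUMNS):
                raise ValueError("expected %d tab-separated columns, got %d: %r"
                                 % (len(COLUMNS), len(fields), line))
            entries.append(dict(zip(COLUMNS, fields)))
    return entries

if __name__ == "__main__":
    for entry in read_loc("tool-data/mummer4_index.loc"):
        print("%s -> %s" % (entry["value"], entry["path"]))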