# HG changeset patch
# User devteam
# Date 1426797677 14400
# Node ID 9a024421602a125dad56c9b82400c759609dbd9e
Uploaded

diff -r 000000000000 -r 9a024421602a .shed.yml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/.shed.yml Thu Mar 19 16:41:17 2015 -0400
@@ -0,0 +1,4 @@
+# repository published to https://toolshed.g2.bx.psu.edu/repos/devteam/data_manager_picard_index_builder
+owner: devteam
+name: data_manager_picard_index_builder
+
diff -r 000000000000 -r 9a024421602a data_manager/picard_index_builder.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/picard_index_builder.py Thu Mar 19 16:41:17 2015 -0400
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+#Dan Blankenberg
+
+import sys
+import os
+import optparse
+import subprocess
+
+from json import loads, dumps
+
+
+DEFAULT_DATA_TABLE_NAME = "picard_indexes"
+
+def get_id_name( params, dbkey, fasta_description=None ):
+    #TODO: ensure sequence_id is unique and does not already appear in location file
+    sequence_id = params['param_dict']['sequence_id']
+    if not sequence_id:
+        sequence_id = dbkey
+
+    sequence_name = params['param_dict']['sequence_name']
+    if not sequence_name:
+        sequence_name = fasta_description
+        if not sequence_name:
+            sequence_name = dbkey
+    return sequence_id, sequence_name
+
+def build_picard_index( data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME ):
+    #TODO: allow multiple FASTA input files
+    fasta_base_name = os.path.split( fasta_filename )[-1]
+    prefix = os.path.splitext( fasta_base_name )[0]
+    sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
+    os.symlink( fasta_filename, sym_linked_fasta_filename )
+    args = [ 'java', '-Xmx4G', '-jar', os.path.join( os.environ[ 'JAVA_JAR_PATH' ], 'CreateSequenceDictionary.jar' ), 'R=%s' % sym_linked_fasta_filename, 'O=%s.dict' % prefix ]
+    proc = subprocess.Popen( args=args, shell=False, cwd=target_directory )
+    return_code = proc.wait()
+    if return_code:
+        print >> sys.stderr, "Error building index."
+        sys.exit( return_code )
+    data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
+    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
+
+def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
+    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
+    data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
+    data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
+    return data_manager_dict
+
+def main():
+    #Parse Command Line
+    parser = optparse.OptionParser()
+    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
+    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
+    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
+    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
+    (options, args) = parser.parse_args()
+
+    filename = args[0]
+
+    params = loads( open( filename ).read() )
+    target_directory = params[ 'output_data' ][0]['extra_files_path']
+    os.mkdir( target_directory )
+    data_manager_dict = {}
+
+    dbkey = options.fasta_dbkey
+
+    if dbkey in [ None, '', '?' ]:
+        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )
+
+    sequence_id, sequence_name = get_id_name( params, dbkey=dbkey, fasta_description=options.fasta_description )
+
+    #build the index
+    build_picard_index( data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME )
+
+    #save info to json file
+    open( filename, 'wb' ).write( dumps( data_manager_dict ) )
+
+if __name__ == "__main__": main()
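The script above communicates with Galaxy through a single JSON file: the wrapper passes its path as the positional argument, main() reads the run parameters from it, and the same file is then overwritten with the data-table rows that were produced. A minimal sketch of that round trip, using made-up file names and a made-up hg19 entry (only the keys the script actually touches are shown):

    import json

    # Hypothetical JSON that Galaxy hands to picard_index_builder.py before it runs;
    # only the keys read by main() and get_id_name() are shown.
    params = {
        "param_dict": {"sequence_id": "", "sequence_name": ""},         # empty values fall back to the dbkey/description
        "output_data": [{"extra_files_path": "/tmp/dataset_1_files"}],  # becomes target_directory
    }

    # Shape of what the script writes back over the same file once
    # build_picard_index() succeeds: one row per data table, with the
    # value/dbkey/name/path columns used by the picard_indexes table.
    data_manager_dict = {
        "data_tables": {
            "picard_indexes": [
                {"value": "hg19", "dbkey": "hg19", "name": "hg19", "path": "hg19.fa"}
            ]
        }
    }

    print(json.dumps(params, indent=2))
    print(json.dumps(data_manager_dict, indent=2))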
diff -r 000000000000 -r 9a024421602a data_manager/picard_index_builder.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/picard_index_builder.xml Thu Mar 19 16:41:17 2015 -0400
@@ -0,0 +1,29 @@
+
+    builder
+
+        picard
+
+    picard_index_builder.py "${out_file}" --fasta_filename "${all_fasta_source.fields.path}" --fasta_dbkey "${all_fasta_source.fields.dbkey}" --fasta_description "${all_fasta_source.fields.name}" --data_table_name "picard_indexes"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+.. class:: infomark
+
+**Notice:** If you leave name, description, or id blank, it will be generated automatically.
+
+
+
diff -r 000000000000 -r 9a024421602a data_manager_conf.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager_conf.xml Thu Mar 19 16:41:17 2015 -0400
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
+
+
+
+    ${dbkey}/picard_index/${value}
+
+    ${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/picard_index/${value}/${path}
+    abspath
+
+
+
+
+
+
+
+
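The path template that survives in the data_manager_conf.xml hunk above, ${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/picard_index/${value}/${path}, is what Galaxy uses to rewrite the table entry's path after moving the built index under the data manager directory. A rough sketch of that substitution with made-up values (the data directory root and dbkey are placeholders, not values taken from this changeset):

    from string import Template

    # Template preserved in the hunk above; Galaxy fills it from the
    # table entry produced by picard_index_builder.py.
    pattern = Template("${GALAXY_DATA_MANAGER_DATA_PATH}/${dbkey}/picard_index/${value}/${path}")

    final_path = pattern.substitute(
        GALAXY_DATA_MANAGER_DATA_PATH="/galaxy/tool-data",  # hypothetical data manager root
        dbkey="hg19",                                        # from the --fasta_dbkey option
        value="hg19",                                        # sequence_id chosen by get_id_name()
        path="hg19.fa",                                      # fasta_base_name recorded by build_picard_index()
    )
    print(final_path)  # /galaxy/tool-data/hg19/picard_index/hg19/hg19.fa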
diff -r 000000000000 -r 9a024421602a tool-data/picard_index.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/picard_index.loc.sample Thu Mar 19 16:41:17 2015 -0400
@@ -0,0 +1,26 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of Picard dict and associated files. You will need
+#to create these data files and then create a picard_index.loc file
+#similar to this one (store it in this directory) that points to
+#the directories in which those files are stored. The picard_index.loc
+#file has this format (longer white space is the TAB character):
+#
+#<unique_build_id>   <dbkey>   <display_name>   <file_base_path>
+#
+#So, for example, if you had hg18 indexed and stored in
+#/depot/data2/galaxy/picard/hg18/,
+#then the picard_index.loc entry would look like this:
+#
+#hg18   hg18   hg18 Pretty   /depot/data2/galaxy/picard/hg18/hg18.fa
+#
+#and your /depot/data2/galaxy/picard/hg18/ directory
+#would contain the following three files:
+#hg18.fa
+#hg18.dict
+#hg18.fa.fai
+#
+#The dictionary file for each reference (ex. hg18.dict) must be
+#created via Picard (http://picard.sourceforge.net). Note that
+#the dict file does not have the .fa extension although the
+#path list in the loc file does include it.
+#
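Each non-comment row of picard_index.loc carries the four TAB-separated columns declared in tool_data_table_conf.xml.sample below (value, dbkey, name, path). A small sketch of how one such row, modeled on the hg18 example above, splits into those columns; the path is illustrative only:

    # Split one hypothetical picard_index.loc row into its four columns.
    line = "hg18\thg18\thg18 Pretty\t/depot/data2/galaxy/picard/hg18/hg18.fa"

    value, dbkey, name, path = line.rstrip("\n").split("\t")
    print("value=%s dbkey=%s name=%s path=%s" % (value, dbkey, name, path))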
diff -r 000000000000 -r 9a024421602a tool_data_table_conf.xml.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_data_table_conf.xml.sample Thu Mar 19 16:41:17 2015 -0400
@@ -0,0 +1,12 @@
+
+
+
+    value, dbkey, name, path
+
+
+
+
+    value, dbkey, name, path
+
+
+
\ No newline at end of file
diff -r 000000000000 -r 9a024421602a tool_dependencies.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_dependencies.xml Thu Mar 19 16:41:17 2015 -0400
@@ -0,0 +1,6 @@
+
+
+
+
+
+