# HG changeset patch
# User devteam
# Date 1395863287 14400
# Node ID e9b0c187700f2d19be86fac237b7805cc07672ee
Uploaded data manager definition.
diff -r 000000000000 -r e9b0c187700f README
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/README Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,1 @@
+TODO
diff -r 000000000000 -r e9b0c187700f data_manager/data_manager_sam_fasta_index_builder.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_sam_fasta_index_builder.py Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+#Dan Blankenberg
+
+import json
+import optparse
+import os
+import subprocess
+import sys
+import tempfile
+
+CHUNK_SIZE = 2**20
+
+DEFAULT_DATA_TABLE_NAME = "fasta_indexes"
+
def get_id_name( params, dbkey, fasta_description=None ):
    """Resolve the (sequence_id, sequence_name) pair for a data table entry.

    Blank user-supplied values fall back to sensible defaults: the id falls
    back to the dbkey; the name falls back to the FASTA description, then
    to the dbkey.
    """
    #TODO: ensure sequence_id is unique and does not already appear in location file
    param_dict = params['param_dict']
    sequence_id = param_dict['sequence_id'] or dbkey
    sequence_name = param_dict['sequence_name'] or fasta_description or dbkey
    return sequence_id, sequence_name
+
def build_sam_index( data_manager_dict, fasta_filename, target_directory, dbkey, sequence_id, sequence_name, data_table_name=DEFAULT_DATA_TABLE_NAME ):
    """Run ``samtools faidx`` on the FASTA and register the result.

    The FASTA is symlinked into *target_directory* (so the ``.fai`` index is
    created next to it), ``samtools faidx`` is run there, and on success an
    entry is appended to *data_table_name* inside *data_manager_dict*.
    On a non-zero samtools exit code, the captured stderr is echoed and the
    process exits with that code.
    """
    #TODO: allow multiple FASTA input files
    fasta_base_name = os.path.basename( fasta_filename )
    sym_linked_fasta_filename = os.path.join( target_directory, fasta_base_name )
    os.symlink( fasta_filename, sym_linked_fasta_filename )

    args = [ 'samtools', 'faidx', sym_linked_fasta_filename ]
    tmp_stderr = tempfile.NamedTemporaryFile( prefix = "tmp-data-manager-sam_fa_index_builder-stderr" )
    try:
        return_code = subprocess.call( args=args, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno() )
        if return_code:
            tmp_stderr.flush()
            tmp_stderr.seek( 0 )
            sys.stderr.write( "Error building index:\n" )
            while True:
                chunk = tmp_stderr.read( CHUNK_SIZE )
                if not chunk:
                    break
                # the tempfile is opened in binary mode; decode so the write
                # works on a text-mode sys.stderr (Python 3) as well as Python 2
                if isinstance( chunk, bytes ):
                    chunk = chunk.decode( 'utf-8', 'replace' )
                sys.stderr.write( chunk )
            sys.exit( return_code )
    finally:
        # ensure the temp file is always removed, including on the sys.exit path
        tmp_stderr.close()
    data_table_entry = dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name )
    _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry )
+
+def _add_data_table_entry( data_manager_dict, data_table_name, data_table_entry ):
+ data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
+ data_manager_dict['data_tables'][ data_table_name ] = data_manager_dict['data_tables'].get( data_table_name, [] )
+ data_manager_dict['data_tables'][ data_table_name ].append( data_table_entry )
+ return data_manager_dict
+
def main():
    """Parse the command line, build the samtools FASTA index, and rewrite
    the Galaxy JSON parameter file with the new data table entries."""
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename' )
    parser.add_option( '-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey' )
    parser.add_option( '-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description' )
    parser.add_option( '-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name' )
    (options, args) = parser.parse_args()

    if not args:
        # fail with a usage message instead of a bare IndexError
        parser.error( 'Missing required argument: Galaxy JSON parameter filename' )
    filename = args[0]

    # Galaxy hands the data manager a JSON file describing the job; the same
    # file is overwritten below with the collected data table entries.
    with open( filename ) as fh:
        params = json.load( fh )
    target_directory = params[ 'output_data' ][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}

    if options.fasta_dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( options.fasta_dbkey ) )

    sequence_id, sequence_name = get_id_name( params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description )

    #build the index
    build_sam_index( data_manager_dict, options.fasta_filename, target_directory, options.fasta_dbkey, sequence_id, sequence_name, data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME )

    #save info to json file
    # text mode, not 'wb': json.dumps returns str, which cannot be written
    # to a binary-mode file under Python 3
    with open( filename, 'w' ) as fh:
        json.dump( data_manager_dict, fh )

if __name__ == "__main__":
    main()
diff -r 000000000000 -r e9b0c187700f data_manager/data_manager_sam_fasta_index_builder.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager/data_manager_sam_fasta_index_builder.xml Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,25 @@
+
+ builder
+
+ samtools
+
+ data_manager_sam_fasta_index_builder.py "${out_file}" --fasta_filename "${all_fasta_source.fields.path}" --fasta_dbkey "${all_fasta_source.fields.dbkey}" --fasta_description "${all_fasta_source.fields.name}" --data_table_name "fasta_indexes"
+
+
+
+
+
+
+
+
+
+
+
+
+
+.. class:: infomark
+
+**Notice:** If you leave name, description, or id blank, it will be generated automatically.
+
+
+
diff -r 000000000000 -r e9b0c187700f data_manager_conf.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/data_manager_conf.xml Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r e9b0c187700f tool-data/all_fasta.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/all_fasta.loc.sample Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,18 @@
+#This file lists the locations and dbkeys of all the fasta files
+#under the "genome" directory (a directory that contains a directory
+#for each build). The script extract_fasta.py will generate the file
+#all_fasta.loc. This file has the format (white space characters are
+#TAB characters):
+#
+#<unique_build_id>	<dbkey>	<display_name>	<file_path>
+#
+#So, all_fasta.loc could look something like this:
+#
+#apiMel3 apiMel3 Honeybee (Apis mellifera): apiMel3 /path/to/genome/apiMel3/apiMel3.fa
+#hg19canon hg19 Human (Homo sapiens): hg19 Canonical /path/to/genome/hg19/hg19canon.fa
+#hg19full hg19 Human (Homo sapiens): hg19 Full /path/to/genome/hg19/hg19full.fa
+#
+#Your all_fasta.loc file should contain an entry for each individual
+#fasta file. So there will be multiple fasta files for each build,
+#such as with hg19 above.
+#
diff -r 000000000000 -r e9b0c187700f tool-data/fasta_indexes.loc.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool-data/fasta_indexes.loc.sample Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,29 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of Samtools indexed sequences data files. You will need
+#to create these data files and then create a fasta_indexes.loc file
+#similar to this one (store it in this directory) that points to
+#the directories in which those files are stored. The fasta_indexes.loc
+#file has this format (white space characters are TAB characters):
+#
+#<unique_build_id>	<dbkey>	<display_name>	<file_base_path>
+#
+#So, for example, if you had hg19 Canonical indexed stored in
+#
+# /depot/data2/galaxy/hg19/sam/,
+#
+#then the fasta_indexes.loc entry would look like this:
+#
+#hg19canon hg19 Human (Homo sapiens): hg19 Canonical /depot/data2/galaxy/hg19/sam/hg19canon.fa
+#
+#and your /depot/data2/galaxy/hg19/sam/ directory
+#would contain hg19canon.fa and hg19canon.fa.fai files.
+#
+#Your fasta_indexes.loc file should include an entry per line for
+#each index set you have stored. The FASTA file in the path column does
+#actually exist and sits next to its samtools ".fai" index file, which
+#shares the same base name. For example:
+#
+#hg18canon hg18 Human (Homo sapiens): hg18 Canonical /depot/data2/galaxy/hg18/sam/hg18canon.fa
+#hg18full hg18 Human (Homo sapiens): hg18 Full /depot/data2/galaxy/hg18/sam/hg18full.fa
+#hg19canon hg19 Human (Homo sapiens): hg19 Canonical /depot/data2/galaxy/hg19/sam/hg19canon.fa
+#hg19full hg19 Human (Homo sapiens): hg19 Full /depot/data2/galaxy/hg19/sam/hg19full.fa
diff -r 000000000000 -r e9b0c187700f tool_data_table_conf.xml.sample
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_data_table_conf.xml.sample Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,12 @@
+
+
+
+
+ value, dbkey, name, path
+
+
+
+ value, dbkey, name, path
+
+
+
diff -r 000000000000 -r e9b0c187700f tool_dependencies.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tool_dependencies.xml Wed Mar 26 15:48:07 2014 -0400
@@ -0,0 +1,6 @@
+
+
+
+
+
+