changeset 60:f17bdf6f27bd

Deleted selected files
author bgruening
date Mon, 26 Aug 2013 17:13:58 -0400
parents b2e673e1db33
children 797c3e358588
files README.rst datatypes_conf.xml homer.py tool-data/homer.loc.sample tool-data/homer_available_genomes.loc.sample tool_dependencies.xml tools/findMotifsGenome.xml tools/findPeaks.xml tools/homer_macros.xml tools/makeTagDirectory.xml
diffstat 10 files changed, 0 insertions(+), 595 deletions(-) [+]
--- a/README.rst	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-Galaxy datatypes for HOMER tools
-================================
-
-These HOMER datatypes are copyright 2013 by Björn Grüning.
-
-See the licence text below.
-
-
-History
-=======
-
-======= ======================================================================
-Version Changes
-------- ----------------------------------------------------------------------
-v0.0.1  - First release.
-======= ======================================================================
-
-
-Installation
-============
-
-Doing this automatically via the Galaxy Tool Shed is probably simplest.
-
-
-Manual Installation
-===================
-
-Normally you would install this via the Galaxy Tool Shed, which would move
-the provided homer.py file into a suitable location and process the
-datatypes_conf.xml entry to be combined with your local configuration.
-
-However, if you really want a manual install, the following should work. Add
-this line to the datatypes_conf.xml file in the Galaxy main folder::
-
-    <datatype extension="homer_tagdir" type="galaxy.datatypes.homer:TagDirectory" mimetype="text/html" display_in_upload="false"/>
-
-Also create the file lib/galaxy/datatypes/homer.py by moving, copying or linking
-the homer.py file provided in this tarball. Finally, add 'import homer' near
-the start of the file lib/galaxy/datatypes/registry.py (after the other import
-lines).
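For reference, the manual steps above can also be scripted; a minimal sketch in Python, assuming a hypothetical Galaxy checkout location (the Tool Shed normally does all of this for you)::

    # Sketch only: the path below is an assumption, adjust it to your Galaxy install,
    # and run from the directory that contains the provided homer.py.
    import shutil

    GALAXY_ROOT = "/path/to/galaxy"

    # 1. Make the datatype class importable as galaxy.datatypes.homer
    shutil.copy("homer.py", GALAXY_ROOT + "/lib/galaxy/datatypes/homer.py")

    # 2. Merge the <datatype extension="homer_tagdir" .../> line shown above into
    #    the <registration> section of datatypes_conf.xml (done by hand).

    # 3. Add "import homer" after the other import lines in
    #    lib/galaxy/datatypes/registry.py (also done by hand).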
-
-
-Bug Reports
-===========
-
-You can file an issue at https://github.com/bgruening/galaxytools/issues or ask
-on the Galaxy development list at http://lists.bx.psu.edu/listinfo/galaxy-dev.
-
-
-Developers
-==========
-
-Development is happening here:
-
-    https://github.com/bgruening/galaxytools/
-
-
-Licence (MIT)
-=============
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-NOTE: This is the licence for the Galaxy HOMER datatypes **only**. HOMER
-and associated data files are available and licensed separately.
--- a/datatypes_conf.xml	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,11 +0,0 @@
-<?xml version="1.0"?>
-<datatypes>
-    <datatype_files>
-        <datatype_file name="homer.py"/>
-    </datatype_files>
-    <registration>
-        <datatype extension="homer_tagdir" type="galaxy.datatypes.homer:TagDirectory" mimetype="text/html" display_in_upload="false"/>
-    </registration>
-    <sniffers>
-    </sniffers>
-</datatypes>
--- a/homer.py	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-"""
-HOMER special datatypes
-"""
-import os
-from galaxy.datatypes.data import get_file_peek
-from galaxy.datatypes.data import Text, Data
-from galaxy.datatypes.metadata import MetadataElement
-from galaxy.datatypes.images import Html
-
-
-class TagDirectory( Html ):
-    """Base class for HOMER's Tag Directory datatype."""
-
-    file_ext = 'homer_tagdir'
-    composite_type = 'auto_primary_file'
-    allow_datatype_change = False
-
-    def __init__(self, **kwd):
-        Html.__init__( self, **kwd )
-        #self.add_composite_file('tagInfo.txt', description = 'basic configuration information', mimetype = 'text/html') # Contains basic configuration information
-        self.add_composite_file('tagLengthDistribution.txt', description = 'histogram of read lengths used for alignment', mimetype = 'text/html') # File contains a histogram of read lengths used for alignment.
-        self.add_composite_file('tagCountDistribution.txt', description = 'histogram of clonal read depth, showing the number of reads per unique position', mimetype = 'text/html') # File contains a histogram of clonal read depth, showing the number of reads per unique position.
-        self.add_composite_file('tagAutocorrelation.txt', description = 'distribution of distances between adjacent reads in the genome', mimetype = 'text/html') # The autocorrelation routine creates a distribution of distances between adjacent reads in the genome.
-        self.add_composite_file('tagFreq.txt', description = "nucleotide and dinucleotide frequencies as a function of distance from the 5' end of all reads", mimetype = 'text/html', optional=True) # Calculates the nucleotide and dinucleotide frequencies as a function of distance from the 5' end of all reads.
-        self.add_composite_file('tagFreqUniq.txt', description = "nucleotide and dinucleotide frequencies as a function of distance from the 5' end of all reads (counted only once)", mimetype = 'text/html', optional=True) # Same as tagFreq.txt, however individual genomic positions are only counted once.
-        self.add_composite_file('tagGCcontent.txt', description = 'Distribution of fragment GC%-content', mimetype = 'text/html', optional=True) # Distribution of fragment GC%-content.
-        self.add_composite_file('genomeGCcontent.txt', description = 'Distribution of fragment GC%-content at each location in the genome', mimetype = 'text/html', optional=True) # Distribution of fragment GC%-content at each location in the genome.
-
-
-    def regenerate_primary_file(self,dataset):
-        """
-            regenerate the index file after metadata generation
-        """
-        rval = ['<html><head><title>HOMER database files</title></head>']
-        rval.append('<body>')
-        rval.append('<p/>HOMER outputs:<p/><ul>')
-        for fname in os.listdir(dataset.extra_files_path):
-            sfname = os.path.split(fname)[-1]
-            rval.append( '<li><a href="%s" type="text/html">%s</a>' % ( sfname, sfname ) )
-        rval.append( '</ul></body></html>' )
-        f = file( dataset.file_name, 'w' )
-        f.write( '%s\n' % '\n'.join( rval ) )
-        f.close()
-        if not dataset.info:
-            dataset.info = 'HOMER datatype object'
-        if not dataset.blurb:
-            dataset.blurb = 'Composite file - HOMER'
-        return True
-
-    def generate_primary_file( self, dataset = None ):
-        rval = ['<html><head><title>HOMER database files</title></head><ul>']
-        for composite_name, composite_file in self.get_composite_files( dataset = dataset ).iteritems():
-            opt_text = ''
-            if composite_file.optional:
-                opt_text = ' (optional)'
-            rval.append( '<li><a href="%s">%s</a>%s' % ( composite_name, composite_name, opt_text ) )
-        rval.append( '</ul></html>' )
-        return "\n".join( rval )
-
-    def set_meta( self, dataset, **kwd ):
-        Html.set_meta( self, dataset, **kwd )
-        self.regenerate_primary_file(dataset)
-
-
-    def display_data(self, trans, data, preview=False, filename=None,
-                     to_ext=None, size=None, offset=None, **kwd):
-        """Apparently an old display method, but still gets called.
-
-        This allows us to format the data shown in the central pane via the "eye" icon.
-        """
-        return "This is a HOMER database."
-
-    def set_peek( self, dataset, is_multi_byte=False ):
-        """Set the peek and blurb text."""
-        if not dataset.dataset.purged:
-            dataset.peek  = "HOMER database (multiple files)"
-            dataset.blurb = "HOMER database (multiple files)"
-        else:
-            dataset.peek = 'file does not exist'
-            dataset.blurb = 'file purged from disk'
-
-    def display_peek( self, dataset ):
-        """Create HTML content, used for displaying peek."""
-        try:
-            return dataset.peek
-        except:
-            return "HOMER database (multiple files)"
-
-    def get_mime(self):
-        """Returns the mime type of the datatype (pretend it is text for peek)"""
-        return 'text/plain'
-
-    def merge(split_files, output_file):
-        """Merge HOMER databases (not implemented)."""
-        raise NotImplementedError("Merging HOMER databases is not supported")
-
-    def split( cls, input_datasets, subdir_generator_function, split_params):
-        """Split a HOMER database (not implemented)."""
-        if split_params is None:
-            return None
-        raise NotImplementedError("Can't split HOMER databases")
-
--- a/tool-data/homer.loc.sample	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-#This is a sample file distributed with Galaxy that is used to define a
-#list of HOMER installations with associated databases, using three tab-separated
-#columns (the longer whitespace runs are TAB characters):
-#
-#This file should enable the administrator to establish some kind of
-#reproducibility of HOMER data. It is recommended to install HOMER from scratch
-#in a new folder and define the location here. If you want to update your HOMER
-#installation, consider checking out a completely new HOMER version in a new path.
-#
-#The entries are as follows:
-#
-#<unique_id>	<database_caption>	<base_name_path>
-#
-#Your homer.loc file should include an entry per line for each "base name" 
-#you have stored.  For example:
-#
-#homer_08_Aug_2013		HOMER 4.2 08 Aug 2013		/data/0/galaxy_data/homer/08_08_2013/
-#homer_02_July_2013		HOMER 4.2 02 July 2013		/data/0/galaxy_data/homer/02_07_2013/
-#homer_1_Jan_2013	HOMER 4.1 1 Jan 2013	/data/0/galaxy_data/homer/01_01_2013
-
-#...etc...
-#
-#You can install and populate HOMER with all relevant data, following the instructions here:
-#http://biowhat.ucsd.edu/homer/introduction/install.html
-#
-#The Galaxy Tool Shed will take care of installing all requirements, but they are
-#only accessible at execution time, so feel free to ignore warnings during the
-#installation about missing weblogo, blat and so on.
-
-
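The three tab-separated columns described above are what a wrapper ultimately has to resolve into an installation path. A minimal parsing sketch, assuming the runs of whitespace between columns really are TAB characters as noted (the function name is illustrative only)::

    # Sketch only: not part of this repository.
    def parse_homer_loc(loc_path):
        """Return {unique_id: (caption, base_name_path)} from a homer.loc file."""
        entries = {}
        with open(loc_path) as handle:
            for line in handle:
                line = line.rstrip("\n")
                if not line or line.startswith("#"):
                    continue  # skip comments and blank lines
                # Columns may be separated by more than one TAB; drop empty fields.
                fields = [field for field in line.split("\t") if field]
                unique_id, caption, base_path = fields[:3]
                entries[unique_id] = (caption, base_path)
        return entries

    # e.g. parse_homer_loc("tool-data/homer.loc")["homer_08_Aug_2013"]
    # -> ("HOMER 4.2 08 Aug 2013", "/data/0/galaxy_data/homer/08_08_2013/")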
--- a/tool-data/homer_available_genomes.loc.sample	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-hg18
-hg19
-mm9
-mm10
--- a/tool_dependencies.xml	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-<?xml version="1.0"?>
-<tool_dependency>
-    <package name="blast+" version="2.2.26+">
-        <install version="1.0">
-            <actions>
-
-            <action type="download_binary">
-                <url_template architecture="x86_64" os="linux">ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.2.26/ncbi-blast-2.2.26+-x64-linux.tar.gz</url_template>
-                <url_template architecture="i386" os="linux">ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.2.26/ncbi-blast-2.2.26+-ia32-linux.tar.gz</url_template>
-                <url_template architecture="i686" os="linux">ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.2.26/ncbi-blast-2.2.26+-ia32-linux.tar.gz</url_template>
-                <url_template architecture="i686" os="darwin">ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.2.26/ncbi-blast-2.2.26+-universal-macosx.tar.gz</url_template>
-                <url_template architecture="i386" os="darwin">ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.2.26/ncbi-blast-2.2.26+-universal-macosx.tar.gz</url_template>
-                <url_template architecture="x86_64" os="darwin">ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.2.26/ncbi-blast-2.2.26+-universal-macosx.tar.gz</url_template>
-            </action>
-
-            <action type="shell_command">
-                tar -zxvf $INSTALL_DIR/ncbi-blast-2.2.26+-x64-linux.tar.gz ;
-                tar -zxvf $INSTALL_DIR/ncbi-blast-2.2.26+-ia32-linux.tar.gz ;
-                tar -zxvf $INSTALL_DIR/ncbi-blast-2.2.26+-universal-macosx.tar.gz ;
-            </action>
-                <action type="set_environment">
-                    <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR</environment_variable>
-                </action>
-            </actions>
-        </install>
-        <readme>
-Downloads the precompiled 32bit Linux, 64bit Linux, or Mac OS X BLAST+
-binaries from the NCBI, which is faster than performing a local compilation,
-avoids any issues with build dependencies, and is more reproducible between
-installations as there is no variability from the compiler or library versions.
-
-For more details, see:
-http://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&amp;PAGE_TYPE=BlastDocs&amp;DOC_TYPE=Download
-        </readme>
-    </package>
-</tool_dependency>
--- a/tools/findMotifsGenome.xml	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-<tool id="homer_findMotifsGenome" name="identify Motifs" version="0.1.2">
-    <description></description>
-    <requirements>
-        <requirement type="package" version="35x1">blat</requirement>
-        <requirement type="package" version="2.8.2">weblogo</requirement>
-        <requirement type="package" version="9.07">ghostscript</requirement>
-    </requirements>
-    <command>
-        #import os
-        #import tempfile
-        
-        #set $tmpdir = os.path.abspath( tempfile.mkdtemp() )
-        export PATH=\$PATH:$database.fields.path;
-
-        findMotifsGenome.pl $infile ${infile.metadata.dbkey} $tmpdir
-
-        -p 4
-        $mask
-        -size $size
-        -len $motif_len
-        -mis $mismatches
-        -S $number_of_motifs
-        $noweight
-        $cpg
-        -nlen $nlen
-        -olen $olen
-        $hypergeometric
-        $norevopp
-        $rna
-
-        #if $bg_infile:
-            -bg $bg_infile
-        #end if
-
-        #if $logfile_output:
-            2> $out_logfile
-        #else:
-            2>&#38;1
-        #end if
-
-        ;
-        cp $tmpdir/knownResults.txt $known_results_tabular;
-
-        #if $concat_motifs_output:
-            cp $tmpdir/homerMotifs.all.motifs $out_concat_motifs;
-        #end if
-
-        #if $html_output:
-            #set $go_path = os.path.join($tmpdir, 'geneOntology.html')
-
-            mkdir $denovo_results_html.files_path;
-            cp $tmpdir/homerResults.html $denovo_results_html;
-            cp $tmpdir/homerResults.html "$denovo_results_html.files_path";
-            cp -r $tmpdir/homerResults/ "$denovo_results_html.files_path";
-
-
-            mkdir "$known_results_html.files_path";
-            cp $tmpdir/knownResults.html $known_results_html;
-            cp $tmpdir/knownResults.html "$known_results_html.files_path";
-            cp $tmpdir/homerResults.html "$known_results_html.files_path";
-            cp -r $tmpdir/knownResults/ "$known_results_html.files_path";
-
-            #if os.path.exists( $go_path ):
-                cp $go_path "$denovo_results_html.files_path";
-                cp $go_path "$known_results_html.files_path";
-            #end if
-
-        #end if
-
-        ##rm -rf $tmpdir
-
-    </command>
-    <inputs>
-        <expand macro="input_choose_homer_version" />
-
-        <param name="infile" format="bed" type="data" label="BED file" help="a file containing genomic coordinates">
-            <validator type="dataset_metadata_in_file" filename="homer_available_genomes.loc" metadata_name="dbkey" metadata_column="0" message="No HOMER genome build available for your species." />
-        </param>
-
-        <param name="rna" type="boolean" truevalue="-rna" falsevalue="" checked="False" label="Search for RNA motifs" help="If looking at RNA data (e.g. CLIP-Seq or similar), this option will restrict HOMER to only search the + strand (relative to the peak), and will output RNA motif logos (i.e. U instead of T).  It will also try to compare found motifs to an RNA motif database, which, sadly, only contains miRNAs right now... I guess Chuck roundhouse kicked all of the splicing and other RNA motifs into hard-to-find databases."/>
-        <param name="norevopp" type="boolean" truevalue="-norevopp" falsevalue="" checked="False" label="Only search for motifs on + strand" help=""/>
-        <param name="hypergeometric" type="boolean" truevalue="-h" falsevalue="" checked="False" label="Hypergeometric enrichment scoring" help="By default, findMotifsGenome.pl uses the binomial distribution to score motifs.  This works well when the background sequences greatly outnumber the target sequences - however, if you are using the '-bg' option above and the number of background sequences is smaller than the number of target sequences, it is a good idea to use the hypergeometric distribution instead.  FYI - the binomial is faster to compute, hence its use for motif finding in large numbers of regions."/>
-        <param name="olen" type="integer" value="0" label="Motif level autonormalization" help ="0 means disabled"/>
-        <param name="nlen" type="integer" value="3" label="Region level autonormalization" help ="0 to disable"/>
-        <param name="noweight" type="boolean" truevalue="-noweight" falsevalue="" checked="False" label="Disable GC/CpG normalization" help=""/>
-        <param name="cpg" type="boolean" truevalue="-cpg" falsevalue="" checked="False" label="Normalize CpG% content instead of GC% content" help=""/>
-        <param name="number_of_motifs" type="integer" value="25" label="Number of motifs to find" help="Number of motifs of each length to find (-S). The default of 25 is already quite a lot; requesting more motifs increases the execution time."/>
-        <param name="mismatches" type="integer" value="2" label="Mismatches allowed in global optimization phase" help ="The more mismatches you allow, the more sensitive the algorithm, particularly for longer motifs.  However, this also slows down the algorithm a bit.  If searching for motifs longer than 12-15 bp, it's best to increase this value to at least 3 or even 4."/>
-        <param name="mask" type="boolean" truevalue="-mask" falsevalue="" checked="True" label="Use masked version of the genome" help=""/>
-        <param name="size" type="integer" value="200" label="The size of the region used for motif finding" help="If analyzing ChIP-Seq peaks from a transcription factor, Chuck would recommend 50 bp for establishing the primary motif bound by a given transcription factor and 200 bp for finding both primary and 'co-enriched' motifs for a transcription factor.  When looking at histone-marked regions, 500-1000 bp is probably a good idea (i.e. H3K4me or H3/H4 acetylated regions).  In theory, HOMER can work with very large regions (i.e. 10kb), but larger regions mean more sequence and longer execution time."/>
-        <param name="motif_len" type="text" value="8,10,12" label="Specifies the length of motifs to be found" help ="HOMER will find motifs of each size separately and then combine the results at the end.  The length of time it takes to find motifs increases greatly with increasing size.  In general, it's best to try out enrichment with shorter lengths (i.e. less than 15) before trying longer lengths.  Much longer motifs can be found with HOMER, but it's best to use smaller sets of sequence when trying to find long motifs (i.e. use '-len 20 -size 50'), otherwise it may take way too long (or take too much memory).  The other trick to reduce the total resource consumption is to reduce the number of background sequences (-N #)."/>
-
-        <param name="bg_infile" format="bed" type="data" optional="True" label="User defined background regions" help="These will still be normalized for CpG% or GC% content just like randomly chosen sequences and autonormalized unless these options are turned off (i.e. '-nlen 0 -noweight').  This can be very useful since HOMER is a differential motif discovery algorithm.  For example, you can give HOMER a set of peaks co-bound by another factor and compare them to the rest of the peaks.  HOMER will automatically check if the background peaks overlap with the target peaks using mergePeaks, and discard overlapping regions."/>
-
-        <param name="concat_motifs_output" type="boolean" truevalue="" falsevalue="" checked="True" label="Output concatenated file composed of all motifs" help=""/>
-        <param name="html_output" type="boolean" truevalue="" falsevalue="" checked="True" label="Output HOMER visual summaries" help=""/>
-        <param name="logfile_output" type="boolean" truevalue="" falsevalue="" label="Output HOMER logfile" help=""/>
-
-    </inputs>
-    <outputs>
-        <data format="tabular" name="known_results_tabular" label="HOMER known motifs" />
-        <data format="html" name="denovo_results_html" label="HOMER de novo motifs">
-            <filter>html_output is True</filter>
-        </data>
-        <data format="html" name="known_results_html" label="HOMER known motifs">
-            <filter>html_output is True</filter>
-        </data>
-        <data format="txt" name="out_concat_motifs" label="HOMER concatenated motif files">
-            <filter>concat_motifs_output is True</filter>
-        </data>
-        <data name="out_logfile" type="data" format="txt" label="HOMER logfile: motifs from  ${on_string}">
-            <filter>logfile_output is True</filter>
-        </data>
-    </outputs>
-    <tests>
-        <test>
-
-        </test>
-    </tests>
-
-    <help>
-
-  .. class:: infomark
-
-  **Homer findMotifsGenome**
-
-Autonormalization attempts to remove sequence bias from lower-order oligos (1-mers, 2-mers ... up to #).
-Region-level autonormalization (-nlen #), which covers 1/2/3-mers by default, attempts to normalize background regions by adjusting their weights.
-If this isn't getting the job done (autonormalization is not guaranteed to remove all sequence bias), you can try the more aggressive motif-level autonormalization (-olen #).
-This performs the autonormalization routine on the oligo table during de novo motif discovery.
-
-    </help>
-</tool>
-
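The -nlen and -olen autonormalization options discussed in the help text above end up on the findMotifsGenome.pl command line that the Cheetah template assembles. A minimal sketch of an equivalent direct invocation, assuming hypothetical input files and the hg19 genome::

    # Sketch only: file names, genome and flag values are assumptions.
    import subprocess

    subprocess.check_call([
        "findMotifsGenome.pl", "peaks.bed", "hg19", "motif_out",
        "-size", "200",
        "-nlen", "3",   # region-level autonormalization of 1/2/3-mers (0 disables)
        "-olen", "8",   # motif-level autonormalization on the oligo table (0 disables)
    ])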
--- a/tools/findPeaks.xml	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-<tool id="homer_findPeaks" name="find Peaks" version="0.1.2">
-    <description></description>
-    <requirements>
-        <requirement type="package" version="35x1">blat</requirement>
-        <requirement type="package" version="2.8.2">weblogo</requirement>
-        <requirement type="package" version="9.07">ghostscript</requirement>
-    </requirements>
-    <!--<version_command></version_command>-->
-    <command>
-        export PATH=\$PATH:$database.fields.path;
-
-        findPeaks $affected_tag_dir.extra_files_path -o $outputPeakFile
-
-        #if $control_tag_dir:
-            -i $control_tag_dir.extra_files_path
-        #end if
-
-        #if $logfile_output:
-            2> $out_logfile
-        #else:
-            2>&#38;1
-        #end if
-
-    </command>
-    <inputs>
-        <expand macro="input_choose_homer_version" />
-        <param name="affected_tag_dir" format="homer_tagdir" type="data" label="tag directory" help="Must be made with the tool makeTagDirectory" />
-        <param name="control_tag_dir" type="data" format="homer_tagdir" optional="True" label="Control tag directory" help="Must be made with makeTagDirectory" />
-        <param name="logfile_output" type="boolean" truevalue="" falsevalue="" label="Output HOMER logfile" help=""/>
-    </inputs>
-    <outputs>
-        <data format="txt" name="outputPeakFile" label="${tool.name} on #echo os.path.splitext(str($affected_tag_dir.name))[0]#.txt" />
-    </outputs>
-    <tests>
-        <test>
-        </test>
-    </tests>
-
-    <help>
-
-  .. class:: infomark
-
-  **Homer findPeaks**
-
-Requires tag directories (see makeTagDirectory)
-
-    </help>
-</tool>
-
--- a/tools/homer_macros.xml	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,11 +0,0 @@
-<macros>
-    <macro name="input_choose_homer_version">
-        <param name="database" type="select" label="HOMER version and data files" min="1">
-            <options from_file="homer.loc">
-                <column name="value" index="0"/>
-                <column name="name" index="1"/>
-                <column name="path" index="2"/>
-            </options>
-        </param>
-    </macro>
-</macros>
--- a/tools/makeTagDirectory.xml	Mon Aug 26 17:13:45 2013 -0400
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-<tool id="homer_makeTagDirectory" name="create HOMER database" version="1.0.1">
-    <requirements>
-        <requirement type="package" version="35x1">blat</requirement>
-        <requirement type="package" version="2.8.2">weblogo</requirement>
-        <requirement type="package" version="9.07">ghostscript</requirement>
-    </requirements>
-    <description>(TagDirectory)</description>
-    <command>
-        #set $HOMER_PATH = str($database.fields.path)
-        export PATH=\$PATH:$database.fields.path;
-        
-        makeTagDirectory $tag_dir.extra_files_path
-        #for $infile in $alignment_files:
-            $infile.file
-        #end for
-    
-        #if $logfile_output:
-            2> $out_logfile
-        #else:
-            2>&#38;1
-        #end if
-
-    </command>
-    <inputs>
-        <expand macro="input_choose_homer_version" />
-
-        <repeat name="alignment_files" title="Alignment Files">
-            <param name="file" label="Add file" type="data" format="sam,bed,bam" help="Alignments in SAM, BAM or BED format" />
-        </repeat>
-
-    </inputs>
-    <outputs>
-        <data format="homer_tagdir" name="tag_dir" label="HOMER tag directory" />
-    </outputs>
-
-
-    <tests>
-    </tests>
-
-    <help>
-
-  .. class:: infomark
-
-  **Homer makeTagDirectory**
-
-  For more options, look under: "Command line options"
-
-  http://biowhat.ucsd.edu/homer/ngs/tagDir.html
-
-**Parameter list**
-
-Command line options (not all of them are supported)::
-
-	Usage: makeTagDirectory &lt;directory&gt; &lt;alignment file 1&gt; [file 2] ... [options]
-
-	Creates a platform-independent &apos;tag directory&apos; for later analysis.
-	Currently BED, eland, bowtie, and sam files are accepted. The program will try to
-	automatically detect the alignment format if not specified.  Program will also
-	unzip *.gz, *.bz2, and *.zip files and convert *.bam to sam files on the fly
-	Existing tag directories can be added or combined to make a new one using -d/-t
-	If more than one format is needed and the program cannot auto-detect it properly,
-	make separate tag directories by running the program separately, then combine them.
-	To perform QC/manipulations on an existing tag directory, add &quot;-update&quot;
-
-	Options:
-		-fragLength &lt;# | given&gt; (Set estimated fragment length - given: use read lengths)
-			By default treats the sample as a single read ChIP-Seq experiment
-		-format &lt;X&gt; where X can be: (with column specifications underneath)
-			bed - BED format files:
-				(1:chr,2:start,3:end,4:+/- or read name,5:# tags,6:+/-)
-				-force5th (5th column of BED file contains # of reads mapping to position)
-			sam - SAM formatted files (use samTools to convert BAMs into SAM if you have BAM)
-				-unique (keep if there is a single best alignment based on mapq)
-					-mapq &lt;#&gt; (Minimum mapq for -unique, default: 10, set negative to use AS:i:/XS:i:)
-				-keepOne (keep one of the best alignments even if others exist)
-				-keepAll (include all alignments in SAM file)
-				-mis (Maximum allowed mismatches, default: no limit, uses MD:Z: tag)
-			bowtie - output from bowtie (run with --best -k 2 options)
-				(1:read name,2:+/-,3:chr,4:position,5:seq,6:quality,7:NA,8:misInfo)
-			eland_result - output from basic eland
-				(1:read name,2:seq,3:code,4:#zeroMM,5:#oneMM,6:#twoMM,7:chr,
-							8:position,9:F/R,10-:mismatches
-			eland_export - output from illumina pipeline (22 columns total)
-				(1-5:read name info,9:sequence,10:quality,11:chr,13:position,14:strand)
-			eland_extended - output from illumina pipeline (4 columns total)
-				(1:read name,2:sequence,3:match stats,4:positions[,])
-			mCpGbed - encode style mCpG reporting in extended BED format, no auto-detect
-				(1:chr,2:start,3:end,4:name,5:,6:+/-,7:,8:,9:,10:#C,11:#mC)
-			allC - Lister style output files detailing the read information about all cytosines
-				(1:chr,2:pos,3:strand,4:context,#mC,#totalC,#C
-				-minCounts &lt;#&gt; (minimum number of reads to report mC/C ratios, default: 10)
-				-mCcontext &lt;CG|CHG|CHH|all&gt; (only use C&apos;s in this context, default: CG)
-			HiCsummary - minimal paired-end read mapping information
-				(1:readname,2:chr1,3:5&apos;pos1,4:strand1,5:chr2,6:5&apos;pos2,7:strand2)
-		-force5th (5th column of BED file contains # of reads mapping to position)
-		-d &lt;tag directory&gt; [tag directory 2] ... (add Tag directory to new tag directory)
-		-t &lt;tag file&gt; [tag file 2] ... (add tag file i.e. *.tags.tsv to new tag directory)
-		-single (Create a single tags.tsv file for all &quot;chromosomes&quot; - i.e. if &gt;100 chromosomes)
-		-update (Use current tag directory for QC/processing, do not parse new alignment files)
-		-tbp &lt;#&gt; (Maximum tags per bp, default: no maximum)
-		-precision &lt;1|2|3&gt; (number of decimal places to use for tag totals, default: 1)
-
-		GC-bias options:
-		-genome &lt;genome version&gt; (To see available genomes, use &quot;-genome list&quot;)
-			-or- (for custom genomes):
-		-genome &lt;path-to-FASTA file or directory of FASTA files&gt;
-
-		-checkGC (check Sequence bias, requires &quot;-genome&quot;)
-			-freqStart &lt;#&gt; (offset to start calculating frequency, default: -50)
-			-freqEnd &lt;#&gt; (distance past fragment length to calculate frequency, default: +50)
-			-oligoStart &lt;#&gt; (oligo bias start)
-			-oligoEnd &lt;#&gt; (oligo bias end)
-		-normGC &lt;target GC profile file&gt; (i.e. tagGCcontent.txt file from control experiment)
-			Use &quot;-normGC default&quot; to match the genomic GC distribution
-		-normFixedOligo &lt;oligoFreqFile&gt; (normalize 5&apos; end bias, &quot;-normFixedOligo default&quot; ok)
-		-minNormRatio &lt;#&gt; (Minimum deflation ratio of tag counts, default: 0.25)
-		-maxNormRatio &lt;#&gt; (Maximum inflation ratio of tag counts, default: 2.0)
-		-iterNorm &lt;#&gt; (Sets -max/minNormRatio to 1 and 0, iteratively normalizes such that the
-			resulting distribution is no more than #% different than target, i.e. 0.1, default: off)
-
-	Paired-end/HiC options
-		-illuminaPE (when matching PE reads, assumes last character of read name is 0 or 1)
-		-removePEbg (remove paired end tags within 1.5x fragment length on same chr)
-			-PEbgLength &lt;#&gt; (remove PE reads facing one another within this distance, default: 1.5x fragLen)
-		-restrictionSite &lt;seq&gt; (i.e. AAGCTT for HindIII, assign data &lt; 1.5x fragment length to sites)
-			Must specify genome sequence directory too. (-rsmis &lt;#&gt; to specify mismatches, def: 0)
-			-both, -one, -onlyOne, -none (Keeps reads near restriction sites, default: keep all)
-			-removeSelfLigation (removes reads linking same restriction fragment)
-			-removeRestrictionEnds (removes reads starting on a restriction fragment)
-			-assignMidPoint (will place reads in the middle of HindIII fragments)
-			-restrictionSiteLength &lt;#&gt; (maximum distance from restriction site, default: 1.5x fragLen)
-		-removeSpikes &lt;size bp&gt; &lt;#&gt; (remove tags from regions with &gt; than # times
-			the average tags per size bp, suggest &quot;-removeSpikes 10000 5&quot;)
-
-
-    </help>
-</tool>
-