# HG changeset patch
# User wolma
# Date 1418160500 18000
# Node ID 7bce49512badc776d5419c9d80e477f789999a46
# Parent  623cf7b461fa6e21b9ad623230c917261ce5c0cd
version 0_1_5

diff -r 623cf7b461fa -r 7bce49512bad sam_header.xml
--- a/sam_header.xml	Thu Aug 14 10:36:08 2014 -0400
+++ b/sam_header.xml	Tue Dec 09 16:28:20 2014 -0500
@@ -1,42 +1,43 @@
-
+
 Create a SAM format header from run metadata for sample annotation.
-  mimodd
+  mimodd
+  mimodd version -q
 mimodd header
-        --rg_id "$rg_id"
-        --rg_sm "$rg_sm"
+        --rg-id "$rg_id"
+        --rg-sm "$rg_sm"
         #if $str($rg_cn):
-          --rg_cn "$rg_cn"
+          --rg-cn "$rg_cn"
         #end if
         #if $str($rg_ds):
-          --rg_ds "$rg_ds"
+          --rg-ds "$rg_ds"
         #end if
-        #if $str($anno) and $str($month) and $str($day):
-          --rg_dt "$anno-$month-$day"
+        #if $str($rg_date):
+          --rg-dt "$rg_date"
         #end if
         #if $str($rg_lb):
-          --rg_lb "$rg_lb"
+          --rg-lb "$rg_lb"
         #end if
         #if $str($rg_pl):
-          --rg_pl "$rg_pl"
+          --rg-pl "$rg_pl"
         #end if
-        #if $str($rg_ds):
-          --rg_pi "$rg_pi"
+        #if $str($rg_pi):
+          --rg-pi "$rg_pi"
         #end if
         #if $str($rg_pu):
-          --rg_pu "$rg_pu"
+          --rg-pu "$rg_pu"
         #end if
-        --outputfile $outputfile
+        --ofile $outputfile
-
+
@@ -46,17 +47,7 @@
-
-
-
-
-
-
-
-
-
-
-
+
@@ -76,9 +67,17 @@
-
-
-
+
+
+
+
+
+
+
+
+
+
+
@@ -122,7 +121,7 @@
 
 **Tip:**
 
-While you can do Alignments from fastq file format by providing a custom header file directly to the *SNAP Read Alignment* tool, the **recommended approach** is to first convert all input files to and archive all datasets in SAM/BAM format with appropriate header information prior to any downstream analysis. Although a bit more time-consuming this practice protects against information loss and ensures that the input datasets will remain useful for others in the future.
+While you can run alignments from fastq format files by providing a custom header file directly to the *SNAP Read Alignment* tool, we **recommend** that you first convert all input files to, and archive all datasets in, SAM/BAM format with appropriate header information prior to any downstream analysis. Although a bit more time-consuming, this practice protects against information loss and ensures that the input datasets will remain useful for others in the future.
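On the command-section changes above: this release renames every mimodd header option from underscore to hyphen style (--rg_id becomes --rg-id, and so on), replaces --outputfile with --ofile, collapses the three separate date fields ($anno, $month, $day) into a single $rg_date parameter, and fixes a template bug in which --rg_pi was emitted only when $rg_ds, rather than $rg_pi, was set. As a rough sketch of the command the updated Cheetah template would render, assuming hypothetical metadata values (run1, sample1, and the date below are illustrative placeholders, not taken from the patch):

    # hypothetical rendered invocation; option names are those
    # introduced by this patch, values are made-up examples
    mimodd header \
        --rg-id run1 \
        --rg-sm sample1 \
        --rg-dt 2014-12-09 \
        --ofile header.sam

Options guarded by #if $str(...) blocks in the template are simply omitted from the rendered command when the corresponding form field is left blank.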