diff rglasso_cox.xml @ 20:bb725f6d6d38 draft default tip

planemo upload for repository https://github.com/galaxyproject/tools-iuc/tree/master/tools/rglasso commit 344140b8df53b8b7024618bb04594607a045c03a
author iuc
date Mon, 04 May 2015 22:47:29 -0400
parents 0e87f636bdd8
children
--- a/rglasso_cox.xml	Wed Apr 29 12:07:11 2015 -0400
+++ b/rglasso_cox.xml	Mon May 04 22:47:29 2015 -0400
@@ -7,223 +7,9 @@
       <requirement type="package" version="2.14">glmnet_lars_2_14</requirement>
   </requirements>
   <command interpreter="python">
-     rgToolFactory.py --script_path "$runme" --interpreter "Rscript" --tool_name "rglasso" 
+     rgToolFactory.py --script_path "$runme" --interpreter "Rscript" --tool_name "rglasso"
     --output_dir "$html_file.files_path" --output_html "$html_file" --make_HTML "yes"
   </command>
-  <inputs>
-     <param name="title" type="text" value="lasso test" size="80" label="Title for job outputs" help="Typing a short, meaningful text here will help remind you (and explain to others) what the outputs represent">
-      <sanitizer invalid_char="">
-        <valid initial="string.letters,string.digits"><add value="_" /> </valid>
-      </sanitizer>
-    </param>
-    <param name="input1"  type="data" format="tabular" label="Select an input tabular text file from your history. Rows represent samples; Columns are measured phenotypes"
-    multiple='False' optional="False" help="Tabular text data with samples as rows, phenotypes as columns with a header row of column identifiers" />
-    <param name="xvar_cols" label="Select columns containing numeric variables to use as predictor (x) variables" type="data_column" data_ref="input1" numerical="False" 
-         multiple="True" use_header_names="True" force_select="True" />
-    <param name="force_xvar_cols" label="Select numeric columns containing variables ALWAYS included as predictors in cross validation" type="data_column" data_ref="input1" numerical="False" 
-         multiple="True" use_header_names="True" force_select="False"/>
-    <conditional name="model">
-        <param name="fam" type="select" label="GLM Link function for models" 
-             help="Binary dependant variables will automatically be set to Binomial no matter what this is set to">
-                <option value="gaussian" selected="true">Gaussian - continuous dependent (y)</option>
-                <option value="binomial">Binomial dependent variables</option>
-                <option value="poisson">Poisson (eg counts)</option>
-                <option value="cox">Cox models - require special setup for y variables - see below</option>
-        </param>
-        <when value="gaussian">
-            <param name="yvar_cols" label="Select numeric columns containing variables to use as the dependent (y) in elasticnet" type="data_column" data_ref="input1" numerical="False" 
-             multiple="True" use_header_names="True"  help = "If multiple, each will be modelled against all the x variables and reported separately." force_select="True"/>
-            <param name="output_full" type="hidden" value='F' />
-            <param name="output_pred" type="hidden" value='F' />
-              <param name="cox_id" label="Select column containing a unique sample identifier"
-                 help = "Only really needed for output sample specific predicted values downstream."
-                 type="data_column" data_ref="input1" numerical="False" force_select="True"
-                 multiple="False" use_header_names="True" />
-      </when>
-        <when value="binomial">
-            <param name="yvar_cols" label="Select numeric columns containing variables to use as the dependent (y) in elasticnet" type="data_column" data_ref="input1" numerical="False" 
-             multiple="True" use_header_names="True"  help = "If multiple, each will be modelled against all the x variables and reported separately." force_select="True"/>
-             <param name="output_full" type="hidden" value='F' />
-             <param name="output_pred" type="select" label="Create a tabular output with predicted values for each subject from the optimal model for (eg) NRI estimates" >
-                <option value="F" selected="true">No predicted value output file</option>
-                <option value="T">Create a predicted value output file</option>
-             </param>             
-              <param name="cox_id" label="Select column containing a unique sample identifier" 
-                 help = "Only really needed for output sample specific predicted values downstream."
-                 type="data_column" data_ref="input1" numerical="False" force_select="True"
-                 multiple="False" use_header_names="True" />
-             <param name="predict_at" type="hidden" value='' />
-             
-        </when>
-        <when value="poisson">
-            <param name="yvar_cols" label="Select columns containing variables to use as the dependent (y) in elasticnet" type="data_column" data_ref="input1" numerical="True" 
-             multiple="True" use_header_names="True"  help = "If multiple, each will be modelled against all the x variables and reported separately." force_select="True"/>
-             <param name="output_full" type="hidden" value='F' />
-             <param name="output_pred" type="hidden" value='F' />
-             <param name="predict_at" type="hidden" value='' />
-              <param name="cox_id" label="Select column containing a unique sample identifier"
-                 help = "Optional. Only really needed for output sample specific predicted values downstream. Free - enjoy"
-                 type="data_column" data_ref="input1" numerical="True" force_select="False"
-                 multiple="False" use_header_names="True" />
-        </when>
-        <when value="cox">
-             <param name="cox_time" label="Select column containing time under observation for Cox regression"
-                 type="data_column" data_ref="input1" numerical="True" force_select="True"
-                 multiple="False" use_header_names="True"  help = "This MUST contain a time period - eg continuous years or days to failure or right censoring"/>
-             <param name="cox_status" label="Select column containing status = 1 for outcome of interest at the end of the time under observation or 0 for right censoring"
-                 type="data_column" data_ref="input1" numerical="True" force_select="True"
-                 multiple="False" use_header_names="True"  help = "This MUST contain 1 for subjects who had an event at that time or 0 for a right censored observation"/>
-              <param name="cox_id" label="Select column containing a unique sample identifier"
-                 help = "Optional. Only really needed for output sample specific predicted values downstream. Free - enjoy"
-                 type="data_column" data_ref="input1" numerical="False" force_select="False"
-                 multiple="False" use_header_names="True" />
-             <param name="output_full" type="select" label="Create a tabular output with coefficients for all predictors" >
-                <option value="F" selected="true">No full model output file</option>
-                <option value="T">Create a full model output file</option>
-             </param>
-             <param name="output_pred" type="select" label="Create a tabular output with predicted values for each subject from the optimal model for (eg) NRI estimates" >
-                <option value="F" selected="true">No predicted value output file</option>
-                <option value="T">Create a predicted value output file</option>
-             </param>
-             <param name="predict_at"  type="text" value='' label="Provide a comma separated list of times to make a prediction for each subject"
-                 optional="True" help="Default (blank) will return predictions at 0%,25%,50%,75%,100% of the observed times which should be informative" />
-             
-        </when>
-    </conditional>
-    <param name="optLambda" type="select" label="Value to use when reporting optimal model and coefficients" help="minLambda will have more predictors - 1SDLambda will be more parsimonious">
-            <option value="lambda.1se" selected="true">Lambda + 1 SE of min MSE or AUC (fewer coefficients - more false negatives)</option>
-            <option value="lambda.min">Lambda at min MSE or max AUC (more coefficients - more false positives)</option>
-    </param>
-    <param name="logxform_cols"  optional="True" label="Select numeric columns to be log transformed before use as predictors or dependent variables" type="data_column"
-        data_ref="input1" numerical="True" multiple="True" use_header_names="True" help = "The wisdom of doing this depends entirely on your predictors - eg can help diminish long-tailed outlier influence"
-        force_select="False"/>
-    <param name="do_standard" type="select" label="Standardise x vars" 
-         help="If all measurements on same scale, may not be needed. Coefficients are always returned on the original scale.">
-            <option value="False" selected="true">No standardisation of predictors</option>l
-            <option value="True">Standardise predictors before model</option>
-    </param>
-    <param name="mdsplots" type="select" label="Generate MDS plots of samples in measurement space and measurements in sample space" >
-            <option value="False" selected="true">No MDS plots</option>l
-            <option value="True">Yes create MDS plots</option>
-    </param>
-    <param name="alpha" type="float" value="0.95" size="5" min="0.01" max="1.0" label="Alpha - see glmnet docs. 1 for pure lasso. 0.0 for pure ridge regression"
-     help="Default 0.95 allows lasso to cope better with expected predictor collinearity. Use (eg) 0.5 for hybrid regularised regression or (eg) 0.025 for ridge regression"/>
-    <param name="nfold" type="integer" value="10" size="5" label="Number of folds for internal cross validation"
-     help="Default of 10 is usually ok"/>
-  </inputs>
-  <outputs>
-    <data format="html" name="html_file" label="${title}.html"/>
-    <data format="tabular" name="model_file" label="${title}_modelres.xls"/>
-    <data format="tabular" name="output_full_file" label="${title}_full_cox_model.xls">
-        <filter>model['output_full'] == 'T'</filter>
-    </data>
-    <data format="tabular" name="output_pred_file" label="${title}_predicted_from_model.xls">
-        <filter>model['output_pred'] == 'T'</filter>
-    </data>
-  </outputs>
- <tests>
-    <test>
-     <param name='input1' value='cox_test.xls' ftype='tabular' />
-     <param name='treatment_name' value='case' />
-     <param name='title' value='Cox glmnet test' />
-     <param name='nfold' value='10' />
-     <param name='logxform_cols' value='' />
-     <param name='alpha' value='0.95' />
-     <param name='do_standard' value="True" />
-     <param name='cox_time' value='1' />
-     <param name='cox_status' value='2' />
-     <param name='cox_id' value='1' />
-     <param name='predict_at' value='' />
-     <param name='fam' value='cox' />
-     <param name='yvar_cols' value='' />
-     <param name='xvar_cols' value='3,4,5' />
-     <param name='force_xvar_cols' value='3' />
-     <param name='output_full' value='F' />
-     <param name='output_pred' value='F' />
-     <output name='model_file' file='coxlassotest_modelres.xls'> 
-          <assert_contents>
-                <has_text text="rhubarb" />
-                <has_text text="TRUE" />
-                <!-- &#009; is XML escape code for tab -->
-                <!-- has_line line="regulator&#009;partial_likelihood&#009;forced_in&#009;glmnet_model&#009;best_lambda" / -->
-                <has_line line="regulator&#009;partial_likelihood&#009;forced_in&#009;glmnet_model&#009;best_lambda&#009;lambdaChoice&#009;alpha" />
-                <has_n_columns n="7" />
-           </assert_contents>
-     </output>
-     <output name='html_file' file='coxlassotest.html'  compare='diff' lines_diff='16' />
-    </test>
-</tests>
-<help>
-
-**Before you start**
-
-Please read the glmnet documentation @ glmnet_
-
-This Galaxy wrapper merely exposes that code and the glmnet_ documentation is essential reading
-before getting useful results here.
-
-**What it does**
-
-From documentation at glmnet_ ::
-
- Glmnet is a package that fits a generalized linear model via penalized maximum likelihood.
- The regularization path is computed for the lasso or elasticnet penalty at a grid of values for the regularization parameter lambda.
- The algorithm is extremely fast, and can exploit sparsity in the input matrix x.
- It fits linear, logistic and multinomial, poisson, and Cox regression models.
- A variety of predictions can be made from the fitted models.
-
-Internal cross validation is used to optimise the choice of lambda based on CV AUC for logistic (binomial outcome) models, or CV mse for gaussian.
-
-**Warning about the tyrany of dimensionality**
-
-Yes, this package will select 'optimal' models even when you (optimistically) supply more predictors than you have cases.
-The model returned is unlikely to represent the only informative regularisation path through your data - if you run repeatedly with
-exactly the same settings, you will probably see many different models being selected.
-This is not a software bug - the real problem is that you just don't have enough information in your data. 
-
-Sufficiently big jobs will take a while (eg each lasso regression with 20k features on 1k samples takes about 2-3 minutes on our aged cluster)
-
-**Input**
-
-Assuming you have more measurements than samples, you supply data as a tabular text file where each row is a sample and columns
-are variables. You specify which columns are dependent (predictors) and which are observations for each sample. Each of multiple
-dependent variable columns will be run and reported independently. Predictors can be forced in to the model.
-
-**Output**
-
-For each selected dependent regression variable, a brief report of the model coefficients predicted at the
-'optimal' nfold CV value of lambda.
-
-**Predicted event probabilities for Cox and Logistic models**
-
-If you want to compare (eg) two competing clinical predictions, there's a companion generic NRI tool
-for predicted event probabilities. Estimates dozens of measures of improvement in prediction. Currently only works for identical id subjects
-but can probably be extended to independent sample predictions.
-
-Given a model, we can generate a predicted p (for status 1) in binomial or cox frameworks so models can be evaluated in terms of NRI.
-Of course, estimates are likely substantially inflated over 'real world' performance by being estimated from the same sample - but you probably
-already knew that since you were smart enough to reach this far down into the on screen help. The author salutes you, intrepid reader!
-
-It may seem an odd thing to do, but we can predict p for an event for each subject from our original data, given a parsimonious model. Doing
-this for two separate models (eg, forcing in an additional known explanatory measurement to the new model) allows comparison of the two models
-predicted status for each subject, or the same model in independent populations to see how badly it does
-
-**Attributions**
-
-glmnet_ is the R package exposed by this Galaxy tool.
-
-Galaxy_ (that's what you are using right now!) for gluing everything together 
-
-Otherwise, all code and documentation comprising this tool was written by Ross Lazarus and is 
-licensed to you under the LGPL_ like other rgenetics artefacts
-
-.. _LGPL: http://www.gnu.org/copyleft/lesser.html
-.. _glmnet: http://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html
-.. _Galaxy: http://getgalaxy.org
-
-
-</help>
-
 <configfiles>
 <configfile name="runme">
 <![CDATA[
@@ -236,27 +22,27 @@
 message=function(x) {print.noquote(paste(x,sep=''))}
 
 
-ross.cv.glmnet = function (x, y, weights, offset = NULL, lambda = NULL, type.measure = c("mse", 
-    "deviance", "class", "auc", "mae"), nfolds = 10, foldid, 
-    grouped = TRUE, keep = FALSE, parallel = FALSE, ...) 
+ross.cv.glmnet = function (x, y, weights, offset = NULL, lambda = NULL, type.measure = c("mse",
+    "deviance", "class", "auc", "mae"), nfolds = 10, foldid,
+    grouped = TRUE, keep = FALSE, parallel = FALSE, ...)
 {
-    if (missing(type.measure)) 
+    if (missing(type.measure))
         type.measure = "default"
     else type.measure = match.arg(type.measure)
-    if (!is.null(lambda) && length(lambda) < 2) 
+    if (!is.null(lambda) && length(lambda) < 2)
         stop("Need more than one value of lambda for cv.glmnet")
     N = nrow(x)
-    if (missing(weights)) 
+    if (missing(weights))
         weights = rep(1, N)
     else weights = as.double(weights)
     y = drop(y)
     glmnet.call = match.call(expand.dots = TRUE)
-    sel = match(c("type.measure", "nfolds", "foldid", "grouped", 
+    sel = match(c("type.measure", "nfolds", "foldid", "grouped",
         "keep"), names(glmnet.call), F)
-    if (any(sel)) 
+    if (any(sel))
         glmnet.call = glmnet.call[-sel]
     glmnet.call[[1]] = as.name("glmnet")
-    glmnet.object = glmnet(x, y, weights = weights, offset = offset, 
+    glmnet.object = glmnet(x, y, weights = weights, offset = offset,
         lambda = lambda, ...)
     glmnet.object\$call = glmnet.call
     is.offset = glmnet.object\$offset
@@ -266,56 +52,56 @@
         nz = sapply(nz, function(x) sapply(x, length))
         nz = ceiling(apply(nz, 1, median))
     }
-    else nz = sapply(predict(glmnet.object, type = "nonzero"), 
+    else nz = sapply(predict(glmnet.object, type = "nonzero"),
         length)
-    if (missing(foldid)) 
+    if (missing(foldid))
         foldid = sample(rep(seq(nfolds), length = N))
     else nfolds = max(foldid)
-    if (nfolds < 3) 
+    if (nfolds < 3)
         stop("nfolds must be bigger than 3; nfolds=10 recommended")
     outlist = as.list(seq(nfolds))
     if (parallel && require(foreach)) {
-        outlist = foreach(i = seq(nfolds), .packages = c("glmnet")) %dopar% 
+        outlist = foreach(i = seq(nfolds), .packages = c("glmnet")) %dopar%
             {
                 sel = foldid == i
-                if (is.matrix(y)) 
+                if (is.matrix(y))
                   y_sub = y[!sel, ]
                 else y_sub = y[!sel]
-                if (is.offset) 
+                if (is.offset)
                   offset_sub = as.matrix(offset)[!sel, ]
                 else offset_sub = NULL
-                glmnet(x[!sel, , drop = FALSE], y_sub, lambda = lambda, 
-                  offset = offset_sub, weights = weights[!sel], 
+                glmnet(x[!sel, , drop = FALSE], y_sub, lambda = lambda,
+                  offset = offset_sub, weights = weights[!sel],
                   ...)
             }
     }
     else {
         for (i in seq(nfolds)) {
             sel = foldid == i
-            if (is.matrix(y)) 
+            if (is.matrix(y))
                 y_sub = y[!sel, ]
             else y_sub = y[!sel]
-            if (is.offset) 
+            if (is.offset)
                 offset_sub = as.matrix(offset)[!sel, ]
             else offset_sub = NULL
-            outlist[[i]] = glmnet(x[!sel, , drop = FALSE], 
-                y_sub, lambda = lambda, offset = offset_sub, 
+            outlist[[i]] = glmnet(x[!sel, , drop = FALSE],
+                y_sub, lambda = lambda, offset = offset_sub,
                 weights = weights[!sel], ...)
         }
     }
     fun = paste("cv", class(glmnet.object)[[1]], sep = ".")
-    cvstuff = do.call(fun, list(outlist, lambda, x, y, weights, 
+    cvstuff = do.call(fun, list(outlist, lambda, x, y, weights,
         offset, foldid, type.measure, grouped, keep))
     cvm = cvstuff\$cvm
     cvsd = cvstuff\$cvsd
     cvname = cvstuff\$name
 
-    out = list(lambda = lambda, cvm = cvm, cvsd = cvsd, cvup = cvm + 
+    out = list(lambda = lambda, cvm = cvm, cvsd = cvsd, cvup = cvm +
         cvsd, cvlo = cvm - cvsd, nzero = nz, name = cvname, glmnet.fit = glmnet.object)
-    if (keep) 
+    if (keep)
         out = c(out, list(fit.preval = cvstuff\$fit.preval, foldid = foldid))
 
-    lamin = if (type.measure == "auc") 
+    lamin = if (type.measure == "auc")
         getmin(lambda, -cvm, cvsd)
     else getmin(lambda, cvm, cvsd)
     out = c(out, as.list(lamin))
@@ -342,7 +128,7 @@
 
 mdsPlot = function(dm,myTitle,groups=NA,outpdfname,transpose=T)
 {
-  
+
   samples = colnames(dm)
   mt = myTitle
   pcols=c('maroon')
@@ -355,11 +141,11 @@
   mydata = dm
   if (transpose==T)
   {
-  mydata = t(dm) 
+  mydata = t(dm)
   }
   npred = ncol(mydata)
-  d = dist(mydata) 
-  fit = cmdscale(d,eig=TRUE, k=min(10,npred-2))  
+  d = dist(mydata)
+  fit = cmdscale(d,eig=TRUE, k=min(10,npred-2))
   xmds = fit\$points[,1]
   ymds = fit\$points[,2]
   pdf(outpdfname)
@@ -388,7 +174,7 @@
   formstring=paste("y ~",cn)
   form = as.formula(formstring)
   ok = complete.cases(x)
-  
+
   if (sum(ok) < length(ok)) {
     x = x[ok,]
     yvec = yvec[ok]
@@ -408,9 +194,6 @@
   return(p1)
 }
 
-
-
-
 getpredp_cox = function(x,time,status,id,predict_at)
 {
   cols = colnames(x)
@@ -419,13 +202,13 @@
        return(NA)
        }
   cn = paste(colnames(x), collapse = ' + ')
-  
+
   formstring=paste("Surv(time, status) ~",cn)
-  
+
   form = as.formula(formstring)
-  
+
   ok = complete.cases(x)
-  
+
   if (sum(ok) < length(ok)) {
     x = x[ok,]
     time = time[ok]
@@ -444,7 +227,6 @@
 }
 
 
-
 dolasso_cox = function(x,y,debugOn=F,maxsteps=10000,nfold=10,xcolnames,ycolnames,optLambda='lambda.1se',out_full=F,out_full_file=NA,
                              out_pred=F,out_pred_file=NA,cox_id=NA, descr='Cox test',do_standard=F,alpha=0.9,penalty,predict_at,mdsplots=F)
 {
@@ -457,7 +239,7 @@
       if (class(p) == "try-error")
       {
         print.noquote(paste('Unable to produce predictors in sample space mds plot',p))
-      }  
+      }
       outpdfname = 'cox_samples_in_x_space_MDS.pdf'
       p = try({mdsPlot(x,'samples in measurement space',groups=y,outpdfname=outpdfname,transpose=F) },T)
       if (class(p) == "try-error")
@@ -468,7 +250,7 @@
   if (is.na(predict_at)) { predict_at = quantile(y) }
   message(paste('@@@ Cox model will be predicted at times =',paste(predict_at,collapse=',')))
   do_standard = do_standard
-  standardize = do_standard 
+  standardize = do_standard
   normalize = do_standard
   p = try({larsres = glmnet(x,y,family='cox',standardize=standardize,alpha=alpha,penalty.factor=penalty )},T)
   if (class(p) == "try-error")
@@ -492,13 +274,13 @@
   try(
       {
       pdf(outpdf)
-      plot(larsres,main='cox glmnet',label=T) 
+      plot(larsres,main='cox glmnet',label=T)
       grid()
       dev.off()
       },T)
-  
+
   larscv = NA
- 
+
   p = try({larscv=ross.cv.glmnet(x,y,family=fam,type.measure='deviance',penalty=penalty)},T)
   if (class(p) == "try-error") {
      print.noquote(paste('Unable to cross validate your data',p))
@@ -588,23 +370,23 @@
     sink()
     return(NA)
   }
-  
+
   mt = paste('Glmnet fraction deviance for',target)
   outpdf = paste(target,'glmnetPath.pdf',sep='_')
   pdf(outpdf)
   plot(larsres,main=mt,label=T)
   grid()
   dev.off()
-  
+
   outpdf = paste(target,'glmnetDeviance.pdf',sep='_')
-  
+
   mt2 = paste('Glmnet lambda for',target)
-  
+
   pdf(outpdf)
   plot(larsres,xvar="lambda",main=mt2,label=T)
   grid()
   dev.off()
-  
+
   larscv = NA
   if (fam=="binomial") {
     tmain = paste(target,'AUC')
@@ -621,13 +403,12 @@
     sink()
     return(NA)
   }
-  
-  
+
   pdf(outpdf)
   plot(larscv,main=tmain)
   grid()
   dev.off()
-  
+
   lse = larscv\$cvhits.1se
   lmin = larscv\$cvhits.min
   tot = lse + lmin
@@ -637,7 +418,7 @@
   print.noquote(nzhits)
   out_nz_file = paste(target,'cross_validation_model_counts.xls',sep='_')
   write.table(nzhits,out_nz_file,quote=FALSE, sep="\t",row.names=F)
-  
+
   ipenalty = c(0,penalty)
   if (optLambda == 'lambda.min') {
     best_lambda = larscv\$lambda.min
@@ -747,7 +528,7 @@
 
 corPlot=function(xdat=c(),main='main title',is_raw=T)
 {
-  library(pheatmap)   
+  library(pheatmap)
   library(gplots)
   if (is_raw) {
     cxdat = cor(xdat,method="spearman",use="pairwise.complete.obs")
@@ -762,17 +543,15 @@
 }
 
 
-
-
 runTest = function(n=10)
-{  
+{
   set.seed (NULL)
   Y = data.frame(y1=runif (n),y2=runif(n))
   Xv <- runif(n*n)
   X <- matrix(Xv, nrow = n, ncol = n)
-  
+
   mydf <- data.frame(Y, X)
-  
+
   regres_out = dolasso_generic(predvars=X,depvars=Y,debugOn=T,p.cutoff = 0.05,maxsteps=10000,nfold=10,
                                descr='randomdata',do_standard=do_standard,defaultFam="gaussian",alpha=0.05)
   return(regres_out)
@@ -900,6 +679,218 @@
 
 </configfile>
 </configfiles>
+  <inputs>
+     <param name="title" type="text" value="lasso test" size="80" label="Title for job outputs" help="Typing a short, meaningful text here will help remind you (and explain to others) what the outputs represent">
+      <sanitizer invalid_char="">
+        <valid initial="string.letters,string.digits"><add value="_" /> </valid>
+      </sanitizer>
+    </param>
+    <param name="input1"  type="data" format="tabular" label="Select an input tabular text file from your history. Rows represent samples; Columns are measured phenotypes"
+    multiple='False' optional="False" help="Tabular text data with samples as rows, phenotypes as columns with a header row of column identifiers" />
+    <param name="xvar_cols" label="Select columns containing numeric variables to use as predictor (x) variables" type="data_column" data_ref="input1" numerical="False"
+         multiple="True" use_header_names="True" force_select="True" />
+    <param name="force_xvar_cols" label="Select numeric columns containing variables ALWAYS included as predictors in cross validation" type="data_column" data_ref="input1" numerical="False"
+         multiple="True" use_header_names="True" force_select="False"/>
+    <conditional name="model">
+        <param name="fam" type="select" label="GLM Link function for models"
+             help="If the dependent variable is binary it will automatically be modelled as binomial, whatever is selected here">
+                <option value="gaussian" selected="true">Gaussian - continuous dependent (y)</option>
+                <option value="binomial">Binomial dependent variables</option>
+                <option value="poisson">Poisson (eg counts)</option>
+                <option value="cox">Cox models - require special setup for y variables - see below</option>
+        </param>
+        <when value="gaussian">
+            <param name="yvar_cols" label="Select numeric columns containing variables to use as the dependent (y) in elasticnet" type="data_column" data_ref="input1" numerical="False"
+             multiple="True" use_header_names="True"  help = "If multiple, each will be modelled against all the x variables and reported separately." force_select="True"/>
+            <param name="output_full" type="hidden" value='F' />
+            <param name="output_pred" type="hidden" value='F' />
+              <param name="cox_id" label="Select column containing a unique sample identifier"
+                 help = "Only needed if you want sample-specific predicted values output downstream."
+                 type="data_column" data_ref="input1" numerical="False" force_select="True"
+                 multiple="False" use_header_names="True" />
+      </when>
+        <when value="binomial">
+            <param name="yvar_cols" label="Select numeric columns containing variables to use as the dependent (y) in elasticnet" type="data_column" data_ref="input1" numerical="False"
+             multiple="True" use_header_names="True"  help = "If multiple, each will be modelled against all the x variables and reported separately." force_select="True"/>
+             <param name="output_full" type="hidden" value='F' />
+             <param name="output_pred" type="select" label="Create a tabular output with predicted values for each subject from the optimal model for (eg) NRI estimates" >
+                <option value="F" selected="true">No predicted value output file</option>
+                <option value="T">Create a predicted value output file</option>
+             </param>
+              <param name="cox_id" label="Select column containing a unique sample identifier"
+                 help = "Only needed if you want sample-specific predicted values output downstream."
+                 type="data_column" data_ref="input1" numerical="False" force_select="True"
+                 multiple="False" use_header_names="True" />
+             <param name="predict_at" type="hidden" value='' />
+
+        </when>
+        <when value="poisson">
+            <param name="yvar_cols" label="Select columns containing variables to use as the dependent (y) in elasticnet" type="data_column" data_ref="input1" numerical="True"
+             multiple="True" use_header_names="True"  help = "If multiple, each will be modelled against all the x variables and reported separately." force_select="True"/>
+             <param name="output_full" type="hidden" value='F' />
+             <param name="output_pred" type="hidden" value='F' />
+             <param name="predict_at" type="hidden" value='' />
+              <param name="cox_id" label="Select column containing a unique sample identifier"
+                 help = "Optional. Only needed if you want sample-specific predicted values output downstream."
+                 type="data_column" data_ref="input1" numerical="True" force_select="False"
+                 multiple="False" use_header_names="True" />
+        </when>
+        <when value="cox">
+             <param name="cox_time" label="Select column containing time under observation for Cox regression"
+                 type="data_column" data_ref="input1" numerical="True" force_select="True"
+                 multiple="False" use_header_names="True"  help = "This MUST contain a time period - eg continuous years or days to failure or right censoring"/>
+             <param name="cox_status" label="Select column containing status = 1 for outcome of interest at the end of the time under observation or 0 for right censoring"
+                 type="data_column" data_ref="input1" numerical="True" force_select="True"
+                 multiple="False" use_header_names="True"  help = "This MUST contain 1 for subjects who had an event at that time or 0 for a right censored observation"/>
+              <param name="cox_id" label="Select column containing a unique sample identifier"
+                 help = "Optional. Only needed if you want sample-specific predicted values output downstream."
+                 type="data_column" data_ref="input1" numerical="False" force_select="False"
+                 multiple="False" use_header_names="True" />
+             <param name="output_full" type="select" label="Create a tabular output with coefficients for all predictors" >
+                <option value="F" selected="true">No full model output file</option>
+                <option value="T">Create a full model output file</option>
+             </param>
+             <param name="output_pred" type="select" label="Create a tabular output with predicted values for each subject from the optimal model for (eg) NRI estimates" >
+                <option value="F" selected="true">No predicted value output file</option>
+                <option value="T">Create a predicted value output file</option>
+             </param>
+             <param name="predict_at"  type="text" value='' label="Provide a comma separated list of times to make a prediction for each subject"
+                 optional="True" help="Default (blank) will return predictions at 0%,25%,50%,75%,100% of the observed times which should be informative" />
+
+        </when>
+    </conditional>
+    <param name="optLambda" type="select" label="Value to use when reporting optimal model and coefficients" help="lambda.min will retain more predictors - lambda.1se will be more parsimonious">
+            <option value="lambda.1se" selected="true">Lambda + 1 SE of min MSE or AUC (fewer coefficients - more false negatives)</option>
+            <option value="lambda.min">Lambda at min MSE or max AUC (more coefficients - more false positives)</option>
+    </param>
+    <param name="logxform_cols"  optional="True" label="Select numeric columns to be log transformed before use as predictors or dependent variables" type="data_column"
+        data_ref="input1" numerical="True" multiple="True" use_header_names="True" help = "The wisdom of doing this depends entirely on your predictors - eg can help diminish long-tailed outlier influence"
+        force_select="False"/>
+    <param name="do_standard" type="select" label="Standardise x vars"
+         help="If all measurements on same scale, may not be needed. Coefficients are always returned on the original scale.">
+            <option value="False" selected="true">No standardisation of predictors</option>
+            <option value="True">Standardise predictors before model</option>
+    </param>
+    <param name="mdsplots" type="select" label="Generate MDS plots of samples in measurement space and measurements in sample space" >
+            <option value="False" selected="true">No MDS plots</option>
+            <option value="True">Yes create MDS plots</option>
+    </param>
+    <param name="alpha" type="float" value="0.95" size="5" min="0.01" max="1.0" label="Alpha - see glmnet docs. 1 for pure lasso; values near 0 approach ridge regression"
+     help="Default 0.95 allows lasso to cope better with expected predictor collinearity. Use (eg) 0.5 for hybrid regularised regression or (eg) 0.025 for near-ridge regression"/>
+    <param name="nfold" type="integer" value="10" size="5" label="Number of folds for internal cross validation"
+     help="Default of 10 is usually ok"/>
+  </inputs>
+  <outputs>
+    <data format="html" name="html_file" label="${title}.html"/>
+    <data format="tabular" name="model_file" label="${title}_modelres.xls"/>
+    <data format="tabular" name="output_full_file" label="${title}_full_cox_model.xls">
+        <filter>model['output_full'] == 'T'</filter>
+    </data>
+    <data format="tabular" name="output_pred_file" label="${title}_predicted_from_model.xls">
+        <filter>model['output_pred'] == 'T'</filter>
+    </data>
+  </outputs>
+ <tests>
+    <test>
+     <param name='input1' value='cox_test.xls' ftype='tabular' />
+     <param name='treatment_name' value='case' />
+     <param name='title' value='Cox glmnet test' />
+     <param name='nfold' value='10' />
+     <param name='logxform_cols' value='' />
+     <param name='alpha' value='0.95' />
+     <param name='do_standard' value="True" />
+     <param name='cox_time' value='1' />
+     <param name='cox_status' value='2' />
+     <param name='cox_id' value='1' />
+     <param name='predict_at' value='' />
+     <param name='fam' value='cox' />
+     <param name='yvar_cols' value='' />
+     <param name='xvar_cols' value='3,4,5' />
+     <param name='force_xvar_cols' value='3' />
+     <param name='output_full' value='F' />
+     <param name='output_pred' value='F' />
+     <output name='model_file' file='coxlassotest_modelres.xls'>
+          <assert_contents>
+                <has_text text="rhubarb" />
+                <has_text text="TRUE" />
+                <!-- &#009; is XML escape code for tab -->
+                <!-- has_line line="regulator&#009;partial_likelihood&#009;forced_in&#009;glmnet_model&#009;best_lambda" / -->
+                <has_line line="regulator&#009;partial_likelihood&#009;forced_in&#009;glmnet_model&#009;best_lambda&#009;lambdaChoice&#009;alpha" />
+                <has_n_columns n="7" />
+           </assert_contents>
+     </output>
+     <output name='html_file' file='coxlassotest.html'  compare='diff' lines_diff='16' />
+    </test>
+</tests>
+<help>
+
+**Before you start**
+
+Please read the glmnet documentation at glmnet_.
+
+This Galaxy wrapper merely exposes that code; the glmnet_ documentation is essential reading
+before you can expect useful results here.
+
+**What it does**
+
+From documentation at glmnet_ ::
+
+ Glmnet is a package that fits a generalized linear model via penalized maximum likelihood.
+ The regularization path is computed for the lasso or elasticnet penalty at a grid of values for the regularization parameter lambda.
+ The algorithm is extremely fast, and can exploit sparsity in the input matrix x.
+ It fits linear, logistic and multinomial, poisson, and Cox regression models.
+ A variety of predictions can be made from the fitted models.
+
+Internal cross-validation is used to optimise the choice of lambda, based on CV AUC for logistic (binomial outcome) models or CV MSE for gaussian models.
+
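+As an illustration only (a minimal sketch on simulated data, not part of this tool), the underlying glmnet/cv.glmnet calls this wrapper drives look roughly like::
+
+ ## toy sketch: cross-validated elastic net, comparing the parsimonious
+ ## lambda.1se model with the error-minimising lambda.min model
+ library(glmnet)
+ set.seed(42)
+ x <- matrix(rnorm(100 * 500), nrow = 100)   # 100 samples, 500 predictors
+ y <- rnorm(100)                             # continuous dependent variable
+ cvfit <- cv.glmnet(x, y, family = "gaussian", alpha = 0.95, nfolds = 10)
+ coef(cvfit, s = "lambda.1se")   # fewer non-zero coefficients
+ coef(cvfit, s = "lambda.min")   # coefficients at minimum CV error
+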
+**Warning about the tyranny of dimensionality**
+
+Yes, this package will select 'optimal' models even when you (optimistically) supply more predictors than you have cases.
+The model returned is unlikely to represent the only informative regularisation path through your data - if you run repeatedly with
+exactly the same settings, you will probably see many different models being selected.
+This is not a software bug - the real problem is that you just don't have enough information in your data.
+
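+A toy illustration of that instability (simulated pure-noise data, not output of this tool)::
+
+ ## repeated cross-validated lasso fits with far more predictors than samples:
+ ## the set of selected predictors typically changes from run to run
+ library(glmnet)
+ x <- matrix(rnorm(50 * 2000), nrow = 50)   # 50 samples, 2000 noise predictors
+ y <- rnorm(50)
+ sizes <- replicate(5, {
+     cvfit <- cv.glmnet(x, y, nfolds = 10)
+     cf <- as.matrix(coef(cvfit, s = "lambda.min"))
+     sum(cf[-1, 1] != 0)   # non-zero coefficients, excluding the intercept
+ })
+ sizes   # usually differs between repeats, despite identical settings
+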
+Sufficiently big jobs will take a while (eg each lasso regression with 20k features on 1k samples takes about 2-3 minutes on our aged cluster)
+
+**Input**
+
+Assuming you have more measurements than samples, you supply data as a tabular text file where each row is a sample and columns
+are variables. You specify which columns are predictor (x) variables and which are dependent (y) variables; an example layout is shown below. Each of multiple
+dependent variable columns will be modelled against all the x variables and reported independently. Predictors can be forced into the model.
+
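+A hypothetical fragment of such a file (tab separated, header row of column identifiers, a unique id column, then measurement columns)::
+
+ id      time   status  age   marker1  marker2
+ subj01  3.2    1       64    0.91     12.4
+ subj02  7.9    0       58    1.22     10.1
+ subj03  1.4    1       71    0.45     15.8
+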
+**Output**
+
+For each selected dependent regression variable, a brief report is produced of the model coefficients at the
+'optimal' nfold-CV value of lambda.
+
+**Predicted event probabilities for Cox and Logistic models**
+
+If you want to compare (eg) two competing clinical predictions, there's a companion generic NRI tool
+for predicted event probabilities. It estimates dozens of measures of improvement in prediction. It currently only works when the same
+subjects (identical ids) appear in both predictions, but could probably be extended to independent sample predictions.
+
+Given a model, we can generate a predicted p (for status 1) in binomial or Cox frameworks, so models can be evaluated in terms of NRI.
+Of course, estimates are likely substantially inflated over 'real world' performance by being estimated from the same sample - but you probably
+already knew that since you were smart enough to reach this far down into the on-screen help. The author salutes you, intrepid reader!
+
+It may seem an odd thing to do, but we can predict p for an event for each subject from our original data, given a parsimonious model. Doing
+this for two separate models (eg, forcing an additional known explanatory measurement into the new model) allows comparison of the two models'
+predicted status for each subject, or of the same model in independent populations to see how badly it does.
+
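+A minimal sketch (simulated data; the companion NRI tool itself is separate) of how such per-subject predicted probabilities can be generated from two competing binomial fits::
+
+ ## two nested binomial glmnet models; their per-subject predicted probabilities
+ ## of status 1 are the sort of input an NRI-style comparison consumes
+ library(glmnet)
+ set.seed(1)
+ x <- matrix(rnorm(200 * 30), nrow = 200)
+ y <- rbinom(200, 1, 0.3)
+ base <- cv.glmnet(x[, 1:20], y, family = "binomial", type.measure = "auc")
+ full <- cv.glmnet(x,         y, family = "binomial", type.measure = "auc")
+ p_base <- predict(base, newx = x[, 1:20], s = "lambda.1se", type = "response")
+ p_full <- predict(full, newx = x,         s = "lambda.1se", type = "response")
+ ## in-sample predictions, so optimistic - exactly as cautioned above
+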
+**Attributions**
+
+glmnet_ is the R package exposed by this Galaxy tool.
+
+Galaxy_ (that's what you are using right now!) glues everything together.
+
+Otherwise, all code and documentation comprising this tool was written by Ross Lazarus and is
+licensed to you under the LGPL_, like other rgenetics artefacts.
+
+.. _LGPL: http://www.gnu.org/copyleft/lesser.html
+.. _glmnet: http://web.stanford.edu/~hastie/glmnet/glmnet_alpha.html
+.. _Galaxy: http://getgalaxy.org
+</help>
+
 <citations>
     <citation type="bibtex">
 @Article{Friedman2010, title = {Regularization Paths for Generalized Linear Models via Coordinate Descent},
@@ -917,6 +908,3 @@
     </citation>
 </citations>
 </tool>
-
-
-