Mercurial > repos > refinery-platform > refinery_test
changeset 1:b87749d7a24c draft default tip
planemo upload commit 4fb0a789956149e5a58f4e370d7fe14f4e8bcf79
| author | refinery-platform |
|---|---|
| date | Thu, 22 Feb 2018 14:14:03 -0500 |
| parents | 5b28174e774d |
| children | |
| files | refinery_file_splitter.py refinery_test_1-N.xml refinery_test_N-1.xml test-data/concat_output.txt test-data/file1.txt test-data/file2.txt test-data/file3.txt |
| diffstat | 7 files changed, 219 insertions(+), 0 deletions(-) [+] |
line wrap: on
line diff
#!/usr/bin/env python

'''
Test tool for splitting output files from Refinery Test Tools

@author: Scott Ouellette

Input: one text file
Output: N output files based on the amount of input files that
got concatenated from Refinery test tool runs

Runs on Python 2.7 and Python 3.

'''

import re
import argparse


def main(args):
    """Entry point: split the already-opened input file into N output files."""
    create_many_files(args.input_file)


def create_many_files(input_file):
    """Split *input_file* on the annotation lines added by the Refinery test
    tools and write each remaining chunk of original content to its own
    ``Output file N.txt`` in the current working directory.

    :param input_file: open file-like object supporting ``.read()``
    """
    # Split the file's content wherever we see an annotation line added by a
    # test tool run ("Output file name: ..." / "Input file name: ...").
    file_content = re.split("Output.*|Input.*", input_file.read())

    # Discard chunks that are only newlines (the gaps between annotation
    # lines) and trim the leading newlines the split leaves behind.
    sanitized_data = [
        data.lstrip("\n") for data in file_content if data.rstrip("\n")]

    # Create N output files based on the number of inputs run through the
    # test tools.  A context manager guarantees each file is flushed and
    # closed (the original leaked the file handles).
    for num, chunk in enumerate(sanitized_data, start=1):
        with open("Output file {}.txt".format(num), 'w') as out_file:
            out_file.write(chunk)


if __name__ == '__main__':
    version = "%(prog)s 0.1"
    description = "Test tool for running workflows on Galaxy platform from Refinery"
    # ArgumentParser(version=...) and type=file are Python-2-only spellings
    # that were removed in Python 3; the equivalents below behave the same on
    # Python 2.7 and also work on Python 3.
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--version', action='version', version=version)

    parser.add_argument('-i', '--in-file', dest='input_file',
                        type=argparse.FileType('r'), metavar='INPUT_FILE',
                        required=True, help='name of the input file')

    # check argument values for errors (e.g. an unreadable input file)
    try:
        args = parser.parse_args()
    except IOError as e:
        parser.error(e)

    main(args)
<?xml version='1.0' encoding='utf-8'?>
<!-- Galaxy tool wrapper: takes one concatenated input file and discovers the
     N per-input output files that refinery_file_splitter.py writes. -->
<tool id="refinery_test_1-N" name="Refinery test tool 1-N" version="0.1">
    <description>for testing Galaxy workflow execution from Refinery</description>
    <command>
        <![CDATA[
        ## Run the splitter tool upon a single input producing N outputs based on how many concatenated files our input was created from.
        $__tool_directory__/refinery_file_splitter.py -i $input_file
        ]]>
    </command>
    <inputs>
        <param name="input_file" format="txt" type="data" label="Input file"/>
    </inputs>
    <outputs>
        <!-- The splitter writes "Output file N.txt" files; discover_datasets
             picks them up by designation and extension. -->
        <data format="txt" name="report">
            <discover_datasets pattern="__designation_and_ext__" visible="true"/>
        </data>
    </outputs>
    <tests>
        <test>
            <param name="input_file" value="concat_output.txt"/>
            <discovered_dataset designation="Output file 1" ftype="txt">
                <assert_contents>
                    <has_line line="Contents of File 1:"/>
                </assert_contents>
            </discovered_dataset>
            <discovered_dataset designation="Output file 2" ftype="txt">
                <assert_contents>
                    <has_line line="Contents of File 2:"/>
                </assert_contents>
            </discovered_dataset>
            <discovered_dataset designation="Output file 3" ftype="txt">
                <assert_contents>
                    <has_line line="Contents of File 3:"/>
                </assert_contents>
            </discovered_dataset>
        </test>
    </tests>
    <help>
        .. class:: infomark

        **Purpose**

        To test Galaxy workflow execution and monitoring from Refinery.

        -----

        .. class:: infomark

        **Inputs and outputs**

        This wrapper will accept one input file with the concatenated data of N input files run through refinery test tools and will partition said data into N output files containing the aforementioned N input files' original contents
    </help>
</tool>
<?xml version='1.0' encoding='utf-8'?>
<!-- Galaxy tool wrapper: concatenates a dataset-collection list (N inputs)
     into one annotated output file via refinery_test_tool.py. -->
<tool id="refinery_test_N-1" name="Refinery test tool N-1" version="0.1">
    <description>for testing Galaxy workflow execution from Refinery</description>
    <command>
        <![CDATA[
        ## Argparse expects many values for a single arg to be in the form of: -i INPUT INPUT1 INPUT2 ...
        #set $inputs = " ".join([$input_file.__str__ for $input_file in $input_col])

        ## Run the test tool with the N inputs from our Dataset Collection
        $__tool_directory__/refinery_test_tool.py -i $inputs -o $output_file -e $exit_code -p $p_fail -s $sleep_time $stdout $stderr $empty_outfile
        ]]>
    </command>
    <inputs>
        <param name="input_col" type="data_collection" collection_type="list" label="Input file"/>
        <param name="sleep_time" type="integer" label="Sleep (seconds)" value="0" min="0"/>
        <param name="empty_outfile" type="boolean" label="Produce empty output file" truevalue="--empty_outfile" falsevalue=""/>
        <param name="p_fail" type="float" label="Probability of failure [0.0, 1.0]" value="0.0" min="0.0" max="1.0"/>
        <param name="stdout" type="boolean" label="Write to standard out" truevalue="--stdout" falsevalue=""/>
        <param name="stderr" type="boolean" label="Write to standard error" truevalue="--stderr" falsevalue=""/>
        <param name="exit_code" type="integer" label="Exit code [0, 255]" value="0" min="0" max="255"/>
    </inputs>
    <outputs>
        <data format="txt" name="output_file" label="Refinery Test Tool N-1 Output" />
    </outputs>
    <!-- Treat any non-zero exit code or any output on stdout/stderr as a
         fatal tool error, so the failure-injection parameters are detected. -->
    <stdio>
        <exit_code range="1:" level="fatal" />
        <regex match=".+" source="stdout" level="fatal" description="Tool produced output to standard out" />
        <regex match=".+" source="stderr" level="fatal" description="Tool produced output to standard error" />
    </stdio>
    <tests>
        <test>
            <param name="input_col">
                <collection type="list">
                    <element name="1" value="file1.txt"/>
                    <element name="2" value="file2.txt"/>
                    <element name="3" value="file3.txt"/>
                </collection>
            </param>
            <param name="sleep_time" type="integer" label="Sleep (seconds)" value="0" min="0"/>
            <param name="empty_outfile" type="boolean" label="Produce empty output file" truevalue="--empty_outfile" falsevalue=""/>
            <param name="p_fail" type="float" label="Probability of failure [0.0, 1.0]" value="0.0" min="0.0" max="1.0"/>
            <param name="stdout" type="boolean" label="Write to standard out" truevalue="--stdout" falsevalue=""/>
            <param name="stderr" type="boolean" label="Write to standard error" truevalue="--stderr" falsevalue=""/>
            <param name="exit_code" type="integer" label="Exit code [0, 255]" value="0" min="0" max="255"/>
            <output name="output_file">
                <assert_contents>
                    <has_text text="Contents of File 1" />
                    <has_text text="Contents of File 2" />
                    <has_text text="Contents of File 3" />
                </assert_contents>
            </output>
        </test>
    </tests>
    <help>
        .. class:: infomark

        **Purpose**

        To test Galaxy workflow execution and monitoring from Refinery.

        -----

        .. class:: infomark

        **Inputs and outputs**

        This wrapper will accept a Dataset collection list as input and produce a single output file with the concatenated, annotated content of each element in the Dataset Collection.

        *Note:* You must set the "Probability of failure" parameter to a non-zero value
        for "Write to standard out", "Write to standard error" or "Exit code" to take effect.
    </help>
</tool>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test-data/concat_output.txt Thu Feb 22 14:14:03 2018 -0500 @@ -0,0 +1,29 @@ +Output file name: "<base_path>"/galaxy/database/files/009/dataset_9980.dat + +Input file name: "<base_path>"/galaxy/database/files/009/dataset_9977.dat + +Output file name: "<base_path>"/galaxy/database/files/009/dataset_9977.dat + +Input file name: "<base_path>"/galaxy/database/files/009/dataset_9808.dat + +Contents of File 3: +TEST TEST TEST TEST +TEST TEST TEST TEST +Input file name: "<base_path>"/galaxy/database/files/009/dataset_9978.dat + +Output file name: "<base_path>"/galaxy/database/files/009/dataset_9978.dat + +Input file name: "<base_path>"/galaxy/database/files/009/dataset_9807.dat + +Contents of File 2: +TEST TEST TEST TEST +TEST TEST TEST TEST +Input file name: "<base_path>"/galaxy/database/files/009/dataset_9979.dat + +Output file name: "<base_path>"/galaxy/database/files/009/dataset_9979.dat + +Input file name: "<base_path>"/galaxy/database/files/009/dataset_9806.dat + +Contents of File 1: +TEST TEST TEST TEST +TEST TEST TEST TEST
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test-data/file1.txt Thu Feb 22 14:14:03 2018 -0500 @@ -0,0 +1,4 @@ +Contents of File 1: +TEST TEST TEST TEST +TEST TEST TEST TEST +
