diff operation_filter.py @ 2:7b226a8a6722 draft

description: planemo upload commit 33927a87ba2eee9bf0ecdd376a66241b17b3d734
author: devteam
date: Tue, 13 Oct 2015 12:50:01 -0400
parents: 6b81ee6c18d3
children: ad25eb2c422d
--- a/operation_filter.py	Thu Apr 10 13:47:13 2014 -0400
+++ b/operation_filter.py	Tue Oct 13 12:50:01 2015 -0400
@@ -1,7 +1,4 @@
 # runs after the job (and after the default post-filter)
-import os
-from galaxy import eggs
-from galaxy import jobs
 from galaxy.tools.parameters import DataToolParameter
 
 from galaxy.jobs.handler import JOB_ERROR
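
Note: the hunk above prunes imports the module never references; galaxy.eggs in particular belonged to Galaxy's retired egg-based dependency loader. A quick, hedged way to cross-check that an import really is dead before deleting it — a standalone sketch using only the stdlib ast module, run against this file:

    # Sketch: list top-level import bindings that no Name node ever uses.
    import ast

    tree = ast.parse(open('operation_filter.py').read())
    imported = [alias.asname or alias.name.split('.')[0]
                for node in ast.walk(tree)
                if isinstance(node, (ast.Import, ast.ImportFrom))
                for alias in node.names]
    used = set(node.id for node in ast.walk(tree) if isinstance(node, ast.Name))
    print([name for name in imported if name not in used])
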
@@ -12,11 +9,6 @@
 except:
     from sets import Set as set
 
-#def exec_before_process(app, inp_data, out_data, param_dict, tool=None):
-#    """Sets the name of the data"""
-#    dbkeys = sets.Set( [data.dbkey for data in inp_data.values() ] ) 
-#    if len(dbkeys) != 1:
-#        raise Exception, '<p><font color="yellow">Both Queries must be from the same genome build</font></p>'
 
 def validate_input( trans, error_map, param_values, page_param_map ):
     dbkeys = set()
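
Note: the deleted comment block was a pre-job hook written in long-obsolete syntax (the raise-with-comma statement and the pre-builtin sets.Set class); the live validate_input() below enforces the same rule. What it did, restated as a hedged modern sketch with stand-in objects (Bunch and the sample inp_data replace Galaxy's parameter-name-to-dataset mapping):

    # Sketch of the old dbkey check in current syntax.
    class Bunch(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)

    inp_data = {'input1': Bunch(dbkey='hg19'), 'input2': Bunch(dbkey='mm9')}
    dbkeys = set(data.dbkey for data in inp_data.values())
    if len(dbkeys) != 1:
        print('Both queries must be from the same genome build')
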
@@ -25,7 +17,7 @@
     for name, param in page_param_map.iteritems():
         if isinstance( param, DataToolParameter ):
             # for each dataset parameter
-            if param_values.get(name, None) != None:
+            if param_values.get(name, None) is not None:
                 dbkeys.add( param_values[name].dbkey )
                 data_params += 1
                 # check meta data
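
Note: replacing != None with "is not None" is more than style. The == and != operators dispatch to a class's comparison hooks, which a wrapper object could override, while "is" tests identity against the None singleton and cannot be intercepted. A minimal demonstration that runs on both Python 2 and 3:

    # Sketch: a class whose comparison operators lie.
    class Odd(object):
        def __eq__(self, other):
            return True        # claims equality with everything, even None
        def __ne__(self, other):
            return False

    value = Odd()
    print(value != None)       # False -- hijacked by __ne__
    print(value is not None)   # True  -- identity test is override-proof
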
@@ -34,17 +26,15 @@
                     if isinstance( param.datatype, trans.app.datatypes_registry.get_datatype_by_extension( 'gff' ).__class__ ):
                         # TODO: currently cannot validate GFF inputs b/c they are not derived from interval.
                         pass
-                    else: # Validate interval datatype.
-                        startCol = int( param.metadata.startCol )
-                        endCol = int( param.metadata.endCol )
-                        chromCol = int( param.metadata.chromCol )
+                    else:  # Validate interval datatype.
+                        int( param.metadata.startCol )
+                        int( param.metadata.endCol )
+                        int( param.metadata.chromCol )
                         if param.metadata.strandCol is not None:
-                            strandCol = int ( param.metadata.strandCol )
-                        else:
-                            strandCol = 0
+                            int( param.metadata.strandCol )
                 except:
                     error_msg = "The attributes of this dataset are not properly set. " + \
-                    "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
+                        "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
                     error_map[name] = error_msg
             data_param_names.add( name )
     if len( dbkeys ) > 1:
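
Note: the rewritten validation block looks odd at first sight because the int(...) results are discarded. The calls survive purely for their side effect: when a column is unset, int(None) raises TypeError, which the bare except converts into the pencil-icon error message. A hedged stand-alone illustration of that validate-by-conversion pattern (check_columns and the plain dict are inventions for the example, not Galaxy API):

    # Sketch: conversion used only for the exception it may raise.
    def check_columns(metadata):
        try:
            int(metadata['startCol'])   # TypeError if the value is None
            int(metadata['endCol'])
            int(metadata['chromCol'])
        except (TypeError, ValueError, KeyError):
            return 'The attributes of this dataset are not properly set.'
        return None

    print(check_columns({'startCol': 1, 'endCol': 2, 'chromCol': 3}))    # None
    print(check_columns({'startCol': None, 'endCol': 2, 'chromCol': 3}))
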
@@ -55,38 +45,33 @@
         for name in data_param_names:
             error_map[name] = "A dataset of the appropriate type is required"
 
+
 # Commented out by INS, 5/30/2007.  What is the PURPOSE of this?
 def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     """Verify the output data after each run"""
-    items = out_data.items()
-
-    for name, data in items:
+    for data in out_data.values():
         try:
             if stderr and len( stderr ) > 0:
                 raise Exception( stderr )
 
-        except Exception, exc:
+        except Exception:
             data.blurb = JOB_ERROR
             data.state = JOB_ERROR
 
-## def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
-##     pass
-
 
 def exec_after_merge(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
         app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)
 
     # strip strand column if clusters were merged
-    items = out_data.items()
-    for name, data in items:
-        if param_dict['returntype'] == True:
+    for data in out_data.values():
+        if param_dict['returntype'] is True:
             data.metadata.chromCol = 1
             data.metadata.startCol = 2
             data.metadata.endCol = 3
         # merge always clobbers strand
         data.metadata.strandCol = None
-            
+
 
 def exec_after_cluster(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
@@ -94,6 +79,5 @@
 
     # strip strand column if clusters were merged
     if param_dict["returntype"] == '1':
-        items = out_data.items()
-        for name, data in items:
+        for data in out_data.values():
             data.metadata.strandCol = None
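
Note: after the patch, all three post-job hooks share one shape: delegate the stderr check to exec_after_process, then fix up output metadata in place. A self-contained sketch of the cluster hook's metadata pass with stand-in objects (Bunch and the sample param_dict replace Galaxy's dataset, metadata, and parameter objects, which are assumed here, not shown):

    # Sketch: what the cluster hook does to a merged output's metadata.
    class Bunch(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)

    data = Bunch(metadata=Bunch(strandCol=6), state='ok', blurb='done')
    out_data = {'output1': data}
    param_dict = {'returntype': '1'}

    if param_dict['returntype'] == '1':
        for d in out_data.values():
            d.metadata.strandCol = None  # merge always clobbers strand

    print(data.metadata.strandCol)       # None
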