# -*- coding: utf-8 -*-

import logging
import os
import subprocess

from galaxy.datatypes import data
from galaxy.datatypes.data import get_file_peek
from galaxy.datatypes.metadata import MetadataElement

log = logging.getLogger( __name__ )

def count_special_lines( word, filename, invert=False ):
    """
    Count the number of lines in 'filename' that match the regular
    expression 'word'.  grep is used to speed up the search and the
    counting; the number of matching lines is returned.
    """
    try:
        cmd = [ 'grep', '-c' ]
        if invert:
            cmd.append( '-v' )
        cmd.extend( [ word, filename ] )
        out = subprocess.Popen( cmd, stdout=subprocess.PIPE )
        return int( out.communicate()[0].split()[0] )
    except Exception:
        pass
    return 0

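# Example usage (a sketch; 'seqs.fasta' is a hypothetical file path):
#   count_special_lines( '^>', 'seqs.fasta' )                # -> number of FASTA records
#   count_special_lines( '^>', 'seqs.fasta', invert=True )   # -> number of non-header lines
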
def count_lines( filename, non_empty=False ):
    """
    Count the number of lines in 'filename'; if 'non_empty' is set,
    blank lines are excluded from the count.
    """
    try:
        if non_empty:
            out = subprocess.Popen( [ 'grep', '-cve', r'^\s*$', filename ], stdout=subprocess.PIPE )
        else:
            out = subprocess.Popen( [ 'wc', '-l', filename ], stdout=subprocess.PIPE )
        return int( out.communicate()[0].split()[0] )
    except Exception:
        pass
    return 0

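# Example usage (a sketch; 'alignments.stockholm' is a hypothetical file path):
#   count_lines( 'alignments.stockholm' )                  # -> total number of lines
#   count_lines( 'alignments.stockholm', non_empty=True )  # -> blank lines excluded

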
class Stockholm_1_0( data.Text ):
    file_ext = "stockholm"

    MetadataElement( name="number_of_alignments", default=0, desc="Number of multiple alignments", readonly=True, visible=True, optional=True, no_value=0 )

    def set_peek( self, dataset, is_multi_byte=False ):
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            if dataset.metadata.number_of_alignments == 1:
                dataset.blurb = "1 alignment"
            else:
                dataset.blurb = "%s alignments" % dataset.metadata.number_of_alignments
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'

    def sniff( self, filename ):
        """
        Stockholm 1.0 files start with a '# STOCKHOLM 1.0' header line.
        """
        return count_special_lines( '^#[[:space:]][[:space:]]*STOCKHOLM[[:space:]][[:space:]]*1\.0', filename ) > 0

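    # A file head that the sniff() above accepts, e.g.:
    #   # STOCKHOLM 1.0
    #   seq1   ACGUACGU
    #   //
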
    def set_meta( self, dataset, **kwd ):
        """
        Set the number of alignments in dataset.
        """
        dataset.metadata.number_of_alignments = count_special_lines( '^#[[:space:]][[:space:]]*STOCKHOLM[[:space:]][[:space:]]*1\.0', dataset.file_name )

    def split( cls, input_datasets, subdir_generator_function, split_params ):
        """
        Split the input files by alignment records.
        """
        if split_params is None:
            return None

        if len( input_datasets ) > 1:
            raise Exception( "STOCKHOLM-file splitting does not support multiple files" )
        input_files = [ ds.file_name for ds in input_datasets ]

        chunk_size = None
        if split_params['split_mode'] == 'number_of_parts':
            raise Exception( 'Split mode "%s" is currently not implemented for STOCKHOLM-files.' % split_params['split_mode'] )
        elif split_params['split_mode'] == 'to_size':
            chunk_size = int( split_params['split_size'] )
        else:
            raise Exception( 'Unsupported split mode %s' % split_params['split_mode'] )

        def _read_stockholm_records( filename ):
            # Collect lines until the '//' record terminator, then yield
            # one complete alignment record at a time.
            lines = []
            with open( filename ) as handle:
                for line in handle:
                    lines.append( line )
                    if line.strip() == '//':
                        yield lines
                        lines = []

        def _write_part_stockholm_file( accumulated_lines ):
            part_dir = subdir_generator_function()
            part_path = os.path.join( part_dir, os.path.basename( input_files[0] ) )
            with open( part_path, 'w' ) as part_file:
                part_file.writelines( accumulated_lines )

        try:
            stockholm_records = _read_stockholm_records( input_files[0] )
            stockholm_lines_accumulated = []
            for counter, stockholm_record in enumerate( stockholm_records, start=1 ):
                stockholm_lines_accumulated.extend( stockholm_record )
                if counter % chunk_size == 0:
                    _write_part_stockholm_file( stockholm_lines_accumulated )
                    stockholm_lines_accumulated = []
            if stockholm_lines_accumulated:
                _write_part_stockholm_file( stockholm_lines_accumulated )
        except Exception as e:
            log.error( 'Unable to split files: %s' % str( e ) )
            raise
    split = classmethod( split )
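
    # Usage sketch (hypothetical values; 'input_datasets' and
    # 'subdir_generator_function' are supplied by Galaxy's job splitter):
    #   split_params = { 'split_mode': 'to_size', 'split_size': 10 }
    #   Stockholm_1_0.split( input_datasets, subdir_generator_function, split_params )
    # writes one part file per 10 alignments into the generated subdirectories.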


class Infernal_CM_1_1( data.Text ):
    file_ext = "cm"

    MetadataElement( name="number_of_models", default=0, desc="Number of covariance models", readonly=True, visible=True, optional=True, no_value=0 )

    def set_peek( self, dataset, is_multi_byte=False ):
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            if dataset.metadata.number_of_models == 1:
                dataset.blurb = "1 model"
            else:
                dataset.blurb = "%s models" % dataset.metadata.number_of_models
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'

    def sniff( self, filename ):
        """
        Infernal 1.1 covariance model files start with an 'INFERNAL1/a' header line.
        """
        return count_special_lines( '^INFERNAL1/a', filename ) > 0

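    # A file head that the sniff() above accepts; the version string after
    # the tag may vary, e.g.:
    #   INFERNAL1/a [1.1 | October 2013]
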
    def set_meta( self, dataset, **kwd ):
        """
        Set the number of models in dataset.
        """
        dataset.metadata.number_of_models = count_special_lines( '^INFERNAL1/a', dataset.file_name )

    def split( cls, input_datasets, subdir_generator_function, split_params ):
        """
        Split the input files by model records.
        """
        if split_params is None:
            return None

        if len( input_datasets ) > 1:
            raise Exception( "CM-file splitting does not support multiple files" )
        input_files = [ ds.file_name for ds in input_datasets ]

        chunk_size = None
        if split_params['split_mode'] == 'number_of_parts':
            raise Exception( 'Split mode "%s" is currently not implemented for CM-files.' % split_params['split_mode'] )
        elif split_params['split_mode'] == 'to_size':
            chunk_size = int( split_params['split_size'] )
        else:
            raise Exception( 'Unsupported split mode %s' % split_params['split_mode'] )

        def _read_cm_records( filename ):
            # Each 'INFERNAL1/a' header line starts a new model record; the
            # final record is yielded once the file is exhausted.
            lines = []
            with open( filename ) as handle:
                for line in handle:
                    if line.startswith( "INFERNAL1/a" ) and lines:
                        yield lines
                        lines = [ line ]
                    else:
                        lines.append( line )
                yield lines

        def _write_part_cm_file( accumulated_lines ):
            part_dir = subdir_generator_function()
            part_path = os.path.join( part_dir, os.path.basename( input_files[0] ) )
            with open( part_path, 'w' ) as part_file:
                part_file.writelines( accumulated_lines )

        try:
            cm_records = _read_cm_records( input_files[0] )
            cm_lines_accumulated = []
            for counter, cm_record in enumerate( cm_records, start=1 ):
                cm_lines_accumulated.extend( cm_record )
                if counter % chunk_size == 0:
                    _write_part_cm_file( cm_lines_accumulated )
                    cm_lines_accumulated = []
            if cm_lines_accumulated:
                _write_part_cm_file( cm_lines_accumulated )
        except Exception as e:
            log.error( 'Unable to split files: %s' % str( e ) )
            raise
    split = classmethod( split )
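

# A minimal smoke test (a sketch; 'alignments.stockholm' and 'models.cm' are
# hypothetical local files, not part of this module):
if __name__ == '__main__':
    print( count_lines( 'alignments.stockholm' ) )
    print( count_special_lines( '^INFERNAL1/a', 'models.cm' ) )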