0
|
1 # -*- coding: utf-8 -*-
|
|
2
|
|
3 from galaxy.datatypes import data
|
|
4 import logging
|
|
5 from galaxy.datatypes.sniff import get_headers, get_test_fname
|
|
6 from galaxy.datatypes.data import get_file_peek
|
|
7 from galaxy.datatypes.tabular import Tabular
|
|
8 from galaxy.datatypes.binary import Binary
|
|
9 import subprocess
|
|
10 import os
|
|
11 #import pybel
|
|
12 #import openbabel
|
|
13 #openbabel.obErrorLog.StopLogging()
|
|
14
|
|
15 from galaxy.datatypes.metadata import MetadataElement
|
|
16 from galaxy.datatypes import metadata
|
|
17
|
|
18 log = logging.getLogger(__name__)
|
|
19
|
|
def count_special_lines( word, filename, invert = False ):
    """
    Count the lines of ``filename`` that match the pattern ``word``,
    using the external ``grep`` tool for speed.

    word -- regular expression handed to ``grep -c``
    filename -- path of the file to search
    invert -- if True, count the lines that do NOT match (``grep -v``)

    Returns the number of (non-)matching lines, or 0 if grep is
    unavailable or produces unexpected output.
    """
    try:
        cmd = ["grep", "-c"]
        if invert:
            cmd.append('-v')
        cmd.extend([word, filename])
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        # grep -c prints a single number; take the first whitespace token
        return int(out.communicate()[0].split()[0])
    except (OSError, ValueError, IndexError):
        # grep missing, file unreadable, or no parseable count -> report 0
        # (previously a bare ``except`` that also hid programming errors)
        pass
    return 0
|
|
36
|
|
def count_lines( filename, non_empty = False ):
    """
    Count the number of lines in the ``filename`` file.

    non_empty -- if True, count only lines containing at least one
    non-whitespace character; otherwise use a plain ``wc -l``.

    Returns the line count, or 0 if the external tool fails.
    """
    try:
        if non_empty:
            # grep -v inverts the blank-line pattern, -c counts the rest
            out = subprocess.Popen(['grep', '-cve', r'^\s*$', filename], stdout=subprocess.PIPE)
        else:
            out = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
        return int(out.communicate()[0].split()[0])
    except (OSError, ValueError, IndexError):
        # tool missing, file unreadable, or no parseable count -> report 0
        pass
    return 0
|
|
50
|
|
51
|
|
class GenericMolFile( data.Text ):
    """
    Abstract base class for most of the molecule datatypes.
    Subclasses fill in the ``number_of_molecules`` metadata element;
    the peek blurb reports that count.
    """
    MetadataElement( name="number_of_molecules", default=0, desc="Number of molecules", readonly=True, visible=True, optional=True, no_value=0 )

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the dataset peek (file head) and a '<n> molecule(s)' blurb."""
        if not dataset.dataset.purged:
            # compute the peek once; the original computed it twice
            # (get_file_peek and data.get_file_peek are the same function)
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            if dataset.metadata.number_of_molecules == 1:
                dataset.blurb = "1 molecule"
            else:
                dataset.blurb = "%s molecules" % dataset.metadata.number_of_molecules
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'

    def get_mime(self):
        """Return a text mime type so the peek renders in the browser."""
        return 'text/plain'
|
|
72
|
2
|
class MOL( GenericMolFile ):
    file_ext = "mol"

    def sniff( self, filename ):
        """
        A MOL file holds exactly one molecule record, terminated by a
        single 'M  END' line.
        """
        # bugfix: the original used assignment '=' instead of the
        # comparison '==', which is a syntax error
        return count_special_lines(r"^M\s*END", filename) == 1

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of molecules; for MOL it is always one.
        """
        dataset.metadata.number_of_molecules = 1
|
0
|
86
|
|
87
|
|
class SDF( GenericMolFile ):
    file_ext = "sdf"

    def sniff( self, filename ):
        """An SD-file contains at least one '$$$$' record separator."""
        return count_special_lines(r"^\$\$\$\$", filename) > 0

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of molecules by counting '$$$$' record separators.
        """
        dataset.metadata.number_of_molecules = count_special_lines(r"^\$\$\$\$", dataset.file_name)

    def split( cls, input_datasets, subdir_generator_function, split_params):
        """
        Split the input file by molecule records ('to_size' mode only).

        Raises on multiple input files and on unsupported split modes.
        """
        if split_params is None:
            return None

        if len(input_datasets) > 1:
            raise Exception("SD-file splitting does not support multiple files")
        input_files = [ds.file_name for ds in input_datasets]

        chunk_size = None
        if split_params['split_mode'] == 'number_of_parts':
            raise Exception('Split mode "%s" is currently not implemented for SD-files.' % split_params['split_mode'])
        elif split_params['split_mode'] == 'to_size':
            chunk_size = int(split_params['split_size'])
        else:
            raise Exception('Unsupported split mode %s' % split_params['split_mode'])

        def _read_sdf_records( filename ):
            """Yield one molecule record (list of lines) per '$$$$' separator."""
            lines = []
            with open(filename) as handle:
                for line in handle:
                    lines.append( line )
                    if line.startswith("$$$$"):
                        yield lines
                        lines = []

        def _write_part_sdf_file( accumulated_lines ):
            """Write the accumulated records into a freshly created part dir."""
            part_dir = subdir_generator_function()
            part_path = os.path.join(part_dir, os.path.basename(input_files[0]))
            # 'with' guarantees the part file is closed even on error
            with open(part_path, 'w') as part_file:
                part_file.writelines( accumulated_lines )

        try:
            sdf_records = _read_sdf_records( input_files[0] )
            sdf_lines_accumulated = []
            for counter, sdf_record in enumerate( sdf_records, start=1 ):
                sdf_lines_accumulated.extend( sdf_record )
                if counter % chunk_size == 0:
                    _write_part_sdf_file( sdf_lines_accumulated )
                    sdf_lines_accumulated = []
            # flush the remaining records into a final, smaller part
            if sdf_lines_accumulated:
                _write_part_sdf_file( sdf_lines_accumulated )
        except Exception as e:
            log.error('Unable to split files: %s' % str(e))
            raise
    split = classmethod(split)
|
|
151
|
|
152
|
|
class MOL2( GenericMolFile ):
    file_ext = "mol2"

    def sniff( self, filename ):
        """A MOL2 file contains at least one @<TRIPOS>MOLECULE record."""
        return count_special_lines(r"@\<TRIPOS\>MOLECULE", filename) > 0

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of molecules by counting @<TRIPOS>MOLECULE records.
        """
        dataset.metadata.number_of_molecules = count_special_lines("@<TRIPOS>MOLECULE", dataset.file_name)

    def split( cls, input_datasets, subdir_generator_function, split_params):
        """
        Split the input file by molecule records ('to_size' mode only).
        """
        if split_params is None:
            return None

        if len(input_datasets) > 1:
            raise Exception("MOL2-file splitting does not support multiple files")
        input_files = [ds.file_name for ds in input_datasets]

        chunk_size = None
        if split_params['split_mode'] == 'number_of_parts':
            raise Exception('Split mode "%s" is currently not implemented for MOL2-files.' % split_params['split_mode'])
        elif split_params['split_mode'] == 'to_size':
            chunk_size = int(split_params['split_size'])
        else:
            raise Exception('Unsupported split mode %s' % split_params['split_mode'])

        def _read_mol2_records( filename ):
            """
            Yield one molecule record (list of lines) per @<TRIPOS>MOLECULE
            header.  Any content before the first header is folded into the
            first record.
            """
            lines = []
            start = True
            with open(filename) as handle:
                for line in handle:
                    if line.startswith("@<TRIPOS>MOLECULE"):
                        if start:
                            start = False
                        else:
                            yield lines
                            lines = []
                    lines.append( line )
            # bugfix: emit the final molecule record; the original never
            # flushed it, so the last molecule was silently dropped
            if lines:
                yield lines

        def _write_part_mol2_file( accumulated_lines ):
            """Write the accumulated records into a freshly created part dir."""
            part_dir = subdir_generator_function()
            part_path = os.path.join(part_dir, os.path.basename(input_files[0]))
            with open(part_path, 'w') as part_file:
                part_file.writelines( accumulated_lines )

        try:
            mol2_records = _read_mol2_records( input_files[0] )
            mol2_lines_accumulated = []
            for counter, mol2_record in enumerate( mol2_records, start=1 ):
                mol2_lines_accumulated.extend( mol2_record )
                if counter % chunk_size == 0:
                    _write_part_mol2_file( mol2_lines_accumulated )
                    mol2_lines_accumulated = []
            # flush the remaining records into a final, smaller part
            if mol2_lines_accumulated:
                _write_part_mol2_file( mol2_lines_accumulated )
        except Exception as e:
            log.error('Unable to split files: %s' % str(e))
            raise
    split = classmethod(split)
|
|
220
|
|
221
|
|
222
|
|
class FPS( GenericMolFile ):
    """
    chemfp fingerprint file: http://code.google.com/p/chem-fingerprints/wiki/FPS
    """
    file_ext = "fps"

    def sniff( self, filename ):
        """FPS files start with a '#FPS1' format/version header line."""
        header = get_headers( filename, sep='\t', count=1 )
        # robustness: guard against empty files before indexing the
        # first header field (previously raised IndexError)
        return bool(header) and bool(header[0]) and header[0][0].strip() == '#FPS1'

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of fingerprints: every non-comment ('#') line is
        one record.
        """
        dataset.metadata.number_of_molecules = count_special_lines('^#', dataset.file_name, invert = True)

    def split( cls, input_datasets, subdir_generator_function, split_params):
        """
        Split the input file by fingerprint records ('to_size' mode only).
        Every part gets a copy of the '#' header lines.
        """
        if split_params is None:
            return None

        if len(input_datasets) > 1:
            raise Exception("FPS-file splitting does not support multiple files")
        input_files = [ds.file_name for ds in input_datasets]

        chunk_size = None
        if split_params['split_mode'] == 'number_of_parts':
            # bugfix: the message previously said "MOL2-files" (copy-paste)
            raise Exception('Split mode "%s" is currently not implemented for FPS-files.' % split_params['split_mode'])
        elif split_params['split_mode'] == 'to_size':
            chunk_size = int(split_params['split_size'])
        else:
            raise Exception('Unsupported split mode %s' % split_params['split_mode'])

        def _write_part_fingerprint_file( accumulated_lines ):
            """Write header + accumulated fingerprints into a fresh part dir."""
            part_dir = subdir_generator_function()
            part_path = os.path.join(part_dir, os.path.basename(input_files[0]))
            with open(part_path, 'w') as part_file:
                part_file.writelines( accumulated_lines )

        try:
            header_lines = []
            lines_accumulated = []
            fingerprint_counter = 0
            with open( input_files[0] ) as handle:
                for line in handle:
                    if not line.strip():
                        continue
                    if line.startswith('#'):
                        header_lines.append( line )
                    else:
                        fingerprint_counter += 1
                        lines_accumulated.append( line )
                        # check only after a data line, so a stray comment
                        # line cannot trigger a header-only duplicate part
                        if fingerprint_counter % chunk_size == 0:
                            _write_part_fingerprint_file( header_lines + lines_accumulated )
                            lines_accumulated = []
            if lines_accumulated:
                _write_part_fingerprint_file( header_lines + lines_accumulated )
        except Exception as e:
            log.error('Unable to split files: %s' % str(e))
            raise
    split = classmethod(split)

    def merge(split_files, output_file):
        """
        Merging fps files requires merging the header manually.
        We take the header from the first file.
        """
        if len(split_files) == 1:
            # for one file only, use the base class method (move/copy)
            return data.Text.merge(split_files, output_file)
        if not split_files:
            raise ValueError("No fps files given, %r, to merge into %s" \
                % (split_files, output_file))
        with open(output_file, "w") as out:
            first = True
            for filename in split_files:
                with open(filename) as handle:
                    for line in handle:
                        if line.startswith('#'):
                            if first:
                                out.write(line)
                        else:
                            # first data line seen: from now on drop every
                            # further header line (they were already copied
                            # from the first file)
                            first = False
                            out.write(line)
    merge = staticmethod(merge)
|
|
317
|
|
318
|
|
319
|
|
class OBFS( Binary ):
    """OpenBabel Fastsearch format (fs)."""
    file_ext = 'fs'
    composite_type = 'basic'
    allow_datatype_change = False

    MetadataElement( name="base_name", default='OpenBabel Fastsearch Index',
                     readonly=True, visible=True, optional=True,)

    def __init__(self, **kwd):
        """
        A Fastsearch index consists of a binary fingerprint file plus a
        pointer to the actual molecule file (one of several formats).
        """
        Binary.__init__(self, **kwd)
        self.add_composite_file('molecule.fs', is_binary = True,
                                description = 'OpenBabel Fastsearch Index' )
        # the indexed molecule file may arrive in any of these formats
        for fmt in ('sdf', 'smi', 'inchi', 'mol2', 'cml'):
            self.add_composite_file('molecule.%s' % fmt, optional=True,
                                    is_binary = False, description = 'Molecule File' )

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the peek and blurb text (fixed labels; the index is binary)."""
        if dataset.dataset.purged:
            dataset.peek = "file does not exist"
            dataset.blurb = "file purged from disk"
        else:
            dataset.peek = "OpenBabel Fastsearch Index"
            dataset.blurb = "OpenBabel Fastsearch Index"

    def display_peek( self, dataset ):
        """Create HTML content, used for displaying peek."""
        try:
            return dataset.peek
        except:
            return "OpenBabel Fastsearch Index"

    def display_data(self, trans, data, preview=False, filename=None,
                     to_ext=None, size=None, offset=None, **kwd):
        """Apparently an old display method, but still gets called.

        This allows us to format the data shown in the central pane via the "eye" icon.
        """
        return "This is a OpenBabel Fastsearch format. You can speed up your similarity and substructure search with it."

    def get_mime(self):
        """Returns the mime type of the datatype (pretend it is text for peek)"""
        return 'text/plain'

    def merge(split_files, output_file, extra_merge_args):
        """Merging Fastsearch indices is not supported."""
        raise NotImplementedError("Merging Fastsearch indices is not supported.")

    def split( cls, input_datasets, subdir_generator_function, split_params):
        """Splitting Fastsearch indices is not supported."""
        if split_params is None:
            return None
        raise NotImplementedError("Splitting Fastsearch indices is not possible.")
|
|
385
|
|
386
|
|
387
|
|
class DRF( GenericMolFile ):
    file_ext = "drf"

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of lines of data in dataset.
        """
        # Counts the lines that do NOT contain the '"ligand id"' header
        # (invert=True), i.e. every remaining line is treated as one
        # molecule entry.  NOTE(review): assumes each non-header line is
        # exactly one record -- confirm against the DRF file writer.
        dataset.metadata.number_of_molecules = count_special_lines('\"ligand id\"', dataset.file_name, invert = True)#self.count_data_lines(dataset)
|
|
396
|
|
397
|
|
class PHAR( GenericMolFile ):
    """Pharmacophore datatype; the blurb is a fixed label."""
    file_ext = "phar"

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the peek to the file head and a fixed 'pharmacophore' blurb."""
        if dataset.dataset.purged:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
        else:
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            dataset.blurb = "pharmacophore"
|
|
407
|
|
408
|
|
class PDB( GenericMolFile ):
    file_ext = "pdb"

    def sniff( self, filename ):
        """
        Detect a PDB file by requiring all six standard header record
        types within the first 300 space-separated header lines.
        """
        required = set(['HEADER', 'TITLE', 'COMPND', 'SOURCE', 'KEYWDS', 'EXPDTA'])
        seen = set()
        for line in get_headers( filename, sep=' ', count=300 ):
            # robustness: skip completely empty rows before indexing
            if not line:
                continue
            section_name = line[0].strip()
            if section_name in required:
                seen.add( section_name )
        # idiom fix: the original multiplied six booleans (h*t*c*s*k*e)
        return seen == required

    def set_peek( self, dataset, is_multi_byte=False ):
        """Blurb reports the ATOM and HETATM record counts."""
        if not dataset.dataset.purged:
            atom_numbers = count_special_lines("^ATOM", dataset.file_name)
            hetatm_numbers = count_special_lines("^HETATM", dataset.file_name)
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            dataset.blurb = "%s atoms and %s HET-atoms" % (atom_numbers, hetatm_numbers)
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
|
|
443
|
|
444
|
|
class grd( data.Text ):
    """Docking-grid datatype; the blurb is a fixed label."""
    file_ext = "grd"

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the peek to the file head and a fixed docking-grids blurb."""
        if dataset.dataset.purged:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
        else:
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            dataset.blurb = "grids for docking"
|
|
454
|
|
455
|
|
class grdtgz( Binary ):
    """Compressed (tgz) docking grids; payload is binary."""
    file_ext = "grd.tgz"

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set a fixed peek/blurb; the archive content is not previewed."""
        if dataset.dataset.purged:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'
        else:
            dataset.peek = 'binary data'
            dataset.blurb = "compressed grids for docking"
|
|
465
|
|
466
|
|
class InChI( Tabular ):
    file_ext = "inchi"
    column_names = [ 'InChI' ]
    MetadataElement( name="columns", default=2, desc="Number of columns", readonly=True, visible=False )
    MetadataElement( name="column_types", default=['str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
    MetadataElement( name="number_of_molecules", default=0, desc="Number of molecules", readonly=True, visible=True, optional=True, no_value=0 )

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of molecules (one InChI per data line).
        """
        dataset.metadata.number_of_molecules = self.count_data_lines(dataset)

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the dataset peek and a '<n> molecule(s)' blurb."""
        if not dataset.dataset.purged:
            # compute the peek once; the original computed it twice
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            if dataset.metadata.number_of_molecules == 1:
                dataset.blurb = "1 molecule"
            else:
                dataset.blurb = "%s molecules" % dataset.metadata.number_of_molecules
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'

    def sniff( self, filename ):
        """
        InChI files start every line with 'InChI='.
        """
        inchi_lines = get_headers( filename, sep=' ', count=10 )
        if not inchi_lines:
            # robustness: an empty file previously sniffed as InChI
            return False
        for inchi in inchi_lines:
            if not inchi[0].startswith('InChI='):
                return False
        return True
|
|
501
|
|
502
|
|
class SMILES( Tabular ):
    file_ext = "smi"
    column_names = [ 'SMILES', 'TITLE' ]
    MetadataElement( name="columns", default=2, desc="Number of columns", readonly=True, visible=False )
    MetadataElement( name="column_types", default=['str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
    MetadataElement( name="number_of_molecules", default=0, desc="Number of molecules", readonly=True, visible=True, optional=True, no_value=0 )

    def set_meta( self, dataset, **kwd ):
        """
        Set the number of molecules (one SMILES per data line).
        """
        dataset.metadata.number_of_molecules = self.count_data_lines(dataset)

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set the dataset peek and a '<n> molecule(s)' blurb."""
        if not dataset.dataset.purged:
            # compute the peek once; the original computed it twice
            dataset.peek = get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
            if dataset.metadata.number_of_molecules == 1:
                dataset.blurb = "1 molecule"
            else:
                dataset.blurb = "%s molecules" % dataset.metadata.number_of_molecules
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'

    # disabled sniffer kept for reference: depends on pybel, which cannot
    # currently be imported in datatype definition files
    '''
    def sniff( self, filename ):
        """
        Its hard or impossible to sniff a SMILES File. We can
        try to import the first SMILES and check if it is a molecule, but
        currently its not possible to use external libraries from the toolshed
        in datatype definition files. TODO
        """
        self.molecule_number = count_lines( filename, non_empty = True )
        word_count = count_lines( filename )

        if self.molecule_number != word_count:
            return False

        if self.molecule_number > 0:
            # test first 3 SMILES
            smiles_lines = get_headers( filename, sep='\t', count=3 )
            for smiles_line in smiles_lines:
                if len(smiles_line) > 2:
                    return False
                smiles = smiles_line[0]
                try:
                    # if we have atoms, we have a molecule
                    if not len( pybel.readstring('smi', smiles).atoms ) > 0:
                        return False
                except:
                    # if convert fails its not a smiles string
                    return False
            return True
        else:
            return False
    '''
|
|
561
|
|
562
|
|
if __name__ == '__main__':
    """
    TODO: We need to figure out, how to put example files under /lib/galaxy/datatypes/test/ from a toolshed, so that doctest can work properly.
    """
    # Ad-hoc smoke test (Python 2 print statements): run each sniffer
    # against matching and non-matching example files; only the lines
    # labelled with their extension should print True.
    inchi = get_test_fname('drugbank_drugs.inchi')
    smiles = get_test_fname('drugbank_drugs.smi')
    sdf = get_test_fname('drugbank_drugs.sdf')
    fps = get_test_fname('50_chemfp_fingerprints_FPS1.fps')
    pdb = get_test_fname('2zbz.pdb')

    # NOTE(review): SMILES defines no sniff() of its own (it is commented
    # out above), so these calls fall back to the inherited sniffer.
    print 'SMILES test'
    print SMILES().sniff(smiles), 'smi'
    print SMILES().sniff(inchi)
    print SMILES().sniff(pdb)

    print 'InChI test'
    print InChI().sniff(smiles)
    print InChI().sniff(sdf)
    print InChI().sniff(inchi), 'inchi'

    print 'FPS test'
    print FPS().sniff(smiles)
    print FPS().sniff(sdf)
    f = FPS()
    print f.sniff(fps)

    print 'SDF test'
    print SDF().sniff(smiles)
    print SDF().sniff(sdf), 'sdf'
    print SDF().sniff(fps)

    print 'PDB test'
    print PDB().sniff(smiles)
    print PDB().sniff(sdf)
    print PDB().sniff(fps)
    print PDB().sniff(pdb), 'pdb'
|