changeset 5:5e6f76507721 draft

"planemo upload for repository https://github.com/brsynth/synbiocad-galaxy-wrappers commit 47caed1dd87e80ae226fabb584e9d63d7c86a436-dirty"
author ggricourt
date Thu, 24 Feb 2022 10:56:46 +0000
parents a15b229ee755
children 65589e7476b6
files data_manager/bigg_model_sbml_fetcher.py data_manager/bigg_model_sbml_fetcher.xml
diffstat 2 files changed, 88 insertions(+), 93 deletions(-) [+]
line wrap: on
line diff
--- a/data_manager/bigg_model_sbml_fetcher.py	Thu Feb 24 09:30:37 2022 +0000
+++ b/data_manager/bigg_model_sbml_fetcher.py	Thu Feb 24 10:56:46 2022 +0000
@@ -1,7 +1,9 @@
 import argparse
+import ast
 import json
 import os
 import sys
+import time
 try:
     # For Python 3.0 and later
     from urllib.request import Request, urlopen
@@ -9,104 +11,86 @@
     # Fall back to Python 2 imports
     from urllib2 import Request, urlopen
 
-BASE_URL = 'http://bigg.ucsd.edu/static/models'
-ID2ORG = {
-    "iCN718": "Acinetobacter baumannii AYE",
-    "iYO844": "Bacillus subtilis subsp. subtilis str. 168",
-    "iRC1080": "Chlamydomonas reinhardtii",
-    "iCN900": "Clostridioides difficile 630",
-    "iHN637": "Clostridium ljungdahlii DSM 13528",
-    "iCHOv1_DG44": "Cricetulus griseus",
-    "iCHOv1": "Cricetulus griseus",
-    "iAF1260b": "Escherichia coli str. K-12 substr. MG1655",
-    "iAF1260": "Escherichia coli str. K-12 substr. MG1655",
-    "iML1515": "Escherichia coli str. K-12 substr. MG1655",
-    "iJO1366": "Escherichia coli str. K-12 substr. MG1655",
-    "iJR904": "Escherichia coli str. K-12 substr. MG1655",
-    "e_coli_core": "Escherichia coli str. K-12 substr. MG1655",
-    "iAF987": "Geobacter metallireducens GS-15",
-    "iIT341": "Helicobacter pylori 26695",
-    "iAT_PLT_636": "Homo sapiens",
-    "Recon3D": "Homo sapiens",
-    "iAB_RBC_283": "Homo sapiens",
-    "RECON1": "Homo sapiens",
-    "iYL1228": "Klebsiella pneumoniae subsp. pneumoniae MGH 78578",
-    "iNF517": "Lactococcus lactis subsp. cremoris MG1363",
-    "iAF692": "Methanosarcina barkeri str. Fusaro",
-    "iMM1415": "Mus musculus",
-    "iNJ661": "Mycobacterium tuberculosis H37Rv",
-    "iEK1008": "Mycobacterium tuberculosis H37Rv",
-    "iLB1027_lipid": "Phaeodactylum tricornutum CCAP 1055/1",
-    "iAM_Pb448": "Plasmodium berghei",
-    "iAM_Pc455": "Plasmodium cynomolgi strain B",
-    "iAM_Pf480": "Plasmodium falciparum 3D7",
-    "iAM_Pk459": "Plasmodium knowlesi strain H",
-    "iAM_Pv461": "Plasmodium vivax Sal-1",
-    "iJN746": "Pseudomonas putida KT2440",
-    "iJN1463": "Pseudomonas putida KT2440",
-    "iND750": "Saccharomyces cerevisiae S288C",
-    "iMM904": "Saccharomyces cerevisiae S288C",
-    "STM_v1_0": "Salmonella enterica subsp. enterica serovar Typhimurium str. LT2",
-    "iYS1720": "Salmonella pan-reactome",
-    "iSB619": "Staphylococcus aureus subsp. aureus N315",
-    "iYS854": "Staphylococcus aureus subsp. aureus USA300_TCH1516",
-    "iJB785": "Synechococcus elongatus PCC 7942",
-    "iJN678": "Synechocystis sp. PCC 6803",
-    "iSynCJ816": "Synechocystis sp. PCC 6803",
-    "iLJ478": "Thermotoga maritima MSB8",
-    "iIS312": "Trypanosoma cruzi Dm28c",
-    "iIS312_Trypomastigote": "Trypanosoma cruzi Dm28c",
-    "iIS312_Epimastigote": "Trypanosoma cruzi Dm28c",
-    "iIS312_Amastigote": "Trypanosoma cruzi Dm28c"
-}
+
+MODEL_URL = 'http://bigg.ucsd.edu/static/models/'
+MODEL_DETAIL_URL = 'http://bigg.ucsd.edu/api/v2/models/'
+
 
-def url_download(url, workdir):
-    file_path = os.path.abspath(os.path.join(workdir, os.path.basename(url)))
-    src = None
-    dst = None
+def url_download(url, path):
     try:
-        req = Request(url)
-        src = urlopen(req)
-        with open(file_path, 'wb') as dst:
-            while True:
-                chunk = src.read(2**10)
-                if chunk:
-                    dst.write(chunk)
-                else:
-                    break
+        with urlopen(Request(url)) as fod:
+            with open(path, 'wb') as dst:
+                while True:
+                    chunk = fod.read(2**10)
+                    if chunk:
+                        dst.write(chunk)
+                    else:
+                        break
+    except Exception as e:
+        sys.exit(str(e))
+
+
+def url_json(url):
+    data = {}
+    try:
+        with urlopen(Request(url)) as fod:
+            data = fod.read().decode('utf-8')
+        data = json.loads(data)
     except Exception as e:
         sys.exit(str(e))
-    finally:
-        if src:
-            src.close()
-    return file_path
+    return data
+
+
+def get_model_organism(model_id):
+    data = url_json(MODEL_DETAIL_URL + model_id)
+    org = data.get('organism', 'undefined')
+    res = "(%s) %s" % (model_id, org)
+    return res
 
 
-def download(model_id, out_file):
+def download_entries(model_ids, workdir):
+    for model_id in model_ids:
+        model_filename = model_id + '.xml'
+        path = os.path.abspath(os.path.join(workdir, model_filename))
+
+        url_download(MODEL_URL + model_filename, path)
+        data_manager_entry = {}
+        data_manager_entry['value'] = model_id
+        data_manager_entry['name'] = get_model_organism(model_id)
+        data_manager_entry['path'] = path
 
-    with open(out_file) as fh:
+        # Keep under 10 requests per second, as required by the host (http://bigg.ucsd.edu/data_access)
+        time.sleep(1)
+        yield data_manager_entry
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    pinput = parser.add_mutually_exclusive_group(required=True)
+    pinput.add_argument('--model-id', help='Model BIGG id')
+    pinput.add_argument('--model-all', action='store_true', help='Download all models')
+    parser.add_argument('--out-file', help='JSON output file')
+    args = parser.parse_args()
+
+    # Init.
+    data_manager_json = {'data_tables': {}}
+    with open(args.out_file) as fh:
         params = json.load(fh)
 
     workdir = params['output_data'][0]['extra_files_path']
     os.makedirs(workdir)
 
-    data_manager_json = {'data_tables': {}}
-    file_path = url_download(BASE_URL + '/' + model_id + '.xml', workdir)
-
-    data_manager_entry = {}
-    data_manager_entry['value'] = model_id
-    data_manager_entry['name'] = ID2ORG.get(model_id, 'undefined')
-    data_manager_entry['path'] = file_path
+    model_ids = []
+    if args.model_id:
+        model_ids.append(args.model_id)
+    else:
+        data = url_json(MODEL_DETAIL_URL)
+        for result in data.get("results", []):
+            model_ids.append(result.get("bigg_id"))
 
-    data_manager_json['data_tables']['bigg_model_sbml'] = data_manager_entry
-    with open(out_file, 'w') as fh:
-        json.dump(data_manager_json, fh, sort_keys=True)
-
+    entries = list(download_entries(model_ids, workdir))
 
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--model-id', help='Model BIGG id')
-    parser.add_argument('--out-file', help='JSON output file')
-    args = parser.parse_args()
-
-    download(args.model_id, args.out_file)
+    # Write data.
+    data_manager_json['data_tables']['bigg_model_sbml'] = entries
+    with open(args.out_file, 'w') as fh:
+        json.dump(data_manager_json, fh, sort_keys=True)
--- a/data_manager/bigg_model_sbml_fetcher.xml	Thu Feb 24 09:30:37 2022 +0000
+++ b/data_manager/bigg_model_sbml_fetcher.xml	Thu Feb 24 10:56:46 2022 +0000
@@ -5,14 +5,25 @@
     </requirements>
     <command detect_errors="exit_code"><![CDATA[
         python '$__tool_directory__/bigg_model_sbml_fetcher.py'
-        --model-id '$model_id'
-        --out-file '$out_file'
+            #if $model_cond.model_select == "auto"
+                --model-all
+            #else
+                --model-id '$model_cond.model_id'
+            #end if
+            --out-file '$out_file'
     ]]></command>
     <inputs>
-        <param name="model_id" type="select" label="Model Id">
-            <option value="iML1515">Escherichia coli str. K-12 substr. MG1655 (iML1515)</option>
-            <option value="iYO844">Bacillus subtilis subsp. subtilis str. 168 (iYO844)</option>
-        </param>
+        <conditional name="model_cond">
+            <param name="model_select" type="select" label="Select a reference dataset from your history or use a built-in?">
+                <option value="auto">Download all models</option>
+                <option value="manual">Manual install</option>
+            </param>
+            <when value="manual">
+                <param name="model_id" type="text" label="BIGG id model" value="">
+                    <validator type="empty_field" message="BIGG model ID is required"/>
+                </param>
+            </when>
+        </conditional>
     </inputs>
     <outputs>
         <data name="out_file" format="data_manager_json" />