changeset 10:a9f72fd191b5 draft

"planemo upload for repository https://github.com/brsynth/synbiocad-galaxy-wrappers commit 47caed1dd87e80ae226fabb584e9d63d7c86a436-dirty"
author ggricourt
date Thu, 24 Feb 2022 12:05:43 +0000
parents e2f2977b1675
children 2bb0d8ca1710
files data_manager/.bigg_model_sbml_fetcher.py.swp data_manager/bigg_model_sbml_fetcher.py
diffstat 2 files changed, 21 insertions(+), 21 deletions(-)
line diff
Binary file data_manager/.bigg_model_sbml_fetcher.py.swp has changed
--- a/data_manager/bigg_model_sbml_fetcher.py	Thu Feb 24 11:53:26 2022 +0000
+++ b/data_manager/bigg_model_sbml_fetcher.py	Thu Feb 24 12:05:43 2022 +0000
@@ -1,5 +1,4 @@
 import argparse
-import ast
 import json
 import os
 import sys
@@ -12,14 +11,14 @@
     from urllib2 import Request, urlopen
 
 
-MODEL_URL = 'http://bigg.ucsd.edu/static/models/'
-MODEL_DETAIL_URL = 'http://bigg.ucsd.edu/api/v2/models/'
+MODEL_URL = "http://bigg.ucsd.edu/static/models/"
+MODEL_DETAIL_URL = "http://bigg.ucsd.edu/api/v2/models/"
 
 
 def url_download(url, path):
     try:
         with urlopen(Request(url)) as fod:
-            with open(path, 'wb') as dst:
+            with open(path, "wb") as dst:
                 while True:
                     chunk = fod.read(2**10)
                     if chunk:
@@ -34,9 +33,8 @@
     data = {}
     try:
         with urlopen(Request(url)) as fod:
-            data = fod.read().decode('utf-8')
-        print(data)
-        data = ast.literal_eval(data)
+            data = fod.read().decode("utf-8")
+        data = json.loads(data)
     except Exception as e:
         sys.exit(str(e))
     return data
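
The hunk above swaps ast.literal_eval for json.loads when decoding the BiGG API response. A minimal sketch of why that matters, using an illustrative payload rather than a real API response: JSON's null/true/false are not Python literals, so literal_eval rejects them while json.loads maps them to None/True/False.

    import json

    # Illustrative payload shaped like a BiGG /api/v2/models/<id> response.
    # ast.literal_eval(payload) raises ValueError on "null"; json.loads does not.
    payload = '{"organism": null, "gene_count": 137}'
    data = json.loads(payload)
    print(data["organism"])  # None
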
@@ -44,41 +42,43 @@
 
 def get_model_organism(model_id):
     data = url_json(MODEL_DETAIL_URL + model_id)
-    org = data.get('organism', 'undefined')
-    res = "(%s) %s" % (model_id, org)
+    org = data.get("organism", "")
+    if org is None:
+        org = ""
+    res = "%s - %s" % (model_id, org)
     return res
 
 
 def download_entries(model_ids, workdir):
     for model_id in model_ids:
-        model_filename = model_id + '.xml'
+        model_filename = model_id + ".xml"
         path = os.path.abspath(os.path.join(workdir, model_filename))
 
         url_download(MODEL_URL + model_filename, path)
         data_manager_entry = {}
-        data_manager_entry['value'] = model_id
-        data_manager_entry['name'] = get_model_organism(model_id)
-        data_manager_entry['path'] = path
+        data_manager_entry["value"] = model_id
+        data_manager_entry["name"] = get_model_organism(model_id)
+        data_manager_entry["path"] = path
 
         # Keep to fewer than 10 requests per second, as required by the host (http://bigg.ucsd.edu/data_access)
         time.sleep(1)
         yield data_manager_entry
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     pinput = parser.add_mutually_exclusive_group(required=True)
-    pinput.add_argument('--model-id', help='Model BIGG id')
-    pinput.add_argument('--model-all', action='store_true', help='Download all models')
-    parser.add_argument('--out-file', help='JSON output file')
+    pinput.add_argument("--model-id", help="Model BIGG id")
+    pinput.add_argument("--model-all", action="store_true", help="Download all models")
+    parser.add_argument("--out-file", help="JSON output file")
     args = parser.parse_args()
 
     # Init.
-    data_manager_json = {'data_tables': {}}
+    data_manager_json = {"data_tables": {}}
     with open(args.out_file) as fh:
         params = json.load(fh)
 
-    workdir = params['output_data'][0]['extra_files_path']
+    workdir = params["output_data"][0]["extra_files_path"]
     os.makedirs(workdir)
 
     model_ids = []
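
Each dictionary yielded by download_entries above becomes one row of the bigg_model_sbml data table. A sketch of the expected shape after this change (the model id, organism string, and path below are illustrative, not taken from the changeset):

    entry = {
        "value": "e_coli_core",  # BiGG model id, used as the row key
        "name": "e_coli_core - Escherichia coli str. K-12 substr. MG1655",  # "<id> - <organism>" from the API
        "path": "/galaxy/extra_files/e_coli_core.xml",  # downloaded SBML file
    }
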
@@ -92,6 +92,6 @@
     entries = list(download_entries(model_ids, workdir))
 
     # Write data.
-    data_manager_json['data_tables']['bigg_model_sbml'] = entries
-    with open(args.out_file, 'w') as fh:
+    data_manager_json["data_tables"]["bigg_model_sbml"] = entries
+    with open(args.out_file, "w") as fh:
         json.dump(data_manager_json, fh, sort_keys=True)
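
For reference, a sketch of a standalone invocation and the JSON round trip under these changes; the Galaxy-provided input and the resulting output below are illustrative. The script first reads --out-file to find extra_files_path, then overwrites the same file with the data table entries.

    # Illustrative Galaxy-style parameter file written to out.json beforehand:
    #   {"output_data": [{"extra_files_path": "/galaxy/extra_files"}]}
    #
    # python bigg_model_sbml_fetcher.py --model-id e_coli_core --out-file out.json
    #
    # out.json afterwards (keys sorted because of json.dump(..., sort_keys=True)):
    #   {"data_tables": {"bigg_model_sbml": [
    #       {"name": "e_coli_core - Escherichia coli str. K-12 substr. MG1655",
    #        "path": "/galaxy/extra_files/e_coli_core.xml",
    #        "value": "e_coli_core"}]}}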