# HG changeset patch
# User ggricourt
# Date 1645704343 0
# Node ID a9f72fd191b5cc29a83d9bbe4a7510dd443cb83c
# Parent  e2f2977b1675af63b7011d10bea579a3eee6faff
"planemo upload for repository https://github.com/brsynth/synbiocad-galaxy-wrappers commit 47caed1dd87e80ae226fabb584e9d63d7c86a436-dirty"

diff -r e2f2977b1675 -r a9f72fd191b5 data_manager/.bigg_model_sbml_fetcher.py.swp
Binary file data_manager/.bigg_model_sbml_fetcher.py.swp has changed
diff -r e2f2977b1675 -r a9f72fd191b5 data_manager/bigg_model_sbml_fetcher.py
--- a/data_manager/bigg_model_sbml_fetcher.py	Thu Feb 24 11:53:26 2022 +0000
+++ b/data_manager/bigg_model_sbml_fetcher.py	Thu Feb 24 12:05:43 2022 +0000
@@ -1,5 +1,4 @@
 import argparse
-import ast
 import json
 import os
 import sys
@@ -12,14 +11,14 @@
     from urllib2 import Request, urlopen
 
 
-MODEL_URL = 'http://bigg.ucsd.edu/static/models/'
-MODEL_DETAIL_URL = 'http://bigg.ucsd.edu/api/v2/models/'
+MODEL_URL = "http://bigg.ucsd.edu/static/models/"
+MODEL_DETAIL_URL = "http://bigg.ucsd.edu/api/v2/models/"
 
 
 def url_download(url, path):
     try:
         with urlopen(Request(url)) as fod:
-            with open(path, 'wb') as dst:
+            with open(path, "wb") as dst:
                 while True:
                     chunk = fod.read(2**10)
                     if chunk:
@@ -34,9 +33,8 @@
     data = {}
     try:
         with urlopen(Request(url)) as fod:
-            data = fod.read().decode('utf-8')
-            print(data)
-            data = ast.literal_eval(data)
+            data = fod.read().decode("utf-8")
+            data = json.loads(data)
     except Exception as e:
         sys.exit(str(e))
     return data
@@ -44,41 +42,43 @@
 
 def get_model_organism(model_id):
     data = url_json(MODEL_DETAIL_URL + model_id)
-    org = data.get('organism', 'undefined')
-    res = "(%s) %s" % (model_id, org)
+    org = data.get("organism", "")
+    if org is None:
+        org = ""
+    res = "%s - %s" % (model_id, org)
     return res
 
 
 def download_entries(model_ids, workdir):
     for model_id in model_ids:
-        model_filename = model_id + '.xml'
+        model_filename = model_id + ".xml"
         path = os.path.abspath(os.path.join(workdir, model_filename))
         url_download(MODEL_URL + model_filename, path)
 
         data_manager_entry = {}
-        data_manager_entry['value'] = model_id
-        data_manager_entry['name'] = get_model_organism(model_id)
-        data_manager_entry['path'] = path
+        data_manager_entry["value"] = model_id
+        data_manager_entry["name"] = get_model_organism(model_id)
+        data_manager_entry["path"] = path
 
         # Make sure that less than 10 requests per second, as required by host (http://bigg.ucsd.edu/data_access)
         time.sleep(1)
         yield data_manager_entry
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     pinput = parser.add_mutually_exclusive_group(required=True)
-    pinput.add_argument('--model-id', help='Model BIGG id')
-    pinput.add_argument('--model-all', action='store_true', help='Download all models')
-    parser.add_argument('--out-file', help='JSON output file')
+    pinput.add_argument("--model-id", help="Model BIGG id")
+    pinput.add_argument("--model-all", action="store_true", help="Download all models")
+    parser.add_argument("--out-file", help="JSON output file")
     args = parser.parse_args()
 
     # Init.
-    data_manager_json = {'data_tables': {}}
+    data_manager_json = {"data_tables": {}}
     with open(args.out_file) as fh:
         params = json.load(fh)
 
-    workdir = params['output_data'][0]['extra_files_path']
+    workdir = params["output_data"][0]["extra_files_path"]
     os.makedirs(workdir)
 
     model_ids = []
@@ -92,6 +92,6 @@
     entries = list(download_entries(model_ids, workdir))
 
     # Write data.
-    data_manager_json['data_tables']['bigg_model_sbml'] = entries
-    with open(args.out_file, 'w') as fh:
+    data_manager_json["data_tables"]["bigg_model_sbml"] = entries
+    with open(args.out_file, "w") as fh:
         json.dump(data_manager_json, fh, sort_keys=True)
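Note on the url_json change above: the BiGG model-detail endpoint serves JSON, which json.loads parses directly, while the replaced ast.literal_eval only accepts Python literals and fails on JSON's true/false/null tokens. A minimal sketch of the difference; the payload below is a made-up example shaped like a BiGG response, not real API output:

    import ast
    import json

    # Hypothetical payload in the shape of a BiGG model-detail response.
    payload = '{"organism": "Escherichia coli", "gene_count": 137, "escher_maps": null}'

    print(json.loads(payload)["organism"])  # -> Escherichia coli

    try:
        # JSON's null is not a Python literal, so literal_eval rejects the payload.
        ast.literal_eval(payload)
    except ValueError as err:
        print("ast.literal_eval failed:", err)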
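The other user-visible change is the entry name format, which moves from "(model_id) organism" to "model_id - organism" and now falls back to an empty string whether the organism key is missing or explicitly null. A sketch of the data-manager JSON the script ends up writing, using assumed example values (e_coli_core is a real BiGG model id; the path is invented):

    import json

    # Assumed example entry; the value/name/path keys mirror the script above.
    entry = {
        "value": "e_coli_core",
        "name": "e_coli_core - Escherichia coli str. K-12 substr. MG1655",
        "path": "/galaxy/data/e_coli_core.xml",  # hypothetical extra_files_path location
    }
    data_manager_json = {"data_tables": {"bigg_model_sbml": [entry]}}
    print(json.dumps(data_manager_json, sort_keys=True, indent=2))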