comparison data_manager/bigg_model_sbml_fetcher.py @ 10:a9f72fd191b5 draft
"planemo upload for repository https://github.com/brsynth/synbiocad-galaxy-wrappers commit 47caed1dd87e80ae226fabb584e9d63d7c86a436-dirty"
| field | value |
|---|---|
| author | ggricourt |
| date | Thu, 24 Feb 2022 12:05:43 +0000 |
| parents | e2f2977b1675 |
| children | 2bb0d8ca1710 |
Unchanged context is collapsed in this comparison, so gaps in the file line numbers below are elided equal lines.

| 9:e2f2977b1675 (old) | 10:a9f72fd191b5 (new) |
|---|---|
| 1 import argparse | 1 import argparse |
| 2 import ast | |
| 3 import json | 2 import json |
| 4 import os | 3 import os |
| 5 import sys | 4 import sys |
| 6 import time | 5 import time |
| 7 try: | 6 try: |
| 10 except ImportError: | 9 except ImportError: |
| 11 # Fall back to Python 2 imports | 10 # Fall back to Python 2 imports |
| 12 from urllib2 import Request, urlopen | 11 from urllib2 import Request, urlopen |
| 13 | 12 |
| 14 | 13 |
| 15 MODEL_URL = 'http://bigg.ucsd.edu/static/models/' | 14 MODEL_URL = "http://bigg.ucsd.edu/static/models/" |
| 16 MODEL_DETAIL_URL = 'http://bigg.ucsd.edu/api/v2/models/' | 15 MODEL_DETAIL_URL = "http://bigg.ucsd.edu/api/v2/models/" |
| 17 | 16 |
| 18 | 17 |
| 19 def url_download(url, path): | 18 def url_download(url, path): |
| 20 try: | 19 try: |
| 21 with urlopen(Request(url)) as fod: | 20 with urlopen(Request(url)) as fod: |
| 22 with open(path, 'wb') as dst: | 21 with open(path, "wb") as dst: |
| 23 while True: | 22 while True: |
| 24 chunk = fod.read(2**10) | 23 chunk = fod.read(2**10) |
| 25 if chunk: | 24 if chunk: |
| 26 dst.write(chunk) | 25 dst.write(chunk) |
| 27 else: | 26 else: |
| 32 | 31 |
| 33 def url_json(url): | 32 def url_json(url): |
| 34 data = {} | 33 data = {} |
| 35 try: | 34 try: |
| 36 with urlopen(Request(url)) as fod: | 35 with urlopen(Request(url)) as fod: |
| 37 data = fod.read().decode('utf-8') | 36 data = fod.read().decode("utf-8") |
| 38 print(data) | 37 data = json.loads(data) |
| 39 data = ast.literal_eval(data) | |
| 40 except Exception as e: | 38 except Exception as e: |
| 41 sys.exit(str(e)) | 39 sys.exit(str(e)) |
| 42 return data | 40 return data |
| 43 | 41 |
| 44 | 42 |
| 45 def get_model_organism(model_id): | 43 def get_model_organism(model_id): |
| 46 data = url_json(MODEL_DETAIL_URL + model_id) | 44 data = url_json(MODEL_DETAIL_URL + model_id) |
| 47 org = data.get('organism', 'undefined') | 45 org = data.get("organism", "") |
| 48 res = "(%s) %s" % (model_id, org) | 46 if org is None: |
| | 47 org = "" |
| | 48 res = "%s - %s" % (model_id, org) |
| 49 return res | 49 return res |
| 50 | 50 |
| 51 | 51 |
| 52 def download_entries(model_ids, workdir): | 52 def download_entries(model_ids, workdir): |
| 53 for model_id in model_ids: | 53 for model_id in model_ids: |
| 54 model_filename = model_id + '.xml' | 54 model_filename = model_id + ".xml" |
| 55 path = os.path.abspath(os.path.join(workdir, model_filename)) | 55 path = os.path.abspath(os.path.join(workdir, model_filename)) |
| 56 | 56 |
| 57 url_download(MODEL_URL + model_filename, path) | 57 url_download(MODEL_URL + model_filename, path) |
| 58 data_manager_entry = {} | 58 data_manager_entry = {} |
| 59 data_manager_entry['value'] = model_id | 59 data_manager_entry["value"] = model_id |
| 60 data_manager_entry['name'] = get_model_organism(model_id) | 60 data_manager_entry["name"] = get_model_organism(model_id) |
| 61 data_manager_entry['path'] = path | 61 data_manager_entry["path"] = path |
| 62 | 62 |
| 63 # Ensure fewer than 10 requests per second, as required by the host (http://bigg.ucsd.edu/data_access) | 63 # Ensure fewer than 10 requests per second, as required by the host (http://bigg.ucsd.edu/data_access) |
| 64 time.sleep(1) | 64 time.sleep(1) |
| 65 yield data_manager_entry | 65 yield data_manager_entry |
| 66 | 66 |
| 67 | 67 |
| 68 if __name__ == '__main__': | 68 if __name__ == "__main__": |
| 69 parser = argparse.ArgumentParser() | 69 parser = argparse.ArgumentParser() |
| 70 pinput = parser.add_mutually_exclusive_group(required=True) | 70 pinput = parser.add_mutually_exclusive_group(required=True) |
| 71 pinput.add_argument('--model-id', help='Model BIGG id') | 71 pinput.add_argument("--model-id", help="Model BIGG id") |
| 72 pinput.add_argument('--model-all', action='store_true', help='Download all models') | 72 pinput.add_argument("--model-all", action="store_true", help="Download all models") |
| 73 parser.add_argument('--out-file', help='JSON output file') | 73 parser.add_argument("--out-file", help="JSON output file") |
| 74 args = parser.parse_args() | 74 args = parser.parse_args() |
| 75 | 75 |
| 76 # Init. | 76 # Init. |
| 77 data_manager_json = {'data_tables': {}} | 77 data_manager_json = {"data_tables": {}} |
| 78 with open(args.out_file) as fh: | 78 with open(args.out_file) as fh: |
| 79 params = json.load(fh) | 79 params = json.load(fh) |
| 80 | 80 |
| 81 workdir = params['output_data'][0]['extra_files_path'] | 81 workdir = params["output_data"][0]["extra_files_path"] |
| 82 os.makedirs(workdir) | 82 os.makedirs(workdir) |
| 83 | 83 |
| 84 model_ids = [] | 84 model_ids = [] |
| 85 if args.model_id: | 85 if args.model_id: |
| 86 model_ids.append(args.model_id) | 86 model_ids.append(args.model_id) |
| 90 model_ids.append(result.get("bigg_id")) | 90 model_ids.append(result.get("bigg_id")) |
| 91 | 91 |
| 92 entries = list(download_entries(model_ids, workdir)) | 92 entries = list(download_entries(model_ids, workdir)) |
| 93 | 93 |
| 94 # Write data. | 94 # Write data. |
| 95 data_manager_json['data_tables']['bigg_model_sbml'] = entries | 95 data_manager_json["data_tables"]["bigg_model_sbml"] = entries |
| 96 with open(args.out_file, 'w') as fh: | 96 with open(args.out_file, "w") as fh: |
| 97 json.dump(data_manager_json, fh, sort_keys=True) | 97 json.dump(data_manager_json, fh, sort_keys=True) |
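A few notes on the code shown above. The comparison collapses unchanged context, so the tail of `url_download` (old lines 28-31) is not visible, but the loop's shape is clear: read the response in 1 KiB chunks until `read` returns an empty bytes object. A self-contained sketch under that assumption — the `break` on an empty chunk and the error handling are inferred, not shown in the hunk:

```python
import sys
from urllib.request import Request, urlopen


def url_download(url, path):
    # Stream `url` to `path` in 1 KiB chunks, as in the fetcher's loop.
    try:
        with urlopen(Request(url)) as fod:
            with open(path, "wb") as dst:
                while True:
                    chunk = fod.read(2 ** 10)
                    if chunk:
                        dst.write(chunk)
                    else:
                        break  # empty read means EOF (inferred; elided in the diff)
    except Exception as e:
        sys.exit(str(e))


# Live example (BiGG asks for fewer than 10 requests per second):
# url_download("http://bigg.ucsd.edu/static/models/e_coli_core.xml",
#              "e_coli_core.xml")
```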
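The main behavioral change in this revision is in `url_json`: the debug `print` is gone, and the response body is parsed with `json.loads` instead of `ast.literal_eval` (which is why the `ast` import could be dropped). `ast.literal_eval` accepts only Python literals, so JSON values such as `null` or `true` break it — and the `get_model_organism` hunk suggests the BiGG API can return a null `organism`. A minimal illustration with an inline payload instead of a live request:

```python
import ast
import json

# Payload shaped like a BiGG model-detail response; `null` is valid JSON
# but not a valid Python literal.
payload = '{"bigg_id": "e_coli_core", "organism": null}'

print(json.loads(payload))  # {'bigg_id': 'e_coli_core', 'organism': None}

try:
    ast.literal_eval(payload)
except ValueError as e:
    print("ast.literal_eval rejects it:", e)
```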
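The `get_model_organism` hunk is a related fix: `dict.get(key, default)` returns the default only when the key is absent, not when the key is present with a `None` value, so the old `data.get('organism', 'undefined')` could still yield `None`. Hence the explicit guard added in revision 10:

```python
data = {"organism": None}  # key present, value was null in the JSON

org = data.get("organism", "")
print(org)  # None -- the default is not used when the key is present

if org is None:  # the guard added in revision 10
    org = ""
print("%s - %s" % ("e_coli_core", org))  # "e_coli_core - "
```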
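Finally, the `__main__` block follows the usual Galaxy data manager contract: Galaxy pre-populates the `--out-file` JSON, the script reads `extra_files_path` from it, downloads models into that directory, and then overwrites the same file with the new table entries. A hypothetical round trip — the paths, the params shape beyond the one field the script reads, and the organism string are illustrative only:

```python
import json
import tempfile

# What Galaxy hands the script (only the field the script reads is shown;
# real params files carry more).
params = {"output_data": [{"extra_files_path": "/tmp/bigg_models"}]}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(params, fh)

# After running, e.g.:
#   python bigg_model_sbml_fetcher.py --model-id e_coli_core --out-file <fh.name>
# the same file would hold, roughly:
expected = {
    "data_tables": {
        "bigg_model_sbml": [
            {
                "value": "e_coli_core",
                "name": "e_coli_core - Escherichia coli str. K-12 substr. MG1655",
                "path": "/tmp/bigg_models/e_coli_core.xml",
            }
        ]
    }
}
print(json.dumps(expected, indent=2, sort_keys=True))
```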
