comparison data_manager/data_manager.py @ 2:b4c303665291 draft
planemo upload for repository https://github.com/bernt-matthias/mb-galaxy-tools/tree/master/data_managers/data_manager_dada2 commit eec95ccc2189355061112ea2785b82f13a0fa077-dirty
| author   | matthias                        |
| -------- | ------------------------------- |
| date     | Fri, 08 Mar 2019 05:38:44 -0500 |
| parents  | 419037fe1150                    |
| children | 3a4ee8bf012a                    |
1:1c50cfb0c0ab | 2:b4c303665291
17 "silva132":"Silva version 132", | 17 "silva132":"Silva version 132", |
18 "silva128":"Silva version 128", | 18 "silva128":"Silva version 128", |
19 "rdp16":"RDP trainset 16", | 19 "rdp16":"RDP trainset 16", |
20 "rdp14":"RDP trainset 14", | 20 "rdp14":"RDP trainset 14", |
21 "gg13.84":"GreenGenes version 13.8", | 21 "gg13.84":"GreenGenes version 13.8", |
22 "unite8.0_fungi": "UNITE: General Fasta release 8.0 for Fungi", | |
23 "unite8.0_fungi_singletons": "UNITE: General Fasta release 8.0 for Fungi including global and 97% singletons", | |
24 "unite8.0_euka": "UNITE: General Fasta release 8.0 for all Eukaryotes", | |
25 "unite8.0_euka_singletons": "UNITE: General Fasta release 8.0 for all Eukaryotes including global and 97% singletons", | |
26 "RefSeq_RDP_2018_05": "NCBI RefSeq 16S rRNA database supplemented by RDP (05/2018)", | |
27 "gtdb_2018_11_20": "GTDB: Genome Taxonomy Database (Bacteria & Archaea) (11/2018)", | |
28 "hitdb1": "HitDB version 1 (Human InTestinal 16S rRNA)", | |
29 "silva132_euk_18S": "Silva version 132 Eukaryotic 18S", | |
30 "PR2v4.11.1": "Protist Ribosomal Reference database (PR2) 4.11.1" | |
22 } | 31 } |
23 | 32 |
24 FILE2TAXURL = { | 33 FILE2TAXURL = { |
25 "silva132":"https://zenodo.org/record/1172783/files/silva_nr_v132_train_set.fa.gz?download=1", | 34 "silva132":"https://zenodo.org/record/1172783/files/silva_nr_v132_train_set.fa.gz?download=1", |
26 "silva128":"https://zenodo.org/record/824551/files/silva_nr_v128_train_set.fa.gz?download=1", | 35 "silva128":"https://zenodo.org/record/824551/files/silva_nr_v128_train_set.fa.gz?download=1", |
27 "rdp16":"https://zenodo.org/record/801828/files/rdp_train_set_16.fa.gz?download=1", | 36 "rdp16":"https://zenodo.org/record/801828/files/rdp_train_set_16.fa.gz?download=1", |
28 "rdp14":"https://zenodo.org/record/158955/files/rdp_train_set_14.fa.gz?download=1", | 37 "rdp14":"https://zenodo.org/record/158955/files/rdp_train_set_14.fa.gz?download=1", |
38 "unite8.0_fungi": "https://files.plutof.ut.ee/public/orig/EB/0C/EB0CCB3A871B77EA75E472D13926271076904A588D2E1C1EA5AFCF7397D48378.zip", | |
39 "unite8.0_fungi_singletons": "https://files.plutof.ut.ee/doi/06/A2/06A2C86256EED64085670EB0C54B7115F6DAC8F311C656A9CB33E386CFABA0D0.zip", | |
40 "unite8.0_euka": "https://files.plutof.ut.ee/public/orig/D6/96/D69658E99589D888A207805A744019DBA4EC0F603E67E53732767B3E03A5AA86.zip", | |
41 "unite8.0_euka_singletons": "https://files.plutof.ut.ee/doi/C2/20/C22034350E32D6AD7E5D1AF3F8BC487E34DA0BE25602B0E748906005CE6ADA97.zip", | |
29 "gg13.84":"https://zenodo.org/record/158955/files/gg_13_8_train_set_97.fa.gz?download=1", | 42 "gg13.84":"https://zenodo.org/record/158955/files/gg_13_8_train_set_97.fa.gz?download=1", |
43 "RefSeq_RDP_2018_05": "https://zenodo.org/record/2541239/files/RefSeq-RDP16S_v2_May2018.fa.gz?download=1", | |
44 "gtdb_2018_11_20": "https://zenodo.org/record/2541239/files/GTDB_bac-arc_ssu_r86.fa.gz?download=1", | |
45 "hitdb1": "https://zenodo.org/record/159205/files/hitdb_v1.00.fa.gz?download=1", | |
46 "silva132_euk_18S": "https://zenodo.org/record/1447330/files/silva_132.18s.99_rep_set.dada2.fa.gz?download=1", | |
47 "PR2v4.11.1": "https://github.com/pr2database/pr2database/releases/download/4.11.1/pr2_version_4.11.1_dada2.fasta.gz" | |
30 } | 48 } |
31 | 49 |
32 FILE2SPECIESURL = { | 50 FILE2SPECIESURL = { |
33 "silva132":"https://zenodo.org/record/1172783/files/silva_species_assignment_v132.fa.gz?download=1", | 51 "silva132":"https://zenodo.org/record/1172783/files/silva_species_assignment_v132.fa.gz?download=1", |
34 "silva128":"https://zenodo.org/record/824551/files/silva_species_assignment_v128.fa.gz?download=1", | 52 "silva128":"https://zenodo.org/record/824551/files/silva_species_assignment_v128.fa.gz?download=1", |
35 "rdp16":"https://zenodo.org/record/801828/files/rdp_species_assignment_16.fa.gz?download=1", | 53 "rdp16":"https://zenodo.org/record/801828/files/rdp_species_assignment_16.fa.gz?download=1", |
36 "rdp14":"https://zenodo.org/record/158955/files/rdp_species_assignment_14.fa.gz?download=1" | 54 "rdp14":"https://zenodo.org/record/158955/files/rdp_species_assignment_14.fa.gz?download=1" |
37 } | 55 } |
38 | 56 |
39 FILE2TAXLEVELS = { | 57 FILE2TAXLEVELS = { |
58 "PR2v4.11.1": "Kingdom,Supergroup,Division,Class,Order,Family,Genus,Species" | |
40 } | 59 } |
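The lookup tables added in this revision drive everything else in the script: FILE2NAME supplies the display label, FILE2TAXURL the taxonomy training set to download, FILE2SPECIESURL an optional species-assignment set, and FILE2TAXLEVELS a per-database override of the rank names (only PR2 needs one). A minimal sketch of how a single dataset id resolves through them; DEFAULT_TAXLEVELS is defined earlier in the script, outside this hunk, so the string below is an assumed placeholder:

```python
# Sketch only: resolving one dataset id through the lookup tables above.
# DEFAULT_TAXLEVELS is defined earlier in the script (not part of this hunk);
# the value here is an assumed placeholder.
DEFAULT_TAXLEVELS = "Kingdom,Phylum,Class,Order,Family,Genus,Species"

dataset = "PR2v4.11.1"
name = FILE2NAME[dataset]                   # "Protist Ribosomal Reference database (PR2) 4.11.1"
tax_url = FILE2TAXURL[dataset]              # fasta.gz with the taxonomy training set
species_url = FILE2SPECIESURL.get(dataset)  # None here; only the Silva/RDP sets have one
taxlevels = FILE2TAXLEVELS.get(dataset, DEFAULT_TAXLEVELS)  # PR2 overrides the default ranks
```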
41 | 60 |
42 def url_download(url, fname, workdir): | 61 def url_download(url, fname, workdir): |
43 """ | 62 """ |
44 download url to workdir/fname | 63 download url to workdir/fname |
61 else: | 80 else: |
62 break | 81 break |
63 finally: | 82 finally: |
64 if src: | 83 if src: |
65 src.close() | 84 src.close() |
66 return os.path.join(workdir, fname) | 85 |
86 #special treatment of UNITE DBs: they are zip files containing two fasta (xyz.fasta and developer/xyz.fasta) | |
87 if fname.startswith("unite"): | |
88 import glob | |
89 import gzip | |
90 import shutil | |
91 import zipfile | |
92 # unzip download | |
93 zip_ref = zipfile.ZipFile(file_path, 'r') | |
94 zip_ref.extractall(workdir) | |
95 zip_ref.close() | |
96 # gzip top level fasta file | |
97 fastas = glob.glob("*fasta") | |
98 if len(fastas) != 1: | |
99 msg = "UNITE download %s contained more than one or no fasta file" | |
100 raise Exception(msg) | |
101 with open(fastas[0], 'rb') as f_in: | |
102 with gzip.open(file_path, 'wb') as f_out: | |
103 shutil.copyfileobj(f_in, f_out) | |
104 | |
105 return fname | |
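The inserted block above special-cases the UNITE releases, which arrive as zip archives containing a top-level fasta plus a developer/ copy: it unpacks the zip into workdir, expects exactly one top-level fasta, and re-gzips that fasta over the downloaded file so the rest of the pipeline sees a plain .fa.gz like the other databases. A hedged re-sketch of the same logic, anchoring the glob to workdir and filling the %s placeholder in the error message; file_path is assumed to be the path the elided download code wrote to (os.path.join(workdir, fname)):

```python
# Sketch only, not the committed code: the UNITE post-processing with the
# glob anchored to workdir and the "%s" placeholder actually filled in.
# file_path is assumed to be os.path.join(workdir, fname), i.e. the
# destination written by the download code elided above.
import glob
import gzip
import os
import shutil
import zipfile

def repack_unite(file_path, workdir, fname):
    """Unzip a UNITE release and re-gzip its top-level fasta over file_path."""
    with zipfile.ZipFile(file_path, 'r') as zip_ref:
        zip_ref.extractall(workdir)
    # the archive holds xyz.fasta plus developer/xyz.fasta; keep only the top-level one
    fastas = glob.glob(os.path.join(workdir, "*fasta"))
    if len(fastas) != 1:
        raise Exception("UNITE download %s contained more than one or no fasta file" % fname)
    with open(fastas[0], 'rb') as f_in:
        with gzip.open(file_path, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
```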
67 | 106 |
68 def main(dataset, outjson): | 107 def main(dataset, outjson): |
69 | 108 |
70 params = json.loads(open(outjson).read()) | 109 params = json.loads(open(outjson).read()) |
71 target_directory = params['output_data'][0]['extra_files_path'] | 110 target_directory = params['output_data'][0]['extra_files_path'] |
72 os.mkdir(target_directory) | 111 os.mkdir(target_directory) |
73 output_path = os.path.abspath(os.path.join(os.getcwd(), 'dada2')) | 112 output_path = os.path.abspath(os.path.join(os.getcwd(), 'dada2')) |
74 | 113 |
75 workdir = os.path.join(os.getcwd(), 'dada2') | 114 workdir = os.path.join(os.getcwd(), 'dada2') |
76 path = url_download( FILE2TAXURL[dataset], taxdataset+".taxonomy", workdir) | 115 path = url_download( FILE2TAXURL[dataset], dataset+".taxonomy", workdir) |
77 | 116 |
78 data_manager_json = {"data_tables":{}} | 117 data_manager_json = {"data_tables":{}} |
79 data_manager_entry = {} | 118 data_manager_entry = {} |
80 data_manager_entry['value'] = dataset | 119 data_manager_entry['value'] = dataset |
81 data_manager_entry['name'] = FILE2NAME[dataset] | 120 data_manager_entry['name'] = FILE2NAME[dataset] |
82 data_manager_entry['path'] = path | 121 data_manager_entry['path'] = dataset+".taxonomy" |
83 data_manager_entry['taxlevels'] = FILE2TAXLEVELS.get(dataset, DEFAULT_TAXLEVELS) | 122 data_manager_entry['taxlevels'] = FILE2TAXLEVELS.get(dataset, DEFAULT_TAXLEVELS) |
84 data_manager_json["data_tables"]["dada2_taxonomy"] = data_manager_entry | 123 data_manager_json["data_tables"]["dada2_taxonomy"] = data_manager_entry |
85 | 124 |
86 | 125 |
87 if FILE2SPECIES.get(dataset, False ): | 126 if FILE2SPECIESURL.get(dataset, False ): |
88 path = url_download( FILE2SPECIES[dataset], taxdataset+".species", workdir) | 127 path = url_download( FILE2SPECIESURL[dataset], dataset+".species", workdir) |
89 | 128 |
90 data_manager_entry = {} | 129 data_manager_entry = {} |
91 data_manager_entry['value'] = dataset | 130 data_manager_entry['value'] = dataset |
92 data_manager_entry['name'] = FILE2NAME[dataset] | 131 data_manager_entry['name'] = FILE2NAME[dataset] |
93 data_manager_entry['path'] = path | 132 data_manager_entry['path'] = dataset+".species" |
94 data_manager_json["data_tables"]["dada2_species"] = data_manager_entry | 133 data_manager_json["data_tables"]["dada2_species"] = data_manager_entry |
95 | 134 |
96 for filename in os.listdir(workdir): | 135 for filename in os.listdir(workdir): |
97 shutil.move(os.path.join(output_path, filename), target_directory) | 136 shutil.move(os.path.join(output_path, filename), target_directory) |
137 | |
138 sys.stderr.write("JSON %s" %json.dumps(data_manager_json)) | |
98 file(outjson, 'w').write(json.dumps(data_manager_json)) | 139 file(outjson, 'w').write(json.dumps(data_manager_json)) |
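main() finishes by logging the data manager JSON to stderr and writing it to the --out file via the Python 2 file() builtin (open() would be the Python 3 spelling). Galaxy reads that file to populate the dada2_taxonomy and, when a species URL exists, dada2_species data tables. For illustration, the structure built above for the silva132 dataset would look roughly like this; the taxlevels string stands in for DEFAULT_TAXLEVELS, which is not shown in this excerpt:

```python
# Illustration only: the JSON this script writes to --out for dataset "silva132",
# following the structure built in main() above (one entry per data table).
# The taxlevels string is an assumed DEFAULT_TAXLEVELS value.
expected = {
    "data_tables": {
        "dada2_taxonomy": {
            "value": "silva132",
            "name": "Silva version 132",
            "path": "silva132.taxonomy",
            "taxlevels": "Kingdom,Phylum,Class,Order,Family,Genus,Species"
        },
        "dada2_species": {
            "value": "silva132",
            "name": "Silva version 132",
            "path": "silva132.species"
        }
    }
}
```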
99 | 140 |
100 if __name__ == '__main__': | 141 if __name__ == '__main__': |
101 parser = argparse.ArgumentParser(description='Create data manager json.') | 142 parser = argparse.ArgumentParser(description='Create data manager json.') |
102 parser.add_argument('--out', action='store', help='JSON filename') | 143 parser.add_argument('--out', action='store', help='JSON filename') |
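The argument parser is cut off here after --out; since main(dataset, outjson) takes a dataset id as well, a positional dataset argument and the call to main() presumably follow below this excerpt, making a typical invocation something like `python data_manager.py --out out.json silva132`.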