comparison: fetch_cbioportal_data.py @ 1:b353dad17ab7 (draft, default, tip)
planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/flexynesis commit 973836fb40ecb9c0ac26f675d12b20fc8e5f51f4
author | bgruening
date | Mon, 14 Apr 2025 09:56:16 +0000
parents |
children |
comparison of revisions 0:98431bd19f18 and 1:b353dad17ab7
#!/usr/bin/env python

import argparse
import os

from flexynesis.utils import CBioPortalData


def main():
    parser = argparse.ArgumentParser(description="Fetch and prepare cBioPortal data for Flexynesis.")
    parser.add_argument("--study_id", required=True, help="cBioPortal study ID (e.g., 'brca_tcga')")
    parser.add_argument("--data_types", required=True, help="Comma-separated list of data types (e.g., 'clin,mut,omics')")
    parser.add_argument("--mapped_files", default=None, help="Comma-separated list of .txt files to map to data_types (optional)")
    parser.add_argument("--split_ratio", type=float, default=0.7, help="Training/test split ratio (0.0 to 1.0)")
    parser.add_argument("--output_dir", required=True, help="Output directory for datasets")

    args = parser.parse_args()

    data_types = args.data_types.split(",")
    if "clin" not in data_types:
        raise ValueError("Clinical data ('clin') is required for splitting the dataset.")

    file_mapping = {
        "clin": "data_clinical_patient.txt",  # can be any file with 'clinical' in its name
        "mut": "data_mutations.txt",  # can be any file with 'mutations' in its name
        "omics": "data_cna.txt",
        "other": None
    }

    if args.mapped_files:
        mapped_files = args.mapped_files.split(",")
        if len(mapped_files) != len(data_types):
            raise ValueError(f"Number of mapped files ({len(mapped_files)}) must match number of data types ({len(data_types)}).")
        files_to_fetch = {dt: mf for dt, mf in zip(data_types, mapped_files)}
        for mf in mapped_files:
            if not mf.endswith(".txt"):
                raise ValueError(f"Mapped file '{mf}' must end with '.txt'.")
    else:
        files_to_fetch = {dt: file_mapping[dt] for dt in data_types if dt in file_mapping}

    invalid_types = set(data_types) - set(file_mapping.keys())
    if invalid_types:
        raise ValueError(f"Invalid data types: {invalid_types}. Supported types: {list(file_mapping.keys())}")

    cbioportal = CBioPortalData(study_id=args.study_id)
    cbioportal.get_cbioportal_data(study_id=args.study_id, files=files_to_fetch)
    dataset = cbioportal.split_data(ratio=args.split_ratio)

    os.makedirs(args.output_dir, exist_ok=True)

    for data_type in data_types:
        if data_type in dataset['train']:
            train_file = os.path.join(args.output_dir, f"{data_type}_train.csv")
            dataset['train'][data_type].to_csv(train_file, index=True)
        if data_type in dataset['test']:
            test_file = os.path.join(args.output_dir, f"{data_type}_test.csv")
            dataset['test'][data_type].to_csv(test_file, index=True)


if __name__ == "__main__":
    main()
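
For reference, a minimal example invocation of the script (a sketch: the study ID 'brca_tcga' comes from the argument help text above, and the output directory name is a placeholder):

    python fetch_cbioportal_data.py \
        --study_id brca_tcga \
        --data_types clin,mut \
        --split_ratio 0.7 \
        --output_dir cbioportal_out

With these arguments and no --mapped_files, the built-in mapping requests data_clinical_patient.txt and data_mutations.txt for the study, splits samples 70/30, and, assuming split_data returns per-data-type tables under the 'train' and 'test' keys as the script expects, writes clin_train.csv, clin_test.csv, mut_train.csv and mut_test.csv into cbioportal_out/.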