import pandas as pd
import datasets
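
# Hugging Face `datasets` loading script for CovUniBind: each named config in
# _TABLES below exposes one parquet table from this repository as a single
# "train" split.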
_DESCRIPTION = """\
Multi-source dataset of antibody-mutation interactions including IC50, binding, escape, and affinity measurements.
Also includes antibody synonyms with CDR sequences and epitope information.
"""
_FEATURES = {
    'antibody_name': datasets.Value("string"),
    'antigen_lineage': datasets.Value("string"),
    'target_value': datasets.Value("float"),
    'target_type': datasets.Value("string"),
    'source_name': datasets.Value("string"),
    'source_doi': datasets.Value("string"),
    'assay_name': datasets.Value("string"),
    'pdb_id': datasets.Value("string"),
    'structure_release_date': datasets.Value("string"),
    'structure_resolution': datasets.Value("float"),
    'mutations': datasets.Value("string"),
    'antigen_chain_ids': datasets.Value("string"),
    'antigen_domain': datasets.Value("string"),
    'antigen_residue_indices': datasets.Value("string"),
    'antigen_residue_indices_trimmed': datasets.Value("string"),
    'antigen_host': datasets.Value("string"),
    'antibody_heavy_chain_id': datasets.Value("string"),
    'antibody_light_chain_id': datasets.Value("string"),
    'epitope_residues': datasets.Value("string"),
    'epitope_mutations': datasets.Value("string"),
    'epitope_domain': datasets.Value("string"),
    'epitope_alteration_count': datasets.Value("string"),
    'spike_sequence': datasets.Value("string"),
    'antibody_heavy_chain_sequence': datasets.Value("string"),
    'antibody_light_chain_sequence': datasets.Value("string"),
    'antibody_vh_sequence': datasets.Value("string"),
    'antibody_vl_sequence': datasets.Value("string"),
    'antigen_sequence': datasets.Value("string"),
    'antigen_sequence_trimmed': datasets.Value("string"),
    'antigen_sequence_without_indels': datasets.Value("string"),
    'antigen_sequence_trimmed_without_indels': datasets.Value("string"),
    'antigen_pdb_sequence': datasets.Value("string"),
    'antigen_pdb_sequence_trimmed': datasets.Value("string"),
}
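
# Schema for the antibody synonym table: antibody aliases with their CDR
# sequences and epitope annotations.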
_ANTIBODY_SYNONYMS_FEATURES = {
    'antibody_name': datasets.Value("string"),
    'pdb_id': datasets.Value("string"),
    'antibody_heavy_chain_cdr1': datasets.Value("string"),
    'antibody_heavy_chain_cdr2': datasets.Value("string"),
    'antibody_heavy_chain_cdr3': datasets.Value("string"),
    'antibody_light_chain_cdr1': datasets.Value("string"),
    'antibody_light_chain_cdr2': datasets.Value("string"),
    'antibody_light_chain_cdr3': datasets.Value("string"),
    'epitope_residues': datasets.Value("string"),
    'epitope_domain': datasets.Value("string"),
}
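
# Maps each builder config name to its parquet file (path relative to the
# repository root) and the feature schema used to decode it.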
_TABLES = {
    "antibody_synonyms": {
        "file": "antibody_info/antibody_synonyms_with_epitopes.parquet",
        "features": _ANTIBODY_SYNONYMS_FEATURES,
    },
    "drdb": {
        "file": "data/drdb_binding_potency.parquet",
        "features": {**_FEATURES},
    },
    "covabdab": {
        "file": "data/covabdab_binding.parquet",
        # CoV-AbDab records binding as a yes/no outcome, so target_value is
        # overridden to a boolean for this table.
        "features": {
            **_FEATURES,
            "target_value": datasets.Value("bool"),
        },
    },
    "dms_bloom": {
        "file": "data/dms_bloom_ab_escape.parquet",
        "features": {**_FEATURES},
    },
    "dms_cao": {
        "file": "data/dms_cao_ab_escape.parquet",
        "features": {**_FEATURES},
    },
    "jian_elisa": {
        "file": "data/jian_elisa_ab_ic50.parquet",
        "features": {**_FEATURES},
    },
    "spr": {
        "file": "data/spr_ab_affinity.parquet",
        "features": {**_FEATURES},
    },
}
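
# Builder plumbing: one BuilderConfig is generated per table, so users select
# a table by passing its name as the dataset config.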
class CovUniBindConfig(datasets.BuilderConfig):
    """BuilderConfig for a single CovUniBind table."""

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)


class CovUniBind(datasets.GeneratorBasedBuilder):
    """Builder exposing each parquet table in _TABLES as its own config."""

    BUILDER_CONFIGS = [
        CovUniBindConfig(name=table, description=f"{table} subset") for table in _TABLES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(_TABLES[self.config.name]["features"]),
        )
    def _split_generators(self, dl_manager):
        # Resolve the parquet file for the selected config; relative paths are
        # fetched from the dataset repository itself.
        file_path = _TABLES[self.config.name]["file"]
        data_path = dl_manager.download_and_extract(file_path)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_path}),
        ]

    def _generate_examples(self, filepath):
        df = pd.read_parquet(filepath)
        # Yield one example per parquet row, keyed by the dataframe index.
        for idx, row in df.iterrows():
            yield idx, row.to_dict()
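
# Minimal usage sketch (the hub id "user/CovUniBind" below is a placeholder,
# not part of this file; substitute the real repository path):
#
#   from datasets import load_dataset
#   ds = load_dataset("user/CovUniBind", "drdb", trust_remote_code=True)
#   print(ds["train"][0]["antibody_name"], ds["train"][0]["target_value"])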