import os

import datasets
from datasets import DownloadManager, DatasetInfo
from datasets.data_files import DataFilesDict
from huggingface_hub import HfApi

# Image extensions recognized inside the extracted tar shards.
_EXTENSION = [".png", ".jpg", ".jpeg", ".webp", ".bmp"]
# Hub repository and revision the data files are resolved from.
_NAME = "nyanko7/danbooru2023"
_REVISION = "main"


# Loading script for the nyanko7/danbooru2023 image dump.
class DanbooruDataset(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full"),
    ]

    def _info(self) -> DatasetInfo:
        features = {
            "image": datasets.Image(),
            "post_id": datasets.Value("int64"),
        }
        return datasets.DatasetInfo(
            features=datasets.Features(features),
            supervised_keys=None,
            citation="",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # List all files in the Hub repo at the pinned revision, keeping only
        # the tar shards ("tar" / ".tar" presumably covers both the dotted and
        # dot-less extension conventions across `datasets` versions).
        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
        data_files = DataFilesDict.from_hf_repo(
            {datasets.Split.TRAIN: ["**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=["tar", ".tar"],
        )
        gs = []
        for split, files in data_files.items():
            # Download and unpack every shard; the extracted directories are
            # handed to _generate_examples.
            downloaded_files = dl_manager.download_and_extract(files)
            gs.append(datasets.SplitGenerator(name=split, gen_kwargs={"filepath": downloaded_files}))
        return gs

    def _generate_examples(self, filepath):
        for path in filepath:
            # Collect every file under the extracted shard, relative to its root.
            all_fnames = {
                os.path.relpath(os.path.join(root, fname), start=path)
                for root, _dirs, files in os.walk(path)
                for fname in files
            }
            image_fnames = sorted(
                (fname for fname in all_fnames if os.path.splitext(fname)[1].lower() in _EXTENSION),
                reverse=True,
            )
            for image_fname in image_fnames:
                image_path = os.path.join(path, image_fname)
                # Files are named "<post_id>.<ext>", so the stem is the post id.
                post_id = int(os.path.splitext(os.path.basename(image_fname))[0])
                # Emit both features declared in _info; the key is the
                # relative filename, which is unique within the split.
                yield image_fname, {"image": image_path, "post_id": post_id}
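
# --- Usage sketch (an assumption, not part of the original script). A
# loading script like this is normally consumed through
# `datasets.load_dataset`; `trust_remote_code=True` is required on recent
# `datasets` releases before a repo's custom script is executed. Note that
# non-streaming loading downloads and extracts every tar shard, which for
# this dataset is very large. ---
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(_NAME, name="full", split="train", trust_remote_code=True)
    print(ds[0]["post_id"])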