# coding=utf-8
# Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NINJAL Ainu folklore corpus"""
import os
import json
import datasets
_DESCRIPTION = ""
_CITATION = ""
_HOMEPAGE_URL = ""
_BASE_PATH = "data/"
_DATA_URL = _BASE_PATH + "audio/{split}.tar.gz"
_META_URL = _BASE_PATH + "{split}.json"
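# For example, _DATA_URL.format(split="train") expands to "data/audio/train.tar.gz";
# the download manager resolves such relative paths against the dataset
# repository root.
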
class AinuFolkloreConfig(datasets.BuilderConfig):
    def __init__(self, name, **kwargs):
        super().__init__(name=name, version=datasets.Version("0.0.0", ""), **kwargs)


class AinuFolklore(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [AinuFolkloreConfig("all")]

    def _info(self):
        task_templates = None
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "transcription": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "surface": datasets.Value("string"),
                "underlying": datasets.Value("string"),
                "gloss": datasets.Value("string"),
                "translation": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcription"),
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
            task_templates=task_templates,
        )

    def _split_generators(self, dl_manager):
        splits = ["train", "dev", "test"]
        data_urls = {split: [_DATA_URL.format(split=split)] for split in splits}
        meta_urls = {split: [_META_URL.format(split=split)] for split in splits}
        archive_paths = dl_manager.download(data_urls)
        # In streaming mode the audio archives are read on the fly via
        # iter_archive, so there is nothing to extract to local disk.
        local_extracted_archives = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        archive_iters = {
            split: [dl_manager.iter_archive(path) for path in paths]
            for split, paths in archive_paths.items()
        }
        meta_paths = dl_manager.download(meta_urls)

        # The three split generators only differ in the split name, so build
        # them in one pass instead of repeating the gen_kwargs block.
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archives": local_extracted_archives.get(
                        split, [None] * len(meta_paths[split])
                    ),
                    "archive_iters": archive_iters[split],
                    "text_paths": meta_paths[split],
                },
            )
            for split in splits
        ]

    def _generate_examples(self, local_extracted_archives, archive_iters, text_paths):
        assert len(local_extracted_archives) == len(archive_iters) == len(text_paths)
        key = 0
        for archive, text_path, local_extracted_path in zip(
            archive_iters, text_paths, local_extracted_archives
        ):
            # The per-split JSON maps audio file names to example metadata
            # (transcription, speaker, glossing, translation, ...).
            with open(text_path, encoding="utf-8") as fin:
                data = json.load(fin)
            for audio_path, audio_file in archive:
                audio_filename = audio_path.split("/")[-1]
                if audio_filename not in data:
                    continue
                result = data[audio_filename]
                # Point "path" at the locally extracted file when available
                # (non-streaming mode); otherwise keep the in-archive path.
                extracted_audio_path = (
                    os.path.join(local_extracted_path, audio_filename)
                    if local_extracted_path is not None
                    else None
                )
                result["audio"] = {
                    "path": extracted_audio_path or audio_path,
                    "bytes": audio_file.read(),
                }
                yield key, result
                key += 1
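
# Minimal usage sketch (assuming this script lives in a Hugging Face dataset
# repository; "owner/NINJAL-Ainu-Folklore" below is a hypothetical repo id):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("owner/NINJAL-Ainu-Folklore", "all", split="train")
#   sample = ds[0]
#   print(sample["transcription"], sample["audio"]["sampling_rate"])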