junyinc commited on
Commit
ba08a60
·
1 Parent(s): 00511a2

add rest of the files

Browse files
Files changed (3) hide show
  1. .gitattributes +3 -0
  2. NINJAL-Ainu-Folklore.py +137 -0
  3. README.md +40 -0
.gitattributes CHANGED
@@ -55,3 +55,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
55
  data/audio/dev.tar.gz filter=lfs diff=lfs merge=lfs -text
56
  data/audio/test.tar.gz filter=lfs diff=lfs merge=lfs -text
57
  data/audio/train.tar.gz filter=lfs diff=lfs merge=lfs -text
 
 
 
 
55
  data/audio/dev.tar.gz filter=lfs diff=lfs merge=lfs -text
56
  data/audio/test.tar.gz filter=lfs diff=lfs merge=lfs -text
57
  data/audio/train.tar.gz filter=lfs diff=lfs merge=lfs -text
58
+ data/dev.json filter=lfs diff=lfs merge=lfs -text
59
+ data/test.json filter=lfs diff=lfs merge=lfs -text
60
+ data/train.json filter=lfs diff=lfs merge=lfs -text
NINJAL-Ainu-Folklore.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """NINJAL Ainu folklore corpus"""
16
+
17
+ import os
18
+ import json
19
+
20
+ import datasets
21
+
22
+
23
+ _DESCRIPTION = ""
24
+ _CITATION = ""
25
+ _HOMEPAGE_URL = ""
26
+
27
+ _BASE_PATH = "data/"
28
+ _DATA_URL = _BASE_PATH + "audio/{split}.tar.gz"
29
+ _META_URL = _BASE_PATH + "{split}.json"
30
+
31
+
32
class AinuFolkloreConfig(datasets.BuilderConfig):
    """BuilderConfig for the NINJAL Ainu Folklore corpus."""

    def __init__(self, name, **kwargs):
        # Pin a fixed version; all remaining keyword arguments flow
        # straight through to datasets.BuilderConfig.
        version = datasets.Version("0.0.0", "")
        super().__init__(name=name, version=version, **kwargs)
37
class AinuFolklore(datasets.GeneratorBasedBuilder):
    """Loader for the NINJAL Glossed Audio Corpus of Ainu folklore.

    Audio is shipped as one tar.gz archive per split under ``data/audio/``;
    per-split JSON metadata files live under ``data/`` and are keyed by the
    audio filename inside the archive.
    """

    BUILDER_CONFIGS = [AinuFolkloreConfig("all")]

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=16_000),
                "transcription": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "surface": datasets.Value("string"),
                "underlying": datasets.Value("string"),
                "gloss": datasets.Value("string"),
                "translation": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "transcription"),
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(self, dl_manager):
        """Download audio archives and metadata, one SplitGenerator per split."""
        splits = ["train", "dev", "test"]

        data_urls = {split: [_DATA_URL.format(split=split)] for split in splits}
        meta_urls = {split: [_META_URL.format(split=split)] for split in splits}

        archive_paths = dl_manager.download(data_urls)
        # In streaming mode the archives are read on the fly via iter_archive;
        # otherwise extract them so examples can point at real local files.
        local_extracted_archives = (
            dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
        )
        archive_iters = {
            split: [dl_manager.iter_archive(path) for path in paths]
            for split, paths in archive_paths.items()
        }

        meta_paths = dl_manager.download(meta_urls)

        # One generator per split; direct indexing (rather than .get) raises a
        # clear KeyError if a download result is missing instead of a confusing
        # TypeError from len(None).
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={
                    "local_extracted_archives": local_extracted_archives.get(
                        split, [None] * len(meta_paths[split])
                    ),
                    "archive_iters": archive_iters[split],
                    "text_paths": meta_paths[split],
                },
            )
            for split in splits
        ]

    def _generate_examples(self, local_extracted_archives, archive_iters, text_paths):
        """Yield (key, example) pairs for one split.

        Args:
            local_extracted_archives: per-archive extraction dirs, or ``None``
                entries when streaming.
            archive_iters: iterators over (path, file-object) archive members.
            text_paths: paths to the JSON metadata files, one per archive.
        """
        # Use an explicit exception rather than assert (asserts vanish under -O).
        if not (len(local_extracted_archives) == len(archive_iters) == len(text_paths)):
            raise ValueError(
                "Mismatched counts of extracted archives, archive iterators, "
                "and metadata files"
            )
        key = 0

        for archive, text_path, local_extracted_path in zip(
            archive_iters, text_paths, local_extracted_archives
        ):
            with open(text_path, encoding="utf-8") as fin:
                data = json.load(fin)

            for audio_path, audio_file in archive:
                audio_filename = audio_path.split("/")[-1]
                if audio_filename not in data:
                    # Archive member without a metadata entry: skip it.
                    continue

                result = data[audio_filename]
                # Prefer the real on-disk path when the archive was extracted
                # (non-streaming); fall back to the in-archive path otherwise.
                # (The original computed this path but never used it.)
                resolved_path = (
                    os.path.join(local_extracted_path, audio_filename)
                    if local_extracted_path is not None
                    else audio_path
                )
                result["audio"] = {"path": resolved_path, "bytes": audio_file.read()}
                yield key, result
                key += 1
README.md CHANGED
@@ -1,3 +1,43 @@
1
  ---
2
  license: cc-by-sa-4.0
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: cc-by-sa-4.0
3
  ---
4
+
5
+ # Dataset Card for NINJAL Ainu Folklore
6
+
7
+ ## Dataset Description
8
+
9
+ - **Original source:** [A Glossed Audio Corpus of Ainu Folklore](https://ainu.ninjal.ac.jp/folklore/en/)
10
+
11
+ ### Dataset Summary
12
+
13
+ Ainu is an endangered (nearly extinct) language spoken in Hokkaido, Japan. This dataset contains recordings of 38 traditional Ainu folktales by two Ainu speakers (Mrs. Kimi Kimura and Mrs. Ito Oda), along with their transcriptions (in Latin script), English translations, and underlying and surface gloss forms in English. (For transcriptions in Katakana and translation/gloss in Japanese, please see the original corpus webpage.) In total, there are over 8 hours (~7.7k sentences) of transcribed and glossed speech.
14
+
15
+ ### Annotations
16
+
17
+ The glosses in this dataset are the original glosses from the Glossed Audio Corpus, with minor changes to fit the Generalized Glossing Format (e.g. multi-word translations of individual morphemes are now separated by underscores instead of periods). Uncertainty in interpretation by the original annotators is indicated with a question mark (?). Additional notes on the Latin transcriptions in the corpus can be found on the original corpus webpage (under the "Structure, Transcriptions, and Glosses" tab).
18
+
19
+ ## Additional Information
20
+
21
+ ### Limitations
22
+
23
+ This dataset has a small number of speakers and a limited domain, and models trained on this dataset might not be suitable for general purpose applications. The audio data contain varying degrees of noise which makes this dataset a poor fit for training TTS models.
24
+
25
+ ### Acknowledgement
26
+
27
+ We would like to thank the original authors of the Glossed Audio Corpus of Ainu Folklore for their dedication and care in compiling these resources, and kindly ask anyone who uses this dataset to cite them in their work.
28
+
29
+ ### License
30
+
31
+ Attribution-ShareAlike 4.0 International ([cc-by-sa-4.0](https://creativecommons.org/licenses/by-sa/4.0/))
32
+
33
+ ### Original Source
34
+
35
+ ```
36
+ @misc{ninjal-ainu-folklore,
37
+ title={A Glossed Audio Corpus of Ainu Folklore},
38
+ url={https://ainu.ninjal.ac.jp/folklore/},
39
+ author={Nakagawa, Hiroshi and Bugaeva, Anna and Kobayashi, Miki and Yoshikawa, Yoshimi},
40
+ publisher={The National Institute for Japanese Language and Linguistics ({NINJAL})},
41
+ date={2016--2021}
42
+ }
43
+ ```