Mikolaj Pudo committed
Commit
5a9f628
1 Parent(s): 784e9d0
Files changed (1)
  1. MOCKS-test.py +208 -0
MOCKS-test.py ADDED
@@ -0,0 +1,208 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Datasets Authors.
+ #
+ # Licensed under the Creative Commons Attribution License version 4.0 and the
+ # Mozilla Public License version 2.0 (the "Licenses"); you may not use this
+ # file except in compliance with the Licenses.
+ # You may obtain copies of the Licenses at
+ #
+ # https://creativecommons.org/licenses/by/4.0/
+ # and https://www.mozilla.org/en-US/MPL/2.0/
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the Licenses is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the Licenses for the specific language governing permissions and
+ # limitations under the Licenses.
+
+ # Lint as: python3
+
+ import csv
+ import os
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ @inproceedings{pudo23_interspeech,
+     author={Mikołaj Pudo and Mateusz Wosik and Adam Cieślak and Justyna Krzywdziak and Bożena Łukasiak and Artur Janicki},
+     title={{MOCKS} 1.0: Multilingual Open Custom Keyword Spotting Testset},
+     year={2023},
+     booktitle={Proc. Interspeech 2023},
+ }
+ """
+
+ _DESCRIPTION = """\
+ Multilingual Open Custom Keyword Spotting Testset (MOCKS) is a comprehensive
+ audio testset for evaluating and benchmarking Open-Vocabulary Keyword Spotting (OV-KWS) models.
+ """
+
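+ # Each template below takes three placeholders: the subset language (e.g. "en"),
+ # the subset name (e.g. "LS-clean") and the recording mode ("offline" or "online"),
+ # resolving to paths relative to the dataset repository, e.g.
+ # "en/LS-clean/test/offline/data.tar.gz".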
+ #_BASE_URL = "https://huggingface.co/datasets/voiceintelligenceresearch/MOCKS/tree/main"
+ _BASE_URL = "https://huggingface.co/datasets/mikolaj-p/MOCKS-test/tree/main"
+ _DL_URLS_TEMPLATE = {
+     "data": "%s/%s/test/%s/data.tar.gz",
+     "transcription": "%s/%s/test/data_%s_transcription.tsv",
+     "positive": "%s/%s/test/%s/all.pair.positive.tsv",
+     "similar": "%s/%s/test/%s/all.pair.similar.tsv",
+     "different": "%s/%s/test/%s/all.pair.different.tsv",
+     "positive_subset": "%s/%s/test/%s/subset.pair.positive.tsv",
+     "similar_subset": "%s/%s/test/%s/subset.pair.similar.tsv",
+     "different_subset": "%s/%s/test/%s/subset.pair.different.tsv",
+ }
+
+ _MOCKS_SETS = [
+     "en.LS-clean",
+ ]
+ # "en.LS-other",
+ # "en.MCV",
+ # "de.MCV",
+ # "es.MCV",
+ # "fr.MCV",
+ # "it.MCV"]
+
+ _MOCKS_SUFFIXES = [
+     "",
+     ".positive",
+     ".similar",
+     ".different",
+     ".subset",
+     ".positive_subset",
+     ".similar_subset",
+     ".different_subset",
+ ]
+
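+ # Builder configs combine each subset with each suffix: e.g. "en.LS-clean" loads
+ # all pair types, "en.LS-clean.similar" only the similar pairs, and
+ # "en.LS-clean.subset" the smaller subset.pair.* lists.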
+ class Mocks(datasets.GeneratorBasedBuilder):
+     """MOCKS dataset builder."""
+
+     DEFAULT_CONFIG_NAME = "en.LS-clean"
+
+     BUILDER_CONFIGS = [datasets.BuilderConfig(name=subset + suffix, description=subset + suffix)
+                        for subset in _MOCKS_SETS for suffix in _MOCKS_SUFFIXES]
+
+     def _info(self):
+         logger.info("Generating dataset info")
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 "keyword_id": datasets.Value("string"),
+                 "keyword_transcription": datasets.Value("string"),
+                 "test_id": datasets.Value("string"),
+                 "test_transcription": datasets.Value("string"),
+                 "test_audio": datasets.Audio(sampling_rate=16000),
+                 "label": datasets.Value("bool"),
+             }),
+             homepage=_BASE_URL,
+             citation=_CITATION,
+         )
+
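+     # Only the "offline" recordings are wired up below; the commented-out blocks
+     # show how the matching "online" split would be generated.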
+     def _split_generators(self, dl_manager):
+         logger.info("Generating splits")
+         name_split = self.config.name.split(".")
+         subset_lang = name_split[0]
+         subset_name = name_split[1]
+
+         if len(name_split) == 2:
+             pairs_types = ["positive", "similar", "different"]
+         elif name_split[2] == "subset":
+             pairs_types = ["positive_subset", "similar_subset", "different_subset"]
+         else:
+             pairs_types = [name_split[2]]
+
+         offline_archive_path = dl_manager.download({
+             k: v % (subset_lang, subset_name, "offline")
+             for k, v in _DL_URLS_TEMPLATE.items()
+         })
+         # online_archive_path = dl_manager.download({
+         #     k: v % (subset_lang, subset_name, "online")
+         #     for k, v in _DL_URLS_TEMPLATE.items()
+         # })
+
+         split_offline = [datasets.SplitGenerator(
+             name="offline",
+             gen_kwargs={
+                 "audio_files": dl_manager.iter_archive(offline_archive_path["data"]),
+                 "transcription_keyword": offline_archive_path["transcription"],
+                 "transcription_test": offline_archive_path["transcription"],
+                 "pairs": [offline_archive_path[pair_type] for pair_type in pairs_types],
+             },
+         )]
+
+         # split_online = [datasets.SplitGenerator(
+         #     name="online",
+         #     gen_kwargs={
+         #         "audio_files": dl_manager.iter_archive(online_archive_path["data"]),
+         #         "transcription_keyword": offline_archive_path["transcription"],
+         #         "transcription_test": online_archive_path["transcription"],
+         #         "pairs": [online_archive_path[pair_type] for pair_type in pairs_types],
+         #     },
+         # )]
+
+         # return split_offline + split_online
+         return split_offline
+
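+     # Transcription TSVs are tab-separated with a header row; each following row
+     # holds an audio path and its transcription, keyed here by the file name.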
+     def _read_transcription(self, transcription_path):
+         transcription_metadata = {}
+
+         with open(transcription_path, encoding="utf-8") as f:
+             reader = csv.reader(f, delimiter="\t")
+             next(reader, None)  # skip the header row
+
+             for row in reader:
+                 _, audio_id = os.path.split(row[0])
+                 transcription = row[1]
+                 transcription_metadata[audio_id] = {
+                     "audio_id": audio_id,
+                     "transcription": transcription,
+                 }
+
+         return transcription_metadata
+
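+     # Pair TSVs are tab-separated with a header row; each following row holds the
+     # keyword audio path, the test audio path and, in its last column, the pair label.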
+     def _generate_examples(self, audio_files, transcription_keyword, transcription_test, pairs):
+         transcription_keyword_metadata = self._read_transcription(transcription_keyword)
+         transcription_test_metadata = self._read_transcription(transcription_test)
+
+         # Map each test utterance to the keywords it is paired with, plus the label.
+         pair_metadata = {}
+         for pair in pairs:
+             with open(pair, encoding="utf-8") as f:
+                 reader = csv.reader(f, delimiter="\t")
+                 next(reader, None)  # skip the header row
+
+                 for row in reader:
+                     _, keyword_id = os.path.split(row[0])
+                     _, test_id = os.path.split(row[1])
+
+                     if keyword_id not in transcription_keyword_metadata:
+                         logger.error("No transcription or audio for keyword %s", keyword_id)
+                         continue
+                     if test_id not in transcription_test_metadata:
+                         logger.error("No transcription or audio for test case %s", test_id)
+                         continue
+
+                     if test_id not in pair_metadata:
+                         pair_metadata[test_id] = []
+
+                     pair_metadata[test_id].append([keyword_id, int(row[-1])])
+
+         # Stream the audio archive once, yielding one example per (keyword, test) pair.
+         id_ = 0
+         for test_path, test_f in audio_files:
+             _, test_id = os.path.split(test_path)
+             if test_id in pair_metadata:
+                 test_audio = {"bytes": test_f.read()}
+                 for keyword_id, label in pair_metadata[test_id]:
+                     yield id_, {
+                         "keyword_id": keyword_id,
+                         "keyword_transcription": transcription_keyword_metadata[keyword_id]["transcription"],
+                         "test_id": test_id,
+                         "test_transcription": transcription_test_metadata[test_id]["transcription"],
+                         "test_audio": test_audio,
+                         "label": label,
+                     }
+                     id_ += 1
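
A minimal usage sketch (untested; it assumes the script stays hosted at mikolaj-p/MOCKS-test and that your datasets version accepts trust_remote_code for community loading scripts):

import datasets

# "en.LS-clean" is the default config; suffixed configs such as
# "en.LS-clean.similar" restrict loading to a single pair type.
mocks = datasets.load_dataset("mikolaj-p/MOCKS-test", "en.LS-clean",
                              split="offline", trust_remote_code=True)

sample = mocks[0]
print(sample["keyword_transcription"], sample["test_transcription"], sample["label"])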