import csv
import os
import json

import datasets
from datasets.utils.py_utils import size_str
from tqdm import tqdm

from .languages import LANGUAGES
from .release_stats import STATS


_BASE_URL = "https://huggingface.co/datasets/Seon25/hausa_2_eng_2/resolve/main/"

_AUDIO_URL = _BASE_URL + "audio/ha/{split}/ha_{split}_{shard_idx}.tar"

_TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
_N_SHARDS_URL = _BASE_URL + "n_shards.json"
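# `n_shards.json` is expected to map language -> split -> number of audio tar shards,
# e.g. {"ha": {"train": 1, "dev": 1, "test": 1, "other": 0, "invalidated": 0}}
# (illustrative values only); _split_generators below only relies on the nesting
# n_shards[lang][split].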


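# Builder config referenced by BUILDER_CONFIGS below. This is a sketch inferred from
# the keyword arguments used there and modeled on the Common Voice loader this script
# is derived from; treat the exact field handling as an assumption.
class Hausa2EngConfig(datasets.BuilderConfig):
    """BuilderConfig for Hausa2Eng."""

    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes)
        description = (
            f"Speech dataset in {self.language} released on {self.release_date}. "
            f"The dataset comprises {self.validated_hr} hours of validated transcribed speech "
            f"out of {self.total_hr} hours in total from {self.num_speakers} speakers. "
            f"It contains {self.num_clips} audio clips and has a size of {self.size_human}."
        )
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )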

class Hausa2Eng(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        Hausa2EngConfig(
            name=lang,
            version=STATS["version"],
            language=LANGUAGES[lang],
            release_date=STATS["date"],
            num_clips=lang_stats["clips"],
            num_speakers=lang_stats["users"],
            validated_hr=float(lang_stats["validHrs"]) if lang_stats["validHrs"] else None,
            total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
            size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
        )
        for lang, lang_stats in STATS["locales"].items()
    ]

    def _info(self):
        total_languages = len(STATS["locales"])
        total_valid_hours = STATS["totalValidHrs"]
        description = (
            "Common Voice is Mozilla's initiative to help teach machines how real people speak. "
            f"The dataset currently consists of {total_valid_hours} validated hours of speech "
            f"in {total_languages} languages, but more voices and languages are always added."
        )
        features = datasets.Features(
            {
                "client_id": datasets.Value("string"),
                "path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
                "up_votes": datasets.Value("int64"),
                "down_votes": datasets.Value("int64"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accent": datasets.Value("string"),
                "locale": datasets.Value("string"),
                "segment": datasets.Value("string"),
                "variant": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        lang = self.config.name
        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
        with open(n_shards_path, encoding="utf-8") as f:
            n_shards = json.load(f)

        audio_urls = {}
        splits = ("train", "dev", "test", "other", "invalidated")
        for split in splits:
            audio_urls[split] = [
                _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
            ]
        archive_paths = dl_manager.download(audio_urls)
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}

        meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
        meta_paths = dl_manager.download_and_extract(meta_urls)

        split_generators = []
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        for split in splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=split_names.get(split, split),
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
                        "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
                        "meta_path": meta_paths[split],
                    },
                ),
            )

        return split_generators

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        data_fields = list(self._info().features.keys())
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                if not row["path"].endswith(".mp3"):
                    row["path"] += ".mp3"
                # accent -> accents in CV 8.0
                if "accents" in row:
                    row["accent"] = row["accents"]
                    del row["accents"]
                # if data is incomplete, fill with empty values
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["path"]] = row

        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                if filename in metadata:
                    result = dict(metadata[filename])
                    # set the audio feature and the path to the extracted file
                    path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
                    result["audio"] = {"path": path, "bytes": file.read()}
                    result["path"] = path
                    yield path, result
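# Usage sketch (from a separate script or notebook, not this loader module). The repo id
# and the "ha" config name are taken from the URLs and configs above; `streaming=True` is
# just an assumption for inspecting a few examples without downloading every shard.
#
#     import datasets
#
#     ds = datasets.load_dataset("Seon25/hausa_2_eng_2", "ha", split="train", streaming=True)
#     for example in ds.take(2):
#         print(example["path"], example["sentence"])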