# NOTE(review): the following imports are unused in this script
# (isalpha, os, csv, re, Sequence, json, ast) — candidates for removal,
# kept here to avoid breaking anything that may import them from this module.
from curses.ascii import isalpha
import os
import csv
import re
from typing import Sequence
import json
import ast

import datasets

_DESCRIPTION = """\
Example dataset toxic
"""

# PhoATIS (syllable-level) test split: one raw utterance per line in
# `seq.in`, and the matching intent label per line in `label`.
_LABEL = "https://raw.githubusercontent.com/VinAIResearch/JointIDSF/main/PhoATIS/syllable-level/test/label"
_LINES = "https://raw.githubusercontent.com/VinAIResearch/JointIDSF/main/PhoATIS/syllable-level/test/seq.in"


class Config(datasets.BuilderConfig):
    """BuilderConfig for the PhoATIS intent-classification wrapper."""

    def __init__(self, data_url, label_url, **kwargs):
        """BuilderConfig.

        Args:
            data_url: `string`, url to the raw utterances (one per line).
            label_url: `string`, url to the matching intent labels (one per line).
            **kwargs: keyword arguments forwarded to super.
        """
        super(Config, self).__init__(
            version=datasets.Version("1.0.0"),
            **kwargs,
        )
        self.data_url = data_url
        self.label_url = label_url


class Guess(datasets.GeneratorBasedBuilder):
    """Builder that recasts PhoATIS intent detection as a multiple-choice
    ("guess the class") task: each example carries the 16 Vietnamese class
    descriptions paired with the utterance, plus the index of the true class.
    """

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        Config(
            name="all",
            data_url=_LINES,
            label_url=_LABEL,
            description="data",
        )
    ]

    def _info(self):
        """Return the DatasetInfo describing the example schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Fixed Vietnamese prompt text.
                    "text": datasets.Value("string"),
                    # One "<class description>: <utterance>" string per class.
                    "classes": datasets.Sequence(datasets.Value("string")),
                    # Index of the correct class in [0, 15].
                    "target": datasets.Value("int8"),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download the data/label files and define the single TEST split."""
        data_file = dl_manager.download(self.config.data_url)
        label_file = dl_manager.download(self.config.label_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": data_file, "label_file": label_file},
            ),
        ]

    def _generate_examples(self, data_file, label_file):
        """Yield (idx, example) pairs, pairing each utterance with its label.

        Lines whose label is not one of the 16 known intents are printed
        and skipped (best-effort filtering, as in the original).
        """
        # Intent name -> integer target, indices 0..15.
        # BUG FIX: "flight_no" was 19 (typo) — with 16 classes the targets
        # must cover 0..15, and flight_no sits between flight (8) and
        # flight_time (10), so the correct index is 9.
        _CLASS = {
            "abbreviation": 0,
            "aircraft": 1,
            "airfare": 2,
            "airline": 3,
            "airport": 4,
            "capacity": 5,
            "city": 6,
            "distance": 7,
            "flight": 8,
            "flight_no": 9,
            "flight_time": 10,
            "ground_fare": 11,
            "ground_service": 12,
            "meal": 13,
            "quantity": 14,
            "restriction": 15,
        }
        # Vietnamese description for each class, aligned with _CLASS indices.
        _TEXT = [
            "mã",
            "loại máy bay",
            "giá vé",
            "hãng hàng không",
            "sân bay",
            "sức chứa máy bay",
            "địa điểm",
            "khoảng cách",
            "chuyến bay",
            "số hiệu bay",
            "thời gian bay",
            "giá dịch vụ",
            "dịch vụ",
            "suất ăn",
            "số lượng",
            "hạn chế",
        ]
        # Explicit utf-8: the files contain Vietnamese text, so relying on
        # the platform default encoding would break on some systems.
        with open(data_file, "r", encoding="utf-8") as f:
            lines = f.read().splitlines()
        with open(label_file, "r", encoding="utf-8") as f:
            labels = f.read().splitlines()
        for idx, (line, label) in enumerate(zip(lines, labels)):
            if label not in _CLASS:
                # Unknown intent (e.g. multi-label rows): report and skip.
                print(line, label)
                continue
            yield idx, {
                "text": 'Cho xin thông tin ',
                "classes": [f'{_cl}: {line}' for _cl in _TEXT],
                "target": _CLASS[label],
            }