Datasets:

Modalities:
Text
Formats:
parquet
Libraries:
Datasets
pandas
File size: 3,061 Bytes
9c13850
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import os
import pandas as pd 
import datasets
from glob import glob
import zipfile

class dummy(datasets.GeneratorBasedBuilder):
	"""Builder for the ArSarcasMoji dataset (Arabic sarcasm / emoji sentiment CSV).

	Downloads a single CSV from GitHub and exposes it as one TRAIN split in
	which every column is a plain string feature.
	"""

	# The 17 expected CSV columns, in file order. Single source of truth for
	# _info(), the column re-labeling, and the yielded example dicts.
	_COLUMNS = ['Unnamed: 0', 'Dataset_Source', 'Emoji_Text', 'E_Sentiment_Scores', 'E_Sentiment_Labels', 'Plain_Text', 'P_Sentiment_Scores', 'P_Sentiment_Labels', 'Emoji_Sentiment_Roles', 'Tokens', 'Words', 'Emoji_Patterns', 'Emoji_Count', 'Emoji_Load', 'Emoji_Sentiment_Scores', 'Emoji_Sentiment_Labels', 'Irony_Labels']

	def _info(self):
		"""Declare the schema: every column is exposed as a string Value."""
		return datasets.DatasetInfo(features=datasets.Features({col: datasets.Value('string') for col in self._COLUMNS}))

	def extract_all(self, dir):
		"""Extract every .zip archive found (recursively) under *dir*,
		unpacking each one into the directory that contains it.

		NOTE(review): unused by the current download flow; kept for interface
		compatibility.
		"""
		for file in glob(dir + '/**/**.zip', recursive=True):
			with zipfile.ZipFile(file) as item:
				# Extract next to the archive itself.
				item.extractall(os.path.dirname(file))

	def get_all_files(self, dir):
		"""Return all files under *dir* (recursive) whose extension is in the
		whitelist of known data/media formats.

		NOTE(review): unused by the current download flow; kept for interface
		compatibility.
		"""
		valid_file_ext = ['txt', 'csv', 'tsv', 'xlsx', 'xls', 'xml', 'json', 'jsonl', 'html', 'wav', 'mp3', 'jpg', 'png']
		files = []
		for ext in valid_file_ext:
			files += glob(f"{dir}/**/**.{ext}", recursive=True)
		return files

	def _split_generators(self, dl_manager):
		"""Download the raw CSV from GitHub and return a single TRAIN split."""
		url = ['https://raw.githubusercontent.com/ShathaHakami/ArSarcasMoji-Dataset/main/ArSarcasMoji.csv']
		downloaded_files = dl_manager.download(url)
		return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': {'inputs': downloaded_files}})]

	def _generate_examples(self, filepaths):
		"""Yield (id, example) pairs, one per CSV row, across all input files.

		Fixes vs. the original:
		- `error_bad_lines=False` was removed in pandas 2.0; replaced with the
		  equivalent `on_bad_lines='skip'` (pandas >= 1.3).
		- the original passed an `open(filepath, 'rb')` handle to read_csv and
		  never closed it; passing the path lets pandas manage the file.
		"""
		_id = 0
		for filepath in filepaths['inputs']:
			df = pd.read_csv(filepath, sep=',', skiprows=0, header=0, on_bad_lines='skip')
			# Guard against an unexpected schema: silently skip files that do
			# not have exactly the 17 expected columns (original behavior).
			if len(df.columns) != len(self._COLUMNS):
				continue
			df.columns = self._COLUMNS
			for _, record in df.iterrows():
				yield str(_id), {col: record[col] for col in self._COLUMNS}
				_id += 1