QizhiPei committed
Commit f78450b
Parent: 16195f6

delete file

Files changed (2):
  1. .gitignore +2 -1
  2. test_task_all.py +0 -336
.gitignore CHANGED
@@ -1,2 +1,3 @@
 delete_tasks.sh
-renumber_tasks.py
+renumber_tasks.py
+test_all_tasks.py
test_task_all.py DELETED
@@ -1,336 +0,0 @@
#!/usr/bin/env python3
from iso639 import languages
import json
from os import listdir
from os.path import isfile, join
import argparse
import re
import numpy as np

# get the range of tasks to test, if specified on the command line
parser = argparse.ArgumentParser()
parser.add_argument("--task",
                    nargs=2,
                    type=int,
                    required=False,
                    help="The range of tasks you want to parse")

args = parser.parse_args()
if args.task:
    begin_task_number, end_task_number = args.task[0], args.task[1]
    assert begin_task_number > 0, "begin task must be greater than 0"
    assert end_task_number > begin_task_number, "please specify a range of tasks you would like to test; i.e. the end task number must be greater than the beginning task number"

# make sure that there is no JSON file in the root directory
root_files = [f for f in listdir('.') if isfile(join('.', f))]
for f in root_files:
    assert '.json' not in f, 'looks like there is a JSON file in the main directory???'

# read all the tasks and make sure that they follow the right pattern
tasks_path = 'tasks/'
# tasks_path = 'tasks_special_token/'

expected_keys = [
    "Definition",
    "Input_language",
    "Output_language",
    "Positive Examples",
    "Negative Examples",
    "Instances",
    "Contributors",
    "Categories",
    "Domains",
    "Source",
    "URL",
    "Reasoning"
]

language_names = [
    x.name.replace('(individual language)', '').replace(" languages", "").strip()
    for x in list(languages)
]


def assert_language_name(name):
    assert name in language_names, f"Did not find `{name}` among iso639 language names: {language_names}"


def extract_categories(string):
    """
    Get all the substrings that appear between backticks.
    """
    return set(re.findall(r'`(.*?)`', string))
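# e.g. extract_categories("`Question Answering` and `Translation`")
# returns {'Question Answering', 'Translation'}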


def dict_raise_on_duplicates(ordered_pairs):
    """Reject duplicate keys."""
    d = {}
    for k, v in ordered_pairs:
        if k in d:
            raise ValueError("duplicate key: %r" % (k,))
        else:
            d[k] = v
    return d
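# (used below as json.load's object_pairs_hook, so a task file that contains
# the same key twice raises ValueError instead of silently keeping the last value)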


def atoi(text):
    return int(text) if text.isdigit() else text


def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    '''
    return [atoi(c) for c in re.split(r'(\d+)', text)]
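# e.g. natural_keys("task12_foo.json") -> ['task', 12, '_foo.json'], so
# "task2_..." sorts before "task10_..." (numeric rather than lexicographic order)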


with open("tasks/README.md", 'r') as readmef:
    task_readme_content = " ".join(readmef.readlines())
with open("doc/task-hierarchy.md", 'r') as readmef:
    hierarchy_content_lines = readmef.readlines()
    hierarchy_content = " ".join(hierarchy_content_lines)
    all_categories = extract_categories(hierarchy_content)

# make sure there are no repeated lines in the task file
task_readme_lines = [x for x in task_readme_content.split("\n") if len(x) > 5]
if len(set(task_readme_lines)) != len(task_readme_lines):
    diff = "\n --> ".join([x for x in task_readme_lines if task_readme_lines.count(x) > 1])
    assert False, f'looks like there are repeated lines in the task readme file?? \n {diff}'

# make sure that the lines are sorted
task_numbers = [int(line.replace("`task", "").split("_")[0]) for line in task_readme_lines if "`task" in line]
for i in range(0, len(task_numbers) - 1):
    num1 = task_numbers[i]
    num2 = task_numbers[i + 1]
    assert num1 <= num2, f"ERROR: looks like `{num1}` appears before `{num2}`."

files = [f for f in listdir(tasks_path) if isfile(join(tasks_path, f))]
files.sort(key=natural_keys)

# make sure anything that gets mentioned in the readme corresponds to an actual file
task_names = [line.split("`")[1] for line in task_readme_lines if '`' in line]
for name in task_names:
    file_name = name + ".json"
    # pqz
    # assert file_name in files, f" Did not find `{file_name}` among {files}"

# test every file (README is skipped)
if not args.task:
    begin_task_number, end_task_number = 1, len(files)

# TODO: over time, we need to fix the skew of the following tasks
skew_exclusion = [
    "150"
]

contributor_stats = {}
categories_stats = {}
reasoning_stats = {}
domain_stats = {}
tasks_count = 0
number_of_instances = 0
for file in files[begin_task_number:end_task_number + 1]:
    if ".json" in file:
        print(f" --> testing file: {file}")
        assert '.json' in file, 'the file does not seem to have a .json in it: ' + file
        file_path = tasks_path + file
        with open(file_path, 'r') as f:
            try:
                data = json.load(f)
            except:
                # fall back to an explicit re-read that also rejects duplicate keys
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f, object_pairs_hook=dict_raise_on_duplicates)
        for key in expected_keys:
            assert key in data, f'did not find the key: {key}'

        assert len(data['Instances']) > 25, f"there must be more than 25 instances; " \
                                            f"currently you have {len(data['Instances'])} instances"
        # pqz
        # assert len(data['Instances']) <= 6500, f"there must be at most 6.5k instances; " \
        #                                        f"currently you have {len(data['Instances'])} instances"

        assert type(data['Definition']) == list, f'Definition must be a list of strings.'
        assert type(data['Source']) == list, f'Source must be a list.'
        assert type(data['URL']) == list, f'URL must be a list.'
        assert type(data['Contributors']) == list, f'Contributors must be a list.'
        assert type(data['Categories']) == list, f'Categories must be a list.'
        assert type(data['Reasoning']) == list, f'Reasoning must be a list.'
        assert type(data['Domains']) == list, f'Domains must be a list.'

        number_of_instances = number_of_instances + len(data['Instances'])
        for c in data['Categories']:
            assert c in all_categories, f'Did not find category `{c}`'
            if c not in categories_stats:
                categories_stats[c] = 0
            categories_stats[c] += 1

        for d in data['Domains']:
            assert d in hierarchy_content, f'Did not find domain `{d}`'
            if d not in domain_stats:
                domain_stats[d] = 0
            domain_stats[d] += 1

        for r in data['Reasoning']:
            assert r in hierarchy_content, f'Did not find reasoning `{r}`'
            if r not in reasoning_stats:
                reasoning_stats[r] = 0
            reasoning_stats[r] += 1

        for d in data['Definition']:
            assert type(d) == str, f'Each definition must be a string.'
            assert all((lan in d) for lan in data['Input_language'] if
                       lan != 'English'), f'Definition must mention each non-English input language.'
        assert type(data['Input_language']) == list, f'Input_language must be a list of strings.'
        assert type(data['Output_language']) == list, f'Output_language must be a list of strings.'
        assert type(data['Instruction_language']) == list, f'Instruction_language must be a list of strings.'

        assert 'instruction_language' not in data, f'Found `instruction_language`, but expected `Instruction_language`.'
        assert 'input_language' not in data, f'Found `input_language`, but expected `Input_language`.'
        assert 'output_language' not in data, f'Found `output_language`, but expected `Output_language`.'

        # make sure we use the standard language names
        # pqz
        # for lang in data['Input_language'] + data['Output_language'] + data['Instruction_language']:
        #     assert_language_name(lang)

        instance_ids = set()
        for x in data['Instances']:
            for key in ['id', 'input', 'output']:
                assert key in x, f'expected the key {key} in {x}'
            assert x['id'] not in instance_ids, f'found duplicate instance id: {x["id"]}'
            instance_ids.add(x['id'])
            assert type(x['input']) == str, f'the input of instance {x} is not a string'
            assert type(x['output']) == list, f'the output of instance {x} is not a list'
            assert len(x['input']) > 0, f"looks like an input `{x['input']}` is empty?"
            assert len(x['output']) > 0, f"looks like an output `{x['output']}` is empty?"
            for i in x['output']:
                assert type(i) == str, f'the output is not a string'
        # pqz
        # assert len(data['Positive Examples']) > 1, "there must be at least 2 positive examples"
        # assert len(data['Negative Examples']) > 0, "there must be at least 1 negative example"

        for x in data['Positive Examples'] + data['Negative Examples']:
            for key in ['input', 'output', 'explanation']:
                assert key in x, f'expected the key {key} in {x}'
            assert type(x['input']) == str, f'the input of example {x} is not a string'
            assert type(x['output']) == str, f'the output of example {x} is not a string'
            assert type(x['explanation']) == str, f'the explanation of example {x} is not a string'

        # Make sure there are no repeated input examples
        instances = data['Instances']
        set_of_instances = {instance['input'] for instance in instances}
        # If the set length and the total length differ, there must be a duplicate input
        # pqz
        # if len(instances) != len(set_of_instances):
        #     for instance in instances:
        #         # If the instance is a duplicate, it has already been removed from the set and a KeyError will be thrown
        #         try:
        #             set_of_instances.remove(instance['input'])
        #         except KeyError:
        #             raise Exception(f" * Looks like we have a repeated example here! "
        #                             f"Merge outputs before removing duplicates. :-/ \n {instance}")

        # Make sure there are no links in instances
        url_reg = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        instances = data['Instances']
        for instance in instances:
            ck_url = re.findall(url_reg, instance['input'])
            if ck_url:
                print(f'⚠️ WARNING: Looks like there is a link in the input: {ck_url}')
                break
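        # (re.findall returns every non-overlapping match, so ck_url lists all
        # URLs in this input; the break stops after the first offending instance)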

        # make sure classes are balanced
        output = [ins['output'] for ins in instances]
        # flattens the nested arrays
        outputs = sum(output, [])
        value, counts = np.unique(outputs, return_counts=True)

        task_number = file.replace("task", "").split("_")[0]
        # TODO: drop this condition
        if int(task_number) not in [902, 903]:
            assert len(value) > 1, f" Looks like all the instances are mapped to a single output: {value}"

        if task_number not in skew_exclusion and len(value) < 15:
            norm_counts = counts / counts.sum()
            entropy = -(norm_counts * np.log(norm_counts) / np.log(len(value))).sum()
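            # norm_counts is the empirical class distribution; dividing by
            # np.log(len(value)) normalizes the entropy to [0, 1], where 1.0 means
            # perfectly balanced classes (counts [50, 50] give 1.0; [90, 10] give ~0.47)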
            # pqz
            # assert entropy > 0.8, f"Looks like this task is heavily skewed!\n 📋 classes: {value} \n 📋 Norm_counts: {norm_counts} \n 📋 Distribution of classes: {counts} \n 📊 entropy= {entropy}"

        # Make sure there are no examples repeated across instances and positive examples
        examples = [ex['input'] for ex in data['Positive Examples']]
        for instance in instances:
            if instance['input'] in examples:
                raise Exception(f" * Looks like we have the same example across positive examples and instances! "
                                f"Drop the example from the instances. :-/ \n {instance}")

            assert len(instance['output']) > 0, "all the instances must have at least one output"

        true_file = file.replace(".json", "")
        for char in true_file:
            if char.isupper():
                raise Exception(f" * Looks like there is an uppercase letter in `{true_file}`. "
                                f"All letters should be lowercased.")

        if file in task_readme_content:
            raise Exception(f" * Looks like the .json file extension is "
                            f"present with the task name in `tasks/README.md` when it should just be `{true_file}`")

        # pqz
        # if true_file not in task_readme_content:
        #     raise Exception(f' * Looks like the task name `{true_file}` is not included '
        #                     f'in the task file `tasks/README.md`')

        if task_readme_content.count(true_file) > 1:
            raise Exception(f' * Looks like the task name `{true_file}` is repeated in '
                            f'the task file `tasks/README.md`')

        for c in data['Contributors']:
            if c not in contributor_stats:
                contributor_stats[c] = 0
            contributor_stats[c] += 1
        tasks_count = tasks_count + 1

# test the official splits
# pqz
# for split_name in ["mol_text"]:
#     train_tasks = [l.strip() for l in open(f"splits/{split_name}/train_tasks.txt")]
#     test_tasks = [l.strip() for l in open(f"splits/{split_name}/test_tasks.txt")]
#     excluded_tasks = [l.strip() for l in open(f"splits/{split_name}/excluded_tasks.txt")]

# make sure each task in the split actually exists
# pqz
# for task in train_tasks + test_tasks + excluded_tasks:
#     assert task in task_names, f" Task {task} doesn't exist, but it's included in the {split_name} split."
# make sure each task appears in the split
# pqz
# for task in task_names:
#     assert task in train_tasks + test_tasks + excluded_tasks, f" Task {task} is missing in the {split_name} split."
# make sure there is no overlap between test and train task names in the splits files
# assert len(set(train_tasks) & set(test_tasks)) == 0, f" {split_name} split has overlapping tasks in the train & test sets."
# assert len(set(train_tasks) & set(excluded_tasks)) == 0, f" {split_name} split has overlapping tasks in the train & excluded sets."
# assert len(set(test_tasks) & set(excluded_tasks)) == 0, f" {split_name} split has overlapping tasks in the test & excluded sets."

print("Did not find any errors! ✅")

print("\n - - - - - Contributors >= 25 tasks - - - - - ")
keyvalues = sorted(list(contributor_stats.items()), key=lambda x: x[1])
for author, count in keyvalues:
    if count >= 25:
        print(f" ✍️ {author} -> {count}")

print("\n - - - - - Category Stats - - - - - ")
keyvalues = sorted(list(categories_stats.items()), key=lambda x: x[1])
for cat, count in keyvalues:
    print(f" ✍️ {cat} -> {count}")

print("\n - - - - - Domain Stats - - - - - ")
keyvalues = sorted(list(domain_stats.items()), key=lambda x: x[1])
for dom, count in keyvalues:
    print(f" ✍️ {dom} -> {count}")

print("\n - - - - - Reasoning Stats - - - - - ")
keyvalues = sorted(list(reasoning_stats.items()), key=lambda x: x[1])
for res, count in keyvalues:
    print(f" ✍️ {res} -> {count}")

print("\n - - - - - Instances Stats - - - - - ")
average_number_of_instances = number_of_instances / tasks_count
print(f" ✍️ Average number of Instances -> {average_number_of_instances}")