Muennighoff committed on
Commit 62e6492
1 Parent(s): a403da0
Files changed (1):
  humaneval-x-bugs.py (+8 −7)
humaneval-x-bugs.py CHANGED
@@ -57,14 +57,14 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
             name="python",
             description="Python HumanEvalBugs",
             features=[
-                "task_id", "prompt", "declaration", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
             ]
         ),
         HumanEvalXBugsConfig(
             name="cpp",
             description="C++ HumanEvalBugs",
             features=[
-                "task_id", "prompt", "declaration", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
             ]
         ),
 
@@ -72,14 +72,14 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
             name="go",
             description="Go HumanEvalBugs",
             features=[
-                "task_id", "prompt", "declaration", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
             ]
         ),
         HumanEvalXBugsConfig(
             name="java",
             description="Java HumanEvalBugs",
             features=[
-                "task_id", "prompt", "declaration", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
             ]
         ),
 
@@ -87,7 +87,7 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
             name="js",
             description="JavaScript HumanEvalBugs",
             features=[
-                "task_id", "prompt", "declaration", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
+                "task_id", "prompt", "declaration", "buggy_solution", "canonical_solution", "test", "example_test", "bug_type", "failure_symptoms", "entry_point"
             ]
         ),
     ]
@@ -138,11 +138,12 @@ class HumanEvalXBugs(datasets.GeneratorBasedBuilder):
                 "task_id": row["task_id"],
                 "prompt": row["prompt"],
                 "declaration": row["declaration"],
+                "buggy_solution": row["buggy_solution"],
                 "canonical_solution": row["canonical_solution"],
-                "test": row["test"],
-                "example_test": row["example_test"],
                 "bug_type": row["bug_type"],
                 "failure_symptoms": row["failure_symptoms"],
                 "entry_point": row["entry_point"],
+                "test": row["test"],
+                "example_test": row["example_test"],
             }
             key += 1
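
For reference, a minimal sketch of how the updated dataset could be consumed once this change is in place. The repository id ("Muennighoff/humaneval-x-bugs") and the "test" split name are assumptions for illustration, not taken from this commit; the field names come from the builder above.

# Minimal usage sketch; repo id and split are assumed, adjust to the actual layout.
from datasets import load_dataset

ds = load_dataset("Muennighoff/humaneval-x-bugs", "python", split="test")

sample = ds[0]
# Fields emitted by the generator, including the newly added "buggy_solution":
buggy_program = sample["declaration"] + sample["buggy_solution"]
fixed_program = sample["declaration"] + sample["canonical_solution"]
print(sample["task_id"], sample["bug_type"], sample["failure_symptoms"])
print(buggy_program)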