BDas committed on
Commit 4fc057f
1 Parent(s): 62e93f0

Create new file

Files changed (1)
  1. ner-tr.py +155 -0
ner-tr.py ADDED
@@ -0,0 +1,155 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """The ner-tr Entities Dataset."""
+
+
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ aa
+ }
+ """
+
+ _DESCRIPTION = """\
+ aa
+ """
+
+ _URL = "https://raw.githubusercontent.com/BihterDass/named/main/"
+ _TRAINING_FILE = "train.conll"
+ _DEV_FILE = "train.conll"
+ _TEST_FILE = "train.conll"
+
+
+ class NERTRConfig(datasets.BuilderConfig):
+     """BuilderConfig for the NERTR Entities Dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for NERTR.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(NERTRConfig, self).__init__(**kwargs)
+
+
+ class NERTR(datasets.GeneratorBasedBuilder):
+     """The NERTR Entities Dataset."""
+
+     BUILDER_CONFIGS = [
+         NERTRConfig(
+             name="NERTR", version=datasets.Version("1.0.0"), description="The NERTR Entities Dataset"
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-DepositProduct",
+                                 "I-DepositProduct",
+                                 "B-Product",
+                                 "I-Product",
+                                 "B-ProductProblemInfo",
+                                 "I-ProductProblemInfo",
+                                 "B-ServiceInformation",
+                                 "I-ServiceInformation",
+                                 "B-ServiceClosest",
+                                 "I-ServiceClosest",
+                                 "B-Location",
+                                 "I-Location",
+                                 "B-ServiceNumber",
+                                 "I-ServiceNumber",
+                                 "B-Brand",
+                                 "I-Brand",
+                                 "B-Campaign",
+                                 "I-Campaign",
+                                 "B-ProductSelector",
+                                 "I-ProductSelector",
+                                 "B-SpecialCampaign",
+                                 "I-SpecialCampaign",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/BihterDass/named",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             current_tokens = []
+             current_labels = []
+             sentence_counter = 0
+             for row in f:
+                 row = row.rstrip()
+                 if row:
+                     token, label = row.split("\t")
+                     current_tokens.append(token)
+                     current_labels.append(label)
+                 else:
+                     # New sentence
+                     if not current_tokens:
+                         # Consecutive empty lines would otherwise yield empty sentences
+                         continue
+                     assert len(current_tokens) == len(current_labels), "💔 mismatch between number of tokens and labels"
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_tokens,
+                             "ner_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_tokens = []
+                     current_labels = []
+                     yield sentence
+             # Don't forget the last sentence in the dataset 🧐
+             if current_tokens:
+                 yield sentence_counter, {
+                     "id": str(sentence_counter),
+                     "tokens": current_tokens,
+                     "ner_tags": current_labels,
+                 }
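
For context, a minimal usage sketch of how this loading script is consumed, assuming it is saved locally as ner-tr.py and run with a `datasets` version that still supports script-based loading. Note that `_TRAINING_FILE`, `_DEV_FILE`, and `_TEST_FILE` all point at the same `train.conll`, so the train, validation, and test splits currently load identical data. The parser in `_generate_examples` expects CoNLL-style input: one tab-separated token/label pair per line, with blank lines marking sentence boundaries. The sample tokens in the comments below are illustrative, not taken from the dataset.

    # Sketch: load the dataset through this script and inspect one example.
    from datasets import load_dataset

    # Expected shape of train.conll (illustrative content):
    #
    #   kampanya<TAB>B-Campaign
    #   var<TAB>O
    #   mi<TAB>O
    #   <blank line marks the end of a sentence>
    #
    dataset = load_dataset("ner-tr.py")

    example = dataset["train"][0]
    print(example["tokens"])    # list of token strings
    print(example["ner_tags"])  # list of integer ClassLabel ids

    # Map the integer ids back to label names such as "B-Campaign".
    label_names = dataset["train"].features["ner_tags"].feature.names
    print([label_names[tag] for tag in example["ner_tags"]])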