Text Generation · English · Eval Results
Commit c056808
zifei9 committed
1 Parent(s): ec3a086

testing modeling_gpt2.py overwrite

Files changed (2)
  1. config.json +41 -37
  2. configuration_gpt2.py +277 -0
config.json CHANGED
@@ -1,38 +1,42 @@
 {
-  "_num_labels": 1,
-  "activation_function": "gelu_new",
-  "architectures": [
-    "GPT2LMHeadModel"
-  ],
-  "attn_pdrop": 0.1,
-  "bos_token_id": 50256,
-  "embd_pdrop": 0.1,
-  "eos_token_id": 50256,
-  "id2label": {
-    "0": "LABEL_0"
-  },
-  "initializer_range": 0.02,
-  "label2id": {
-    "LABEL_0": 0
-  },
-  "layer_norm_epsilon": 1e-05,
-  "model_type": "gpt2",
-  "n_ctx": 1024,
-  "n_embd": 768,
-  "n_head": 12,
-  "n_layer": 6,
-  "n_positions": 1024,
-  "resid_pdrop": 0.1,
-  "summary_activation": null,
-  "summary_first_dropout": 0.1,
-  "summary_proj_to_labels": true,
-  "summary_type": "cls_index",
-  "summary_use_proj": true,
-  "task_specific_params": {
-    "text-generation": {
-      "do_sample": true,
-      "max_length": 50
-    }
-  },
-  "vocab_size": 50257
-}
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "auto_map": {
+    "AutoConfig": "configuration_gpt2.GPT2Config",
+    "AutoModelForCausalLM": "modeling_gpt2.GPT2LMHeadModel"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "resid_pdrop": 0.1,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "vocab_size": 50257
+}
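
The only substantive change to config.json is the new "auto_map" block, which tells the Auto classes to load GPT2Config from configuration_gpt2.py and GPT2LMHeadModel from modeling_gpt2.py shipped with the repository instead of the built-in transformers classes. A minimal usage sketch follows; the repository id is a placeholder (not part of this commit), and trust_remote_code=True is what allows the custom code to be imported:

    from transformers import AutoConfig, AutoModelForCausalLM

    repo_id = "your-namespace/your-gpt2-repo"  # hypothetical placeholder, not the real repo id

    # trust_remote_code=True lets the Auto classes follow auto_map and import the
    # repo's configuration_gpt2.GPT2Config / modeling_gpt2.GPT2LMHeadModel.
    config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)
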
configuration_gpt2.py ADDED
@@ -0,0 +1,277 @@
+# coding=utf-8
+# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" OpenAI GPT-2 configuration"""
+from collections import OrderedDict
+from typing import Any, List, Mapping, Optional
+
+from ... import PreTrainedTokenizer, TensorType, is_torch_available
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfigWithPast, PatchingSpec
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "gpt2": "https://huggingface.co/gpt2/resolve/main/config.json",
+    "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/config.json",
+    "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/config.json",
+    "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/config.json",
+    "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/config.json",
+}
+
+
+class GPT2Config(PretrainedConfig):
+    """
+    This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
+    instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the GPT-2
+    [gpt2](https://huggingface.co/gpt2) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 50257):
+            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
+        n_positions (`int`, *optional*, defaults to 1024):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        n_embd (`int`, *optional*, defaults to 768):
+            Dimensionality of the embeddings and hidden states.
+        n_layer (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        n_head (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        n_inner (`int`, *optional*):
+            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd.
+        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
+            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
+        resid_pdrop (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        embd_pdrop (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the embeddings.
+        attn_pdrop (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the attention.
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+            The epsilon to use in the layer normalization layers.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        summary_type (`string`, *optional*, defaults to `"cls_index"`):
+            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
+            [`TFGPT2DoubleHeadsModel`].
+
+            Has to be one of the following options:
+
+            - `"last"`: Take the last token hidden state (like XLNet).
+            - `"first"`: Take the first token hidden state (like BERT).
+            - `"mean"`: Take the mean of all tokens hidden states.
+            - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
+            - `"attn"`: Not implemented now, use multi-head attention.
+        summary_use_proj (`bool`, *optional*, defaults to `True`):
+            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
+            [`TFGPT2DoubleHeadsModel`].
+
+            Whether or not to add a projection after the vector extraction.
+        summary_activation (`str`, *optional*):
+            Argument used when doing sequence summary. Used for the multiple choice head in
+            [`GPT2DoubleHeadsModel`].
+
+            Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
+        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
+            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
+            [`TFGPT2DoubleHeadsModel`].
+
+            Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
+        summary_first_dropout (`float`, *optional*, defaults to 0.1):
+            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
+            [`TFGPT2DoubleHeadsModel`].
+
+            The dropout ratio to be used after the projection and activation.
+        scale_attn_weights (`bool`, *optional*, defaults to `True`):
+            Scale attention weights by dividing by sqrt(hidden_size).
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models).
+        bos_token_id (`int`, *optional*, defaults to 50256):
+            Id of the beginning of sentence token in the vocabulary.
+        eos_token_id (`int`, *optional*, defaults to 50256):
+            Id of the end of sentence token in the vocabulary.
+        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
+            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
+        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
+            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
+            dot-product/softmax to float() when training with mixed precision.
+
+    Example:
+
+    ```python
+    >>> from transformers import GPT2Config, GPT2Model
+
+    >>> # Initializing a GPT2 configuration
+    >>> configuration = GPT2Config()
+
+    >>> # Initializing a model (with random weights) from the configuration
+    >>> model = GPT2Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "gpt2"
+    keys_to_ignore_at_inference = ["past_key_values"]
+    attribute_map = {
+        "hidden_size": "n_embd",
+        "max_position_embeddings": "n_positions",
+        "num_attention_heads": "n_head",
+        "num_hidden_layers": "n_layer",
+    }
+
+    def __init__(
+        self,
+        vocab_size=50257,
+        n_positions=1024,
+        n_embd=768,
+        n_layer=12,
+        n_head=12,
+        n_inner=None,
+        activation_function="gelu_new",
+        resid_pdrop=0.1,
+        embd_pdrop=0.1,
+        attn_pdrop=0.1,
+        layer_norm_epsilon=1e-5,
+        initializer_range=0.02,
+        summary_type="cls_index",
+        summary_use_proj=True,
+        summary_activation=None,
+        summary_proj_to_labels=True,
+        summary_first_dropout=0.1,
+        scale_attn_weights=True,
+        use_cache=True,
+        bos_token_id=50256,
+        eos_token_id=50256,
+        scale_attn_by_inverse_layer_idx=False,
+        reorder_and_upcast_attn=False,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.n_positions = n_positions
+        self.n_embd = n_embd
+        self.n_layer = n_layer
+        self.n_head = n_head
+        self.n_inner = n_inner
+        self.activation_function = activation_function
+        self.resid_pdrop = resid_pdrop
+        self.embd_pdrop = embd_pdrop
+        self.attn_pdrop = attn_pdrop
+        self.layer_norm_epsilon = layer_norm_epsilon
+        self.initializer_range = initializer_range
+        self.summary_type = summary_type
+        self.summary_use_proj = summary_use_proj
+        self.summary_activation = summary_activation
+        self.summary_first_dropout = summary_first_dropout
+        self.summary_proj_to_labels = summary_proj_to_labels
+        self.scale_attn_weights = scale_attn_weights
+        self.use_cache = use_cache
+        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
+        self.reorder_and_upcast_attn = reorder_and_upcast_attn
+
+        self.bos_token_id = bos_token_id
+        self.eos_token_id = eos_token_id
+
+        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+
+class GPT2OnnxConfig(OnnxConfigWithPast):
+    def __init__(
+        self,
+        config: PretrainedConfig,
+        task: str = "default",
+        patching_specs: List[PatchingSpec] = None,
+        use_past: bool = False,
+    ):
+        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
+        if not getattr(self._config, "pad_token_id", None):
+            # TODO: how to do that better?
+            self._config.pad_token_id = 0
+
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
+        if self.use_past:
+            self.fill_with_past_key_values_(common_inputs, direction="inputs")
+            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
+        else:
+            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
+
+        return common_inputs
+
+    @property
+    def num_layers(self) -> int:
+        return self._config.n_layer
+
+    @property
+    def num_attention_heads(self) -> int:
+        return self._config.n_head
+
+    def generate_dummy_inputs(
+        self,
+        tokenizer: PreTrainedTokenizer,
+        batch_size: int = -1,
+        seq_length: int = -1,
+        is_pair: bool = False,
+        framework: Optional[TensorType] = None,
+    ) -> Mapping[str, Any]:
+        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
+            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+        )
+
+        # We need to order the inputs in the way they appear in the forward()
+        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
+
+        # Need to add the past_keys
+        if self.use_past:
+            if not is_torch_available():
+                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+            else:
+                import torch
+
+                batch, seqlen = common_inputs["input_ids"].shape
+                # Not using the same length for past_key_values
+                past_key_values_length = seqlen + 2
+                past_shape = (
+                    batch,
+                    self.num_attention_heads,
+                    past_key_values_length,
+                    self._config.hidden_size // self.num_attention_heads,
+                )
+                ordered_inputs["past_key_values"] = [
+                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
+                ]
+
+        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
+        if self.use_past:
+            mask_dtype = ordered_inputs["attention_mask"].dtype
+            ordered_inputs["attention_mask"] = torch.cat(
+                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+            )
+
+        return ordered_inputs
+
+    @property
+    def default_onnx_opset(self) -> int:
+        return 13
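
For reference, a small sketch of what the classes added above provide. It imports the upstream transformers copies of GPT2Config and GPT2OnnxConfig (an assumption, so the snippet runs without this repository on the path; the file in this commit mirrors them), and uses values matching this checkpoint's config.json (n_layer=6, n_embd=768, n_head=12):

    # Sketch only: uses the upstream transformers classes that this file mirrors.
    from transformers.models.gpt2.configuration_gpt2 import GPT2Config, GPT2OnnxConfig

    config = GPT2Config(n_layer=6, n_embd=768, n_head=12, n_positions=1024)

    # attribute_map aliases: generic names resolve to the GPT-2 specific fields.
    assert config.hidden_size == config.n_embd == 768
    assert config.num_hidden_layers == config.n_layer == 6

    # ONNX export config with past key/values enabled.
    onnx_config = GPT2OnnxConfig(config, task="default", use_past=True)
    print(onnx_config.num_layers)           # 6
    print(onnx_config.num_attention_heads)  # 12
    print(onnx_config.default_onnx_opset)   # 13
    print(list(onnx_config.inputs))         # input_ids, per-layer past key/value entries, attention_mask
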