maywell committed
Commit ee52d7d
1 Parent(s): 2dcdcf5

Upload 3 files

Files changed (3):
  1. configuration_minicpm.py +113 -0
  2. modeling_minicpmv.py +702 -0
  3. resampler.py +163 -0
configuration_minicpm.py ADDED
@@ -0,0 +1,113 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MiniCPM model configuration"""
import os
from typing import Union

from transformers.utils import logging
from transformers import LlamaConfig, PretrainedConfig
from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionConfig

logger = logging.get_logger(__name__)


class MiniCPMVSliceConfig(PretrainedConfig):
    model_type = "minicpmv"

    def __init__(
        self,
        patch_size=14,
        max_slice_nums=9,
        scale_resolution=448,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.max_slice_nums = max_slice_nums
        self.scale_resolution = scale_resolution

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "minicpmv":
            config_dict = config_dict["slice_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class MiniCPMVConfig(LlamaConfig):
    model_type = "minicpmv"
    keys_to_ignore_at_inference = ["past_key_values"]

    default_vision_config = {
        "hidden_size": 1152,
        "image_size": 980,
        "intermediate_size": 4304,
        "model_type": "idefics2",
        "num_attention_heads": 16,
        "num_hidden_layers": 27,
        "patch_size": 14,
    }

    def __init__(
        self,
        use_cache=True,
        query_num=64,
        image_size=448,
        drop_vision_last_layer=True,
        batch_vision_input=True,
        slice_config=None,
        vision_config=None,
        **kwargs,
    ):
        self.use_cache = use_cache
        self.query_num = query_num
        self.image_size = image_size
        self.drop_vision_last_layer = drop_vision_last_layer
        self.batch_vision_input = batch_vision_input

        if slice_config is None:
            self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
        else:
            self.slice_config = MiniCPMVSliceConfig(**slice_config)
        self.slice_mode = True

        # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
        if vision_config is None:
            self.vision_config = Idefics2VisionConfig(**self.default_vision_config)
            logger.info("vision_config is None, using default vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = Idefics2VisionConfig(**vision_config)
        elif isinstance(vision_config, Idefics2VisionConfig):
            self.vision_config = vision_config

        self.patch_size = self.vision_config.patch_size

        super().__init__(**kwargs)
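
A minimal sketch (not part of the commit) of how MiniCPMVConfig fills in its defaults; it assumes the file above is importable from the working directory and a transformers release that ships the Idefics2 classes:

from configuration_minicpm import MiniCPMVConfig

# no slice_config / vision_config passed: the class falls back to a
# single-slice MiniCPMVSliceConfig and the default Idefics2VisionConfig
config = MiniCPMVConfig(query_num=64, image_size=448)
print(config.slice_config.max_slice_nums)  # 1
print(config.vision_config.hidden_size)    # 1152
print(config.patch_size)                   # 14, copied from the vision config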
modeling_minicpmv.py ADDED
@@ -0,0 +1,702 @@
import math
from typing import List, Optional
import json
import torch
import torchvision
from threading import Thread
from copy import deepcopy
from PIL import Image
from torchvision import transforms
from transformers import LlamaTokenizer, LlamaPreTrainedModel, LlamaForCausalLM, AutoModel, PreTrainedTokenizerFast, TextIteratorStreamer
from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer

from .configuration_minicpm import MiniCPMVConfig
from .resampler import Resampler

IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)  # timm.data.IMAGENET_INCEPTION_MEAN
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)  # timm.data.IMAGENET_INCEPTION_STD

class MiniCPMVPreTrainedModel(LlamaPreTrainedModel):
    config_class = MiniCPMVConfig


class MiniCPMV(MiniCPMVPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.llm = LlamaForCausalLM(config)
        self.vpm = self.init_vision_module()
        self.vision_dim = self.vpm.embed_dim
        self.embed_dim = self.llm.config.hidden_size
        self.resampler = self.init_resampler(self.embed_dim, self.vision_dim)
        self.transform = self.init_transform()

    def init_vision_module(self):
        # same as HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
        model = Idefics2VisionTransformer(self.config.vision_config)
        if self.config.drop_vision_last_layer:
            model.encoder.layers = model.encoder.layers[:-1]

        setattr(model, 'embed_dim', model.embeddings.embed_dim)
        setattr(model, 'patch_size', model.embeddings.patch_size)

        return model

    def init_resampler(self, embed_dim, vision_dim):
        return Resampler(
            num_queries=self.config.query_num,
            embed_dim=embed_dim,
            num_heads=embed_dim // 128,
            kv_dim=vision_dim,
            adaptive=True
        )

    def init_transform(self):
        return transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD
                ),
            ]
        )

    def get_input_embeddings(self):
        return self.llm.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.llm.embed_tokens = value

    def get_vllm_embedding(self, data):
        if 'vision_hidden_states' not in data:
            dtype = self.vpm.embeddings.position_embedding.weight.dtype
            device = self.vpm.embeddings.position_embedding.weight.device
            tgt_sizes = data['tgt_sizes']
            pixel_values_list = data['pixel_values']
            vision_hidden_states = []
            all_pixel_values = []
            img_cnt = []
            for pixel_values in pixel_values_list:
                img_cnt.append(len(pixel_values))
                all_pixel_values.extend([i.flatten(end_dim=1).permute(1, 0) for i in pixel_values])

            # exist image
            if all_pixel_values:
                tgt_sizes = torch.vstack(tgt_sizes).type(torch.int32)

                if self.config.batch_vision_input:
                    max_patches = torch.max(tgt_sizes[:, 0] * tgt_sizes[:, 1])

                    all_pixel_values = torch.nn.utils.rnn.pad_sequence(all_pixel_values, batch_first=True,
                                                                       padding_value=0.0)
                    B, L, _ = all_pixel_values.shape
                    all_pixel_values = all_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)

                    patch_attn_mask = torch.zeros((B, 1, max_patches), dtype=torch.bool, device=device)
                    for i in range(B):
                        # mark only the valid (non-padded) patches of each image
                        patch_attn_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1]] = True

                    vision_embedding = self.vpm(all_pixel_values.type(dtype), patch_attention_mask=patch_attn_mask).last_hidden_state
                    vision_embedding = self.resampler(vision_embedding, tgt_sizes)
                else:
                    # get vision_embedding foreach
                    vision_embedding = []
                    for single_tgt_size, single_pixel_values in zip(tgt_sizes, all_pixel_values):
                        single_pixel_values = single_pixel_values.unsqueeze(0)
                        B, L, _ = single_pixel_values.shape
                        single_pixel_values = single_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
                        single_vision_embedding = self.vpm(single_pixel_values.type(dtype)).last_hidden_state
                        single_vision_embedding = self.resampler(single_vision_embedding, single_tgt_size.unsqueeze(0))
                        vision_embedding.append(single_vision_embedding)
                    vision_embedding = torch.vstack(vision_embedding)

                start = 0
                for pixel_values in pixel_values_list:
                    img_cnt = len(pixel_values)
                    if img_cnt > 0:
                        vision_hidden_states.append(vision_embedding[start: start + img_cnt])
                        start += img_cnt
                    else:
                        vision_hidden_states.append([])
            else:  # no image
                if self.training:
                    dummy_image = torch.zeros(
                        (1, 3, 224, 224),
                        device=device, dtype=dtype
                    )
                    tgt_sizes = torch.Tensor([[(224 // self.config.patch_size), math.ceil(224 / self.config.patch_size)]]).type(torch.int32)
                    dummy_feature = self.resampler(self.vpm(dummy_image).last_hidden_state, tgt_sizes)
                else:
                    dummy_feature = []
                for _ in range(len(pixel_values_list)):
                    vision_hidden_states.append(dummy_feature)

        else:
            vision_hidden_states = data['vision_hidden_states']

        if hasattr(self.llm.config, 'scale_emb'):
            vllm_embedding = self.llm.model.embed_tokens(data['input_ids']) * self.llm.config.scale_emb
        else:
            vllm_embedding = self.llm.model.embed_tokens(data['input_ids'])

        vision_hidden_states = [i.type(vllm_embedding.dtype) if isinstance(
            i, torch.Tensor) else i for i in vision_hidden_states]

        bs = len(data['input_ids'])
        for i in range(bs):
            cur_vs_hs = vision_hidden_states[i]
            if len(cur_vs_hs) > 0:
                cur_vllm_emb = vllm_embedding[i]
                cur_image_bound = data['image_bound'][i]
                if len(cur_image_bound) > 0:
                    image_indices = torch.stack(
                        [torch.arange(r[0], r[1], dtype=torch.long) for r in cur_image_bound]
                    ).to(vllm_embedding.device)

                    cur_vllm_emb.scatter_(0, image_indices.view(-1, 1).repeat(1, cur_vllm_emb.shape[-1]),
                                          cur_vs_hs.view(-1, cur_vs_hs.shape[-1]))
                elif self.training:
                    cur_vllm_emb += cur_vs_hs[0].mean() * 0

        return vllm_embedding, vision_hidden_states

    def forward(self, data, **kwargs):
        vllm_embedding, vision_hidden_states = self.get_vllm_embedding(data)
        position_ids = data["position_ids"]
        if position_ids.dtype != torch.int64:
            position_ids = position_ids.long()

        return self.llm(
            input_ids=None,
            position_ids=position_ids,
            inputs_embeds=vllm_embedding,
            **kwargs
        )

    def _convert_to_tensors(
        self, tokenizer, input_ids, max_inp_length: Optional[int] = None
    ):
        if max_inp_length is not None:
            input_ids = input_ids[:max_inp_length]
        input_ids = torch.tensor(input_ids, dtype=torch.int32)

        image_start_tokens = torch.where(input_ids == tokenizer.im_start_id)[0]
        # skip the im_start token itself
        image_start_tokens += 1
        image_end_tokens = torch.where(input_ids == tokenizer.im_end_id)[0]
        valid_image_nums = max(len(image_start_tokens), len(image_end_tokens))
        image_bound = torch.hstack(
            [
                image_start_tokens[:valid_image_nums].unsqueeze(-1),
                image_end_tokens[:valid_image_nums].unsqueeze(-1),
            ]
        )

        model_input = {}
        model_input["input_ids"] = input_ids.unsqueeze(0).to(self.device)
        model_input["image_bound"] = image_bound

        return model_input

    def _process_list(
        self, tokenizer, input_id_list, max_inp_length: Optional[int] = None
    ):
        pad_keys = ["input_ids"]
        input_tensors = []
        for input_ids in input_id_list:
            input_tensors.append(
                self._convert_to_tensors(tokenizer, input_ids, max_inp_length)
            )
        padded = {}
        for key in pad_keys:
            padded[key] = pad(input_tensors, key, padding_side="left").to(self.device)
        padded["image_bound"] = [i["image_bound"] for i in input_tensors]
        return padded

    def _decode(self, inputs_embeds, tokenizer, **kwargs):
        terminators = [
            tokenizer.eos_token_id,
            tokenizer.convert_tokens_to_ids("<|eot_id|>")
        ]
        output = self.llm.generate(
            inputs_embeds=inputs_embeds,
            pad_token_id=0,
            eos_token_id=terminators,
            **kwargs
        )
        return self._decode_text(output, tokenizer)

    def _decode_stream(self, inputs_embeds, tokenizer, **kwargs):
        terminators = [
            tokenizer.eos_token_id,
            tokenizer.convert_tokens_to_ids("<|eot_id|>")
        ]
        streamer = TextIteratorStreamer(tokenizer=tokenizer)
        generation_kwargs = {
            'inputs_embeds': inputs_embeds,
            'pad_token_id': 0,
            'eos_token_id': terminators,
            'streamer': streamer
        }
        generation_kwargs.update(kwargs)

        thread = Thread(target=self.llm.generate, kwargs=generation_kwargs)
        thread.start()

        return streamer

    def _decode_text(self, result_ids, tokenizer):
        result_text = []
        for result in result_ids:
            result = result[result != 0]
            if result[0] == tokenizer.bos_id:
                result = result[1:]
            if result[-1] == tokenizer.eos_id or result[-1] == tokenizer.eot_id:
                result = result[:-1]
            result_text.append(tokenizer.decode(result).strip())
        return result_text

    def slice_image(self, image):
        return slice_image(
            image,
            self.config.slice_config.max_slice_nums,
            self.config.slice_config.scale_resolution,
            self.config.slice_config.patch_size,
        )

    def get_slice_image_placeholder(self, image, tokenizer):
        image_placeholder = (
            tokenizer.im_start
            + tokenizer.unk_token * self.config.query_num
            + tokenizer.im_end
        )

        slice_images = []

        source_image, patches, best_grid = slice_image(
            image,
            self.config.slice_config.max_slice_nums,
            self.config.slice_config.scale_resolution,
            self.config.slice_config.patch_size,
        )

        slice_images.append(source_image)
        final_placeholder = image_placeholder

        if len(patches) > 0:
            for i in range(len(patches)):
                for j in range(len(patches[0])):
                    slice_images.append(patches[i][j])

            final_placeholder += get_grid_placeholder(
                tokenizer, best_grid, self.config.query_num
            )

        return slice_images, final_placeholder

    def reshape_by_patch(self, image_tensor):
        """
        :param image_tensor: shape [3, H, W]
        :param patch_size:
        :return: [3, patch_size, HW/patch_size]
        """
        patch_size = self.config.patch_size
        patches = torch.nn.functional.unfold(
            image_tensor,
            (patch_size, patch_size),
            stride=(patch_size, patch_size)
        )

        patches = patches.reshape(image_tensor.size(0), patch_size, patch_size, -1)
        patches = patches.permute(0, 1, 3, 2).reshape(image_tensor.size(0), patch_size, -1)
        return patches

    def generate(
        self,
        input_id_list=None,
        img_list=None,
        tgt_sizes=None,
        tokenizer=None,
        max_inp_length: Optional[int] = None,
        vision_hidden_states=None,
        return_vision_hidden_states=False,
        stream=False,
        **kwargs
    ):

        assert input_id_list is not None
        bs = len(input_id_list)
        if img_list is None:
            img_list = [[] for _ in range(bs)]
        assert bs == len(img_list)

        model_inputs = self._process_list(tokenizer, input_id_list, max_inp_length)

        if vision_hidden_states is None:
            pixel_values = []
            for i in range(bs):
                img_inps = []
                for img in img_list[i]:
                    img_inps.append(img.to(self.device))
                if img_inps:
                    pixel_values.append(img_inps)
                else:
                    pixel_values.append([])
            model_inputs["pixel_values"] = pixel_values
            model_inputs['tgt_sizes'] = tgt_sizes
        else:
            model_inputs["vision_hidden_states"] = vision_hidden_states

        with torch.inference_mode():
            (
                model_inputs["inputs_embeds"],
                vision_hidden_states,
            ) = self.get_vllm_embedding(model_inputs)

            if stream:
                result = self._decode_stream(model_inputs["inputs_embeds"], tokenizer, **kwargs)
            else:
                result = self._decode(model_inputs["inputs_embeds"], tokenizer, **kwargs)

        if return_vision_hidden_states:
            return result, vision_hidden_states

        return result

    def chat(
        self,
        image,
        msgs,
        tokenizer,
        vision_hidden_states=None,
        max_new_tokens=1024,
        sampling=True,
        max_inp_length=2048,
        system_prompt='',
        stream=False,
        **kwargs
    ):
        if isinstance(msgs, str):
            msgs = json.loads(msgs)

        copy_msgs = deepcopy(msgs)
        assert len(copy_msgs) > 0, 'msgs is empty'
        assert sampling or not stream, 'if use stream mode, make sure sampling=True'

        if image is not None and isinstance(copy_msgs[0]['content'], str):
            copy_msgs[0]['content'] = [image, copy_msgs[0]['content']]

        images = []
        tgt_sizes = []
        for i, msg in enumerate(copy_msgs):
            role = msg["role"]
            content = msg["content"]
            assert role in ["user", "assistant"]
            if i == 0:
                assert role == "user", "The role of first msg should be user"
            if isinstance(content, str):
                content = [content]

            cur_msgs = []
            for c in content:
                if isinstance(c, Image.Image):
                    image = c
                    if self.config.slice_mode:
                        slice_images, image_placeholder = self.get_slice_image_placeholder(
                            image, tokenizer
                        )
                        cur_msgs.append(image_placeholder)
                        for slice_image in slice_images:
                            slice_image = self.transform(slice_image)
                            H, W = slice_image.shape[1:]
                            images.append(self.reshape_by_patch(slice_image))
                            tgt_sizes.append(torch.Tensor([H // self.config.patch_size, W // self.config.patch_size]).type(torch.int32))
                    else:
                        images.append(self.transform(image))
                        cur_msgs.append(
                            tokenizer.im_start
                            + tokenizer.unk_token * self.config.query_num
                            + tokenizer.im_end
                        )
                elif isinstance(c, str):
                    cur_msgs.append(c)

            msg['content'] = '\n'.join(cur_msgs)
        if tgt_sizes:
            tgt_sizes = torch.vstack(tgt_sizes)

        if system_prompt:
            sys_msg = {'role': 'system', 'content': system_prompt}
            copy_msgs = [sys_msg] + copy_msgs

        input_ids = tokenizer.apply_chat_template(copy_msgs, tokenize=True, add_generation_prompt=False)

        if sampling:
            generation_config = {
                "top_p": 0.8,
                "top_k": 100,
                "temperature": 0.7,
                "do_sample": True,
                "repetition_penalty": 1.05
            }
        else:
            generation_config = {
                "num_beams": 3,
                "repetition_penalty": 1.2,
            }

        generation_config.update(
            (k, kwargs[k]) for k in generation_config.keys() & kwargs.keys()
        )

        with torch.inference_mode():
            res, vision_hidden_states = self.generate(
                input_id_list=[input_ids],
                max_inp_length=max_inp_length,
                img_list=[images],
                tgt_sizes=[tgt_sizes],
                tokenizer=tokenizer,
                max_new_tokens=max_new_tokens,
                vision_hidden_states=vision_hidden_states,
                return_vision_hidden_states=True,
                stream=stream,
                **generation_config
            )

        if stream:
            def stream_gen():
                for text in res:
                    text = text.replace(tokenizer.eot_token, '').replace(tokenizer.eos_token, '')
                    yield text
            return stream_gen()
        else:
            answer = res[0]
            return answer


class PreTrainedTokenizerFastWrapper(PreTrainedTokenizerFast):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.eot_token = "<|eot_id|>"
        self.im_start = "<image>"
        self.im_end = "</image>"
        self.ref_start = "<ref>"
        self.ref_end = "</ref>"
        self.box_start = "<box>"
        self.box_end = "</box>"
        self.quad_start = "<quad>"
        self.quad_end = "</quad>"
        self.slice_start = "<slice>"
        self.slice_end = "</slice>"

    @property
    def eos_id(self):
        return self.eos_token_id

    @property
    def bos_id(self):
        return self.bos_token_id

    @property
    def unk_id(self):
        return self.unk_token_id

    @property
    def eot_id(self):
        return self.convert_tokens_to_ids(self.eot_token)

    @property
    def im_start_id(self):
        return self.convert_tokens_to_ids(self.im_start)

    @property
    def im_end_id(self):
        return self.convert_tokens_to_ids(self.im_end)

    @staticmethod
    def escape(text: str) -> str:
        return text

    @staticmethod
    def unescape(text: str) -> str:
        return text


def pad(orig_items, key, max_length=None, padding_value=0, padding_side="left"):
    items = []
    if isinstance(orig_items[0][key], list):
        assert isinstance(orig_items[0][key][0], torch.Tensor)
        for it in orig_items:
            for tr in it[key]:
                items.append({key: tr})
    else:
        assert isinstance(orig_items[0][key], torch.Tensor)
        items = orig_items

    batch_size = len(items)
    shape = items[0][key].shape
    dim = len(shape)
    assert dim <= 3
    if max_length is None:
        max_length = 0
    max_length = max(max_length, max(item[key].shape[-1] for item in items))
    min_length = min(item[key].shape[-1] for item in items)
    dtype = items[0][key].dtype

    if dim == 1:
        return torch.cat([item[key] for item in items], dim=0)
    elif dim == 2:
        if max_length == min_length:
            return torch.cat([item[key] for item in items], dim=0)
        tensor = torch.zeros((batch_size, max_length), dtype=dtype) + padding_value
    else:
        tensor = (
            torch.zeros((batch_size, max_length, shape[-1]), dtype=dtype)
            + padding_value
        )

    for i, item in enumerate(items):
        if dim == 2:
            if padding_side == "left":
                tensor[i, -len(item[key][0]) :] = item[key][0].clone()
            else:
                tensor[i, : len(item[key][0])] = item[key][0].clone()
        elif dim == 3:
            if padding_side == "left":
                tensor[i, -len(item[key][0]) :, :] = item[key][0].clone()
            else:
                tensor[i, : len(item[key][0]), :] = item[key][0].clone()

    return tensor


def slice_image(
    image, max_slice_nums=9, scale_resolution=448, patch_size=14, never_split=False
):
    original_size = image.size
    original_width, original_height = original_size
    log_ratio = math.log(original_width / original_height)
    ratio = original_width * original_height / (scale_resolution * scale_resolution)
    multiple = min(math.ceil(ratio), max_slice_nums)

    source_image = None
    best_grid = None
    patches = []

    if multiple <= 1 or never_split:
        # dont need to slice, upsample
        best_size = find_best_resize(
            original_size, scale_resolution, patch_size, allow_upscale=True
        )
        source_image = image.resize(best_size, Image.Resampling.BICUBIC)
    else:
        candidate_split_grids_nums = []
        for i in [multiple - 1, multiple, multiple + 1]:
            if i == 1 or i > max_slice_nums:
                continue
            candidate_split_grids_nums.append(i)

        # source image, down-sampling and ensure divided by patch_size
        best_resize = find_best_resize(original_size, scale_resolution, patch_size)
        source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC)
        candidate_grids = []

        # find best grid
        for split_grids_nums in candidate_split_grids_nums:
            m = 1
            while m <= split_grids_nums:
                if split_grids_nums % m == 0:
                    candidate_grids.append([m, split_grids_nums // m])
                m += 1

        best_grid = [1, 1]
        min_error = float("inf")
        for grid in candidate_grids:
            error = abs(log_ratio - math.log(grid[0] / grid[1]))
            if error < min_error:
                best_grid = grid
                min_error = error

        refine_size = get_refine_size(
            original_size, best_grid, scale_resolution, patch_size, allow_upscale=True
        )

        refine_image = image.resize(refine_size, Image.Resampling.BICUBIC)
        patches = split_to_patches(refine_image, best_grid)

    return source_image, patches, best_grid


def ensure_divide(length, patch_size):
    return max(round(length / patch_size) * patch_size, patch_size)


def find_best_resize(original_size, scale_resolution, patch_size, allow_upscale=False):
    width, height = original_size
    if (width * height > scale_resolution * scale_resolution) or allow_upscale:
        r = width / height
        height = int(scale_resolution / math.sqrt(r))
        width = int(height * r)
    best_width = ensure_divide(width, patch_size)
    best_height = ensure_divide(height, patch_size)
    return (best_width, best_height)


def get_refine_size(
    original_size, grid, scale_resolution, patch_size, allow_upscale=False
):
    width, height = original_size
    grid_x, grid_y = grid

    refine_width = ensure_divide(width, grid_x)
    refine_height = ensure_divide(height, grid_y)

    grid_width = refine_width / grid_x
    grid_height = refine_height / grid_y

    best_grid_size = find_best_resize(
        (grid_width, grid_height),
        scale_resolution,
        patch_size,
        allow_upscale=allow_upscale,
    )

    refine_size = (best_grid_size[0] * grid_x, best_grid_size[1] * grid_y)

    return refine_size


def split_to_patches(image, grid):
    patches = []
    width, height = image.size
    grid_x = int(width / grid[0])
    grid_y = int(height / grid[1])

    for i in range(0, height, grid_y):
        images = []
        for j in range(0, width, grid_x):
            box = (j, i, j + grid_x, i + grid_y)
            patch = image.crop(box)
            images.append(patch)
        patches.append(images)

    return patches


def get_grid_placeholder(tokenizer, grid, query_num):
    image_placeholder = (
        tokenizer.im_start + tokenizer.unk_token * query_num + tokenizer.im_end
    )

    cols = grid[0]
    rows = grid[1]
    slices = []
    for i in range(rows):
        lines = []
        for j in range(cols):
            lines.append(image_placeholder)
        slices.append("".join(lines))
    slice_placeholder = tokenizer.slice_start + "\n".join(slices) + tokenizer.slice_end
    return slice_placeholder
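
A rough usage sketch for the chat() entry point defined above. The repo id, the image path, and the auto_map wiring that lets AutoModel/AutoTokenizer resolve these remote-code classes are assumptions, not part of this commit:

import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

repo = "your-namespace/MiniCPM-Llama3-V"  # hypothetical repo id
model = AutoModel.from_pretrained(repo, trust_remote_code=True, torch_dtype=torch.float16)
model = model.to("cuda").eval()
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)

image = Image.open("example.jpg").convert("RGB")  # placeholder image
msgs = [{"role": "user", "content": "Describe this image."}]

# chat() slices the image, builds the <image>/<slice> placeholders, applies the
# chat template, and generates with the sampling defaults defined above
answer = model.chat(image=image, msgs=msgs, tokenizer=tokenizer, sampling=True)
print(answer)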
resampler.py ADDED
@@ -0,0 +1,163 @@
from functools import partial
import numpy as np

import torch
from torch import nn
from torch.nn.init import trunc_normal_

def get_2d_sincos_pos_embed(embed_dim, image_size):
    """
    image_size: image_size or (image_height, image_width)
    return:
    pos_embed: [image_height, image_width, embed_dim]
    """
    if isinstance(image_size, int):
        grid_h_size, grid_w_size = image_size, image_size
    else:
        grid_h_size, grid_w_size = image_size[0], image_size[1]

    grid_h = np.arange(grid_h_size, dtype=np.float32)
    grid_w = np.arange(grid_w_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    grid = np.stack(grid, axis=0)

    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[0])  # (H, W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim // 2, grid[1])  # (H, W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=-1)  # (H, W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid_new(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (H, W)
    out: (H, W, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.
    omega = 1. / 10000 ** omega  # (D/2,)

    out = np.einsum('hw,d->hwd', pos, omega)  # (H, W, D/2), outer product

    emb_sin = np.sin(out)  # (H, W, D/2)
    emb_cos = np.cos(out)  # (H, W, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=-1)  # (H, W, D)
    return emb


class Resampler(nn.Module):
    """
    A 2D perceiver-resampler network with one cross attention layers by
        given learnable queries and 2d sincos pos_emb
    Outputs:
        A tensor with the shape of (batch_size, num_queries, embed_dim)
    """

    def __init__(
        self,
        num_queries,
        embed_dim,
        num_heads,
        kv_dim=None,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        adaptive=False,
        max_size=(70, 70),
    ):
        super().__init__()
        self.num_queries = num_queries
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.adaptive = adaptive
        self.max_size = max_size

        self.query = nn.Parameter(torch.zeros(self.num_queries, embed_dim))
        trunc_normal_(self.query, std=.02)

        if kv_dim is not None and kv_dim != embed_dim:
            self.kv_proj = nn.Linear(kv_dim, embed_dim, bias=False)
        else:
            self.kv_proj = nn.Identity()

        self.attn = nn.MultiheadAttention(embed_dim, num_heads)
        self.ln_q = norm_layer(embed_dim)
        self.ln_kv = norm_layer(embed_dim)

        self.ln_post = norm_layer(embed_dim)
        self.proj = nn.Parameter((embed_dim ** -0.5) * torch.randn(embed_dim, embed_dim))

        self._set_2d_pos_cache(self.max_size)
        self.apply(self._init_weights)

    def _set_2d_pos_cache(self, max_size, device='cpu'):
        pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.embed_dim, max_size)).float().to(device)
        self.register_buffer("pos_embed", pos_embed, persistent=False)

    def _adjust_pos_cache(self, tgt_sizes, device):
        max_h = torch.max(tgt_sizes[:, 0])
        max_w = torch.max(tgt_sizes[:, 1])
        if max_h > self.max_size[0] or max_w > self.max_size[1]:
            self.max_size = [max(max_h, self.max_size[0]), max(max_w, self.max_size[1])]
            self._set_2d_pos_cache(self.max_size, device)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x, tgt_sizes=None):
        assert x.shape[0] == tgt_sizes.shape[0]
        bs = x.shape[0]

        device = x.device
        dtype = x.dtype

        patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]

        self._adjust_pos_cache(tgt_sizes, device=device)

        max_patch_len = torch.max(patch_len)
        key_padding_mask = torch.zeros((bs, max_patch_len), dtype=torch.bool, device=device)

        pos_embed = []
        for i in range(bs):
            tgt_h, tgt_w = tgt_sizes[i]
            pos_embed.append(self.pos_embed[:tgt_h, :tgt_w, :].reshape((tgt_h * tgt_w, -1)).to(dtype))  # patches * D
            key_padding_mask[i, patch_len[i]:] = True

        pos_embed = torch.nn.utils.rnn.pad_sequence(
            pos_embed, batch_first=True, padding_value=0.0).permute(1, 0, 2)  # BLD => L * B * D

        x = self.kv_proj(x)  # B * L * D
        x = self.ln_kv(x).permute(1, 0, 2)  # L * B * D

        q = self.ln_q(self.query)  # Q * D

        out = self.attn(
            self._repeat(q, bs),  # Q * B * D
            x + pos_embed,  # L * B * D + L * B * D
            x,
            key_padding_mask=key_padding_mask)[0]
        # out: Q * B * D
        x = out.permute(1, 0, 2)  # B * Q * D

        x = self.ln_post(x)
        x = x @ self.proj
        return x

    def _repeat(self, query, N: int):
        return query.unsqueeze(1).repeat(1, N, 1)
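
A small standalone shape check for the Resampler above (a sketch, not from the commit) using toy dimensions; rows of x beyond each image's patch count are padding and are masked via key_padding_mask:

import torch
from resampler import Resampler  # assumes this file is importable locally

# embed_dim // 128 mirrors the num_heads rule used in init_resampler above
resampler = Resampler(num_queries=64, embed_dim=512, num_heads=4, kv_dim=1152, adaptive=True)

x = torch.randn(2, 24 * 32, 1152)               # B x L x kv_dim, L = largest H*W in patches
tgt_sizes = torch.tensor([[24, 32], [16, 40]])  # per-image (H, W) in patches

out = resampler(x, tgt_sizes)
print(out.shape)  # torch.Size([2, 64, 512]) -> (batch_size, num_queries, embed_dim)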