LiquidoNoNewtoniano committed on
Commit
ceb986e
1 Parent(s): 6c9d3a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -4
app.py CHANGED
@@ -1,7 +1,43 @@
1
- import gradio as gr
 
 
2
 
 
 
 
3
 
4
- image_to_text = gr.Interface.load("models/nlpconnect/vit-gpt2-image-captioning")
 
5
 
6
- demo = gr.Interface(image_to_text, inputs="image", outputs="text")
7
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
4
 
# Captioning checkpoint, defined once so the three from_pretrained
# calls below cannot drift out of sync.
CHECKPOINT = "nlpconnect/vit-gpt2-image-captioning"

# ViT encoder / GPT-2 decoder captioning model with its matching
# image preprocessor and tokenizer.
model = VisionEncoderDecoderModel.from_pretrained(CHECKPOINT)
feature_extractor = ViTImageProcessor.from_pretrained(CHECKPOINT)
tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)

# Use the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Beam-search generation settings: short captions, 4 beams.
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
15
def image_to_text(image_paths):
    """Generate a caption for a single image.

    Parameters
    ----------
    image_paths : PIL.Image.Image
        The image to caption. Despite the plural name (kept for
        interface compatibility), Gradio passes one PIL image per call.

    Returns
    -------
    str
        The generated caption, stripped of surrounding whitespace.
    """
    # Batch of one: the feature extractor expects a list of images.
    images = [image_paths]

    pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
    pixel_values = pixel_values.to(device)

    # Inference only — disable autograd tracking to save memory and time.
    with torch.no_grad():
        output_ids = model.generate(pixel_values, **gen_kwargs)

    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    return preds[0].strip()
26
# UI text shown above the demo; intentionally left blank.
title = ""
description = ""

# image_to_text returns a string caption, so the output component must
# be text — the previous gr.Image() output was a bug (a string caption
# cannot be rendered as an image). The deprecated gr.inputs namespace
# and the enable_queue constructor flag are replaced with their
# current-API equivalents.
interface = gr.Interface(
    fn=image_to_text,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title=title,
    description=description,
)

# Queueing (formerly enable_queue=True) is now configured on the app.
interface.queue()
interface.launch(debug=True)