import gradio as gr
import requests
from PIL import Image
import os
import torch
import numpy as np
from transformers import AutoImageProcessor, Swin2SRForImageSuperResolution

# Load the 2x classical super-resolution checkpoint once at import time so
# every request reuses the same weights.
processor = AutoImageProcessor.from_pretrained("caidas/swin2SR-classical-sr-x2-64")
model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")


def enhance(image):
    """Upscale a PIL image 2x with Swin2SR and return the result as a PIL image.

    Parameters:
        image: input PIL.Image (low-quality / low-resolution JPEG expected).

    Returns:
        PIL.Image: the super-resolved image.
    """
    # prepare image for the model
    inputs = processor(image, return_tensors="pt")
    # forward pass — inference only, so gradients are disabled
    with torch.no_grad():
        outputs = model(**inputs)
    # postprocess: CHW float tensor clamped to [0, 1] -> HWC uint8 array
    output = outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy()
    output = np.moveaxis(output, source=0, destination=-1)
    output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
    return Image.fromarray(output)


title = "Demo: Swin2SR for Image Super-Resolution 🚀🚀🔥"
description = '''
**This demo expects low-quality and low-resolution JPEG compressed images.**
**Demo notebook can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Swin2SR/Perform_image_super_resolution_with_Swin2SR.ipynb).
'''
# NOTE(review): the original article string literal was broken across several
# lines (invalid Python). Reconstructed below as a single well-formed HTML
# snippet carrying the same visible text — confirm the two link targets.
article = (
    "<p style='text-align: center'>"
    "<a href='https://arxiv.org/abs/2209.11345' target='_blank'>"
    "Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration"
    "</a> | "
    "<a href='https://huggingface.co/docs/transformers/main/en/model_doc/swin2sr' target='_blank'>"
    "HuggingFace docs</a></p>"
)

examples = [
    ['00003.jpg'],
    ['0855.jpg'],
    ['ali_eye.jpg'],
    ['butterfly.jpg'],
    ['chain-eye.jpg'],
    ['gojou-eyes.jpg'],
    ['shanghai.jpg'],
    ['vagabond.jpg'],
]

gr.Interface(
    enhance,
    gr.inputs.Image(type="pil", label="Input").style(height=260),
    # fixed label typo: "Ouput" -> "Output"
    gr.inputs.Image(type="pil", label="Output").style(height=240),
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(enable_queue=True, share=True)