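"""Gradio face-swap demo.

Extracts frames from an uploaded video, swaps the first detected face in each
frame with the face from an uploaded image (InsightFace buffalo_l detector +
inswapper_128.onnx swapper), restores the swapped faces with GFPGAN, and
re-encodes the frames into an output video.

The model files inswapper_128.onnx and GFPGANv1.4.pth are expected in the
working directory; they are not downloaded automatically.
"""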
import os
import shutil

import cv2
import gfpgan
import gradio as gr
import insightface
import numpy as np
import onnxruntime
from PIL import Image
from tqdm import tqdm


def video_to_frames(video_path, output_folder):
    """Extract frames from the input video as JPEGs and return (frame_count, fps)."""
    vidcap = cv2.VideoCapture(video_path)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    success, image = vidcap.read()
    count = 0
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    while success:
        frame_name = os.path.join(output_folder, f"frame_{count}.jpg")
        cv2.imwrite(frame_name, image)
        success, image = vidcap.read()
        count += 1
        if count > 500:  # hard cap to keep extraction bounded
            break

    vidcap.release()
    print(f"{count} frames extracted from {video_path}.")
    return count, fps

def frames_to_video(frame_folder, video_path, image_path, frame_count, fps):
    """Swap the source face into each extracted frame and write the result as a video."""
    print("Source image:", image_path)
    frames = [f for f in os.listdir(frame_folder) if f.endswith('.jpg')]
    frames.sort(key=lambda x: int(x.split('_')[1].split('.')[0]))  # sort frames numerically

    # Use the first frame to determine the output resolution.
    frame = cv2.imread(os.path.join(frame_folder, frames[0]))
    height, width, _ = frame.shape

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(video_path, fourcc, fps, (width, height))

    providers = onnxruntime.get_available_providers()
    print("Available providers:", providers)

    # Face detector, face swapper, and face restorer.
    app = insightface.app.FaceAnalysis(name='buffalo_l', providers=providers)
    app.prepare(ctx_id=0, det_size=(640, 640))
    swapper = insightface.model_zoo.get_model(
        "inswapper_128.onnx", download=False, download_zip=False, providers=providers)
    face_enhancer = gfpgan.GFPGANer(model_path="GFPGANv1.4.pth", upscale=1, device='cuda')

    # Detect the source face once; it does not change between frames.
    source_img = cv2.cvtColor(np.array(Image.open(image_path).convert("RGB")), cv2.COLOR_RGB2BGR)
    source_faces = app.get(source_img)
    if not source_faces:
        print("No face detected in the source image; aborting.")
        out.release()
        return
    source_face = source_faces[0]

    for i in tqdm(range(frame_count), desc="Converting frames to video"):
        img = cv2.imread(os.path.join(frame_folder, frames[i]))
        target_faces = app.get(img)
        if target_faces:
            # Swap the first detected face in the frame with the source face,
            # then run GFPGAN to restore the swapped region.
            result = swapper.get(img.copy(), target_faces[0], source_face, paste_back=True)
            _, _, result = face_enhancer.enhance(result)
            out.write(result)
        else:
            # No face in this frame: write it unchanged.
            out.write(img)

    out.release()
    print(f"Video saved at {video_path}.")

def face_swap(video_path, image_path):
    """Gradio handler: run the frame-extraction, swap, and re-encode pipeline."""
    output_folder = "Out_Frames"
    # Start from a clean frame folder so frames from a previous run cannot leak in.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)

    frame_count, fps = video_to_frames(video_path, output_folder)
    frame_count = min(frame_count, 150)  # process at most 150 frames

    output_video_path = "output_video.mp4"
    frames_to_video(output_folder, output_video_path, image_path, frame_count, fps)
    return output_video_path
    

iface = gr.Interface(
    fn=face_swap,
    inputs=[gr.Video(), gr.Image(type="filepath")],  # both inputs are passed to face_swap as file paths
    outputs="video",
    title="Face Swap",
    description="Upload a video and an image. The faces in the video will be swapped with the face in the image.",
)
iface.launch(share=True)
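# Note: launch(share=True) also creates a temporary public Gradio link in
# addition to the local server; use share=False to serve locally only.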