import gradio as gr
import json
import logging
import random
import torch
from PIL import Image
import spaces
from diffusers import DiffusionPipeline
import copy

# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
    loras = json.load(f)

# Initialize the base model
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
pipe.to("cuda")

MAX_SEED = 2**32 - 1

def update_selection(evt: gr.SelectData):
    # Update the prompt placeholder and status text when a LoRA is picked from the gallery
    selected_lora = loras[evt.index]
    new_placeholder = f"Type a prompt for {selected_lora['title']}"
    lora_repo = selected_lora["repo"]
    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
    return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index
    )

@spaces.GPU(duration=90)
def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
    if selected_index is None:
        raise gr.Error("You must select a LoRA before proceeding.")

    selected_lora = loras[selected_index]
    lora_path = selected_lora["repo"]
    trigger_word = selected_lora["trigger_word"]

    # Load LoRA weights (use an explicit weight file if the entry specifies one)
    if "weights" in selected_lora:
        pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
    else:
        pipe.load_lora_weights(lora_path)

    # Set random seed for reproducibility
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # Generate image with the LoRA trigger word appended to the prompt
    image = pipe(
        prompt=f"{prompt} {trigger_word}",
        num_inference_steps=steps,
        guidance_scale=cfg_scale,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    yield image

    pipe.unload_lora_weights()

css = '''
#gen_btn{height: 100%}
#title{text-align: center;}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
'''

with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
    title = gr.HTML(
        """