multimodalart HF staff commited on
Commit
7b45d83
1 Parent(s): 7466734

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -7
app.py CHANGED
@@ -86,9 +86,9 @@ div#share-btn-container > div {flex-direction: row;background: black;align-items
86
  #thumbs_down_clicked{background:red}
87
  .title_lora a{color: var(--body-text-color) !important; opacity:0.6}
88
  #prompt_area .form{border:0}
 
89
  '''
90
 
91
-
92
  original_pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
93
 
94
  @spaces.GPU
@@ -104,7 +104,6 @@ def merge_and_run(prompt, negative_prompt, shuffled_items, lora_1_scale=0.5, lor
104
  state_dict_2 = {k: v.to(device="cuda", dtype=torch.float16) for k,v in state_dict_2.items() if torch.is_tensor(v)}
105
  state_dict_time = time() - start_time
106
  print(f"State Dict time: {state_dict_time}")
107
- #pipe = copy.deepcopy(original_pipe)
108
  start_time = time()
109
  unet = copy.deepcopy(original_pipe.unet)
110
  text_encoder=copy.deepcopy(original_pipe.text_encoder)
@@ -134,15 +133,12 @@ def merge_and_run(prompt, negative_prompt, shuffled_items, lora_1_scale=0.5, lor
134
  seed = random.randint(0, 2147483647)
135
  generator = torch.Generator(device="cuda").manual_seed(seed)
136
  image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=20, width=768, height=768, generator=generator).images[0]
137
- del pipe
138
- gc.collect()
139
  return image, gr.update(visible=True), seed, gr.update(visible=True, interactive=True), gr.update(visible=False), gr.update(visible=True, interactive=True), gr.update(visible=False)
140
 
141
  def get_description(item):
142
  trigger_word = item["trigger_word"]
143
  return f"Trigger: `{trigger_word}`" if trigger_word else "No trigger, applied automatically", trigger_word
144
 
145
-
146
  def truncate_string(s, max_length=29):
147
  return s[:max_length - 3] + "..." if len(s) > max_length else s
148
 
@@ -179,7 +175,7 @@ def save_preferences(lora_1_id, lora_1_scale, lora_2_id, lora_2_scale, prompt, g
179
  with gr.Blocks(css=css) as demo:
180
  shuffled_items = gr.State()
181
  title = gr.HTML(
182
- '''<h1>LoRA Roulette 🎲</h1>
183
  <p>These random LoRAs are loaded into SDXL, can you find a fun way to combine them? 🎨</p>
184
  ''',
185
  elem_id="title"
@@ -201,6 +197,7 @@ with gr.Blocks(css=css) as demo:
201
  lora_2_prompt = gr.Markdown(visible=False)
202
  with gr.Column(min_width=10, scale=2, elem_classes="plus_column"):
203
  equal = gr.HTML("=", elem_classes="plus_button")
 
204
  with gr.Column(min_width=10, scale=14):
205
  with gr.Box(elem_id="generate_area"):
206
  with gr.Row(elem_id="prompt_area"):
@@ -226,7 +223,6 @@ with gr.Blocks(css=css) as demo:
226
  with gr.Row():
227
  lora_1_scale = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=1, step=0.1, value=0.7)
228
  lora_2_scale = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=1, step=0.1, value=0.7)
229
- shuffle_button = gr.Button("Reshuffle!")
230
  gr.Markdown("For generating with intent visit the [LoRA the Explorer Space](https://huggingface.co/spaces/multimodalart/LoraTheExplorer), but don't forget that sometimes restrictions flourish creativity 🌸")
231
 
232
  demo.load(shuffle_images, inputs=[], outputs=[lora_1_link, lora_1, lora_1_prompt, lora_1_id, lora_2_link, lora_2, lora_2_prompt, lora_2_id, prompt, shuffled_items, lora_1_scale, lora_2_scale], queue=False, show_progress="hidden")
 
86
  #thumbs_down_clicked{background:red}
87
  .title_lora a{color: var(--body-text-color) !important; opacity:0.6}
88
  #prompt_area .form{border:0}
89
+ #reroll_button{position: absolute;right: 0;flex-grow: 1;min-width: 75px;padding: .1em}
90
  '''
91
 
 
92
  original_pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
93
 
94
  @spaces.GPU
 
104
  state_dict_2 = {k: v.to(device="cuda", dtype=torch.float16) for k,v in state_dict_2.items() if torch.is_tensor(v)}
105
  state_dict_time = time() - start_time
106
  print(f"State Dict time: {state_dict_time}")
 
107
  start_time = time()
108
  unet = copy.deepcopy(original_pipe.unet)
109
  text_encoder=copy.deepcopy(original_pipe.text_encoder)
 
133
  seed = random.randint(0, 2147483647)
134
  generator = torch.Generator(device="cuda").manual_seed(seed)
135
  image = pipe(prompt=prompt, negative_prompt=negative_prompt, num_inference_steps=20, width=768, height=768, generator=generator).images[0]
 
 
136
  return image, gr.update(visible=True), seed, gr.update(visible=True, interactive=True), gr.update(visible=False), gr.update(visible=True, interactive=True), gr.update(visible=False)
137
 
138
  def get_description(item):
139
  trigger_word = item["trigger_word"]
140
  return f"Trigger: `{trigger_word}`" if trigger_word else "No trigger, applied automatically", trigger_word
141
 
 
142
  def truncate_string(s, max_length=29):
143
  return s[:max_length - 3] + "..." if len(s) > max_length else s
144
 
 
175
  with gr.Blocks(css=css) as demo:
176
  shuffled_items = gr.State()
177
  title = gr.HTML(
178
+ '''<h1>LoRA Roulette </h1>
179
  <p>These random LoRAs are loaded into SDXL, can you find a fun way to combine them? 🎨</p>
180
  ''',
181
  elem_id="title"
 
197
  lora_2_prompt = gr.Markdown(visible=False)
198
  with gr.Column(min_width=10, scale=2, elem_classes="plus_column"):
199
  equal = gr.HTML("=", elem_classes="plus_button")
200
+ shuffle_button = gr.Button("🎲 reroll", elem_id="reroll_button")
201
  with gr.Column(min_width=10, scale=14):
202
  with gr.Box(elem_id="generate_area"):
203
  with gr.Row(elem_id="prompt_area"):
 
223
  with gr.Row():
224
  lora_1_scale = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=1, step=0.1, value=0.7)
225
  lora_2_scale = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=1, step=0.1, value=0.7)
 
226
  gr.Markdown("For generating with intent visit the [LoRA the Explorer Space](https://huggingface.co/spaces/multimodalart/LoraTheExplorer), but don't forget that sometimes restrictions flourish creativity 🌸")
227
 
228
  demo.load(shuffle_images, inputs=[], outputs=[lora_1_link, lora_1, lora_1_prompt, lora_1_id, lora_2_link, lora_2, lora_2_prompt, lora_2_id, prompt, shuffled_items, lora_1_scale, lora_2_scale], queue=False, show_progress="hidden")