Manjushri committed
Commit 2e9f5b9 (parent: d2ef8ad)

Update app.py

Files changed (1): app.py (+5, -2)
app.py CHANGED
@@ -333,7 +333,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
             return image
         else:
             if upscale == "Yes":
-
+                torch.cuda.empty_cache()
                 image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
                 torch.cuda.empty_cache()
                 torch.cuda.max_memory_allocated(device=device)
@@ -351,9 +351,12 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
             return image
 
     if Model == 'Cascade':
+        torch.cuda.empty_cache()
         from diffusers import StableCascadeCombinedPipeline
         pipe = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16)
-        image = (prompt=Prompt, negative_prompt=negative_prompt, num_inference_steps=10, prior_num_inference_steps=20, prior_guidance_scale=3.0, width=width, height=height).images[0]
+        pipe.enable_xformers_memory_efficient_attention()
+        pipe = pipe.to(device)
+        image = pipe(prompt=Prompt, negative_prompt=negative_prompt, num_inference_steps=10, prior_num_inference_steps=20, prior_guidance_scale=3.0, width=width, height=height).images[0]
         return image
 
     return image
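For context, the Cascade branch as it stands after this commit corresponds roughly to the standalone sketch below. The model id, dtype, and call parameters are taken from the diff; the prompt, size values, and the save step are illustrative stand-ins for what genie() receives from the Gradio UI, and xformers is assumed to be installed.

import torch
from diffusers import StableCascadeCombinedPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Free any VRAM still held by a previously loaded pipeline (no-op when CUDA is not initialized).
torch.cuda.empty_cache()

# Load the combined prior + decoder pipeline in bf16, as in the diff.
pipe = StableCascadeCombinedPipeline.from_pretrained(
    "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16
)
pipe.enable_xformers_memory_efficient_attention()  # requires the xformers package
pipe = pipe.to(device)

# Example values; in app.py these come from the genie() arguments.
image = pipe(
    prompt="a lighthouse on a cliff at sunset",
    negative_prompt="blurry, low quality",
    num_inference_steps=10,          # decoder steps
    prior_num_inference_steps=20,    # prior steps
    prior_guidance_scale=3.0,
    width=1024,
    height=1024,
).images[0]
image.save("cascade.png")

Clearing the CUDA cache before from_pretrained() mirrors the pattern the commit also adds to the upscale path: it releases memory held by whichever pipeline ran last, so the large Cascade checkpoint has a better chance of fitting on the device.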