
Example Fine-Tuned Model for Unit 2 of the Diffusion Models Class 🧨

This model is google/ddpm-celebahq-256 fine-tuned on the huggan/few-shot-anime-face dataset, with the goal of letting users generate cartoon character avatars.

Usage

from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained('your_hub_username/ddpm-fewshot_anime_face')
image = pipeline().images[0]
image.show()  # To display the generated image
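
Sampling with the default DDPM scheduler runs the full 1000 denoising steps, which is slow on CPU/MPS. As a minimal sketch (assuming the checkpoint's scheduler config is DDIM-compatible, which is the case for DDPM-style models), you can swap in a DDIMScheduler to sample in far fewer steps:

from diffusers import DDPMPipeline, DDIMScheduler

pipeline = DDPMPipeline.from_pretrained('your_hub_username/ddpm-fewshot_anime_face')
# Replace the 1000-step DDPM scheduler with a DDIM scheduler built from the same config
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
image = pipeline(num_inference_steps=40).images[0]  # e.g. 40 DDIM steps instead of 1000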

Fine-tuning

With only a small amount of data available, the model can also be fine-tuned with the script below:

from torchvision import transforms
import torch
from diffusers import DDPMPipeline, DDIMScheduler
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import torch.nn.functional as F
from matplotlib import pyplot as plt

path = "/Volumes/mac1/pytorchlearning/diff_learning/ddpm-fewshot_anime_face"  # replace with your model id on the Hub
image_pipe = DDPMPipeline.from_pretrained(path)
# Load the DDIM scheduler from the pipeline's scheduler subfolder; it can be swapped in
# (image_pipe.scheduler = scheduler) for faster sampling, while training below keeps DDPM
scheduler = DDIMScheduler.from_pretrained(path, subfolder="scheduler")
dataset = load_dataset("huggan/few-shot-anime-face",split="train")
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
image_pipe.to(device)

model_name = "ddpm-fewshot_anime_face"
image_size = 256
batch_size = 8
preprocess = transforms.Compose([
    transforms.Resize(image_size),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5],[0.5])
])

def transform(examples):
    images = [preprocess(image.convert("RGB")) for image in examples["image"]]
    return {"images":images}

dataset.set_transform(transform)
train_loader = DataLoader(dataset,batch_size,shuffle=True)
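
Before training, it can help to inspect one batch and confirm the preprocessing behaves as expected. A minimal sketch (torchvision's make_grid is used only for display here):

import torchvision

batch = next(iter(train_loader))
print(batch["images"].shape)  # e.g. torch.Size([8, 3, 256, 256]), values roughly in [-1, 1]
grid = torchvision.utils.make_grid(batch["images"], nrow=4)
plt.imshow((grid.permute(1, 2, 0) * 0.5 + 0.5).numpy())  # undo Normalize([0.5], [0.5]) for display
plt.axis("off")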

num_epochs = 20
lr = 1e-5
grad_accumulation_steps = 2
optimizer = torch.optim.AdamW(image_pipe.unet.parameters(), lr=lr)
losses = []
for epoch in range(num_epochs):
    for step, batch in tqdm(enumerate(train_loader), total=len(train_loader)):
        clean_images = batch["images"].to(device)
        noise = torch.randn(clean_images.shape).to(device)
        bs = clean_images.shape[0]
        
        # Sample a random timestep for each image in the batch
        time_steps = torch.randint(0, image_pipe.scheduler.config.num_train_timesteps, (bs,), device=device).long()
        # Add noise to the clean images according to the noise schedule at those timesteps
        noise_image = image_pipe.scheduler.add_noise(clean_images, noise, time_steps)
        # Predict the noise residual with the UNet
        noise_pred = image_pipe.unet(noise_image, time_steps, return_dict=False)[0]
        # The target is the noise that was added, not the noisy image
        loss = F.mse_loss(noise_pred, noise)
        
        losses.append(loss.item())
        loss.backward()
        # Gradient accumulation: only step the optimizer every grad_accumulation_steps batches
        if (step + 1) % grad_accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()

    print(f"Epoch {epoch} average loss: {sum(losses[-len(train_loader):])/len(train_loader)}")
    image_pipe.save_pretrained(model_name)

plt.plot(losses)
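
After training, a few samples can be drawn from the fine-tuned pipeline to eyeball the results. A minimal sketch (with the default DDPM scheduler this runs 1000 denoising steps per batch, so it is slow):

# Generate a small batch of samples from the fine-tuned pipeline
images = image_pipe(batch_size=4).images
fig, axes = plt.subplots(1, 4, figsize=(16, 4))
for ax, img in zip(axes, images):
    ax.imshow(img)
    ax.axis("off")
plt.show()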