# WeatherFlow / train_reflow.py
import argparse
import os
import yaml
import glob
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from models.unet import DiffusionUNet
from diff2flow import dict2namespace
import utils.logging
class ReflowDataset(data.Dataset):
def __init__(self, data_dir):
super().__init__()
self.files = sorted(glob.glob(os.path.join(data_dir, "*.pth")))
print(f"Found {len(self.files)} files in {data_dir}")
def __len__(self):
        # Each file holds one pre-generated batch, loaded on demand,
        # so the dataset length is the number of files.
        return len(self.files)
def __getitem__(self, index):
        # Each file stores a dict containing one full pre-generated batch of
        # tensors. Returning the whole batch works with DataLoader
        # batch_size=1: the default collate adds a leading dimension that the
        # training loop squeezes away. (Flattening the files into individual
        # samples and re-batching would also work; see the sketch below.)
path = self.files[index]
        data_dict = torch.load(path, map_location="cpu")
return data_dict
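
# A minimal sketch of the alternative mentioned above: an identity collate_fn
# for DataLoader(batch_size=1) that returns the stored batch dict unchanged,
# avoiding the extra leading dimension (and the squeeze(0) calls below).
# Hypothetical helper, not wired into the training loop.
def _identity_collate(samples):
    assert len(samples) == 1, "intended for DataLoader(batch_size=1)"
    return samples[0]
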
def train_reflow(args, config):
device = config.device
    # TensorBoard writer for scalar logging
    writer = SummaryWriter(log_dir=os.path.join(args.output, "logs"))
# Load Model
print("Loading model...")
model = DiffusionUNet(config)
model.to(device)
# Load Pretrained Weights (Optional but recommended for Reflow)
if args.resume:
print(f"Loading pretrained weights from {args.resume}")
checkpoint = torch.load(args.resume, map_location=device)
if "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
        # Strip the "module." prefix left by (Distributed)DataParallel wrappers
new_state_dict = {}
for k, v in state_dict.items():
if k.startswith("module."):
new_state_dict[k[7:]] = v
else:
new_state_dict[k] = v
model.load_state_dict(new_state_dict, strict=True)
optimizer = optim.Adam(model.parameters(), lr=config.optim.lr)
# Dataset
dataset = ReflowDataset(args.data_dir_reflow)
# DataLoader: batch_size=1 because __getitem__ returns a batch already
loader = data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)
model.train()
print("Starting training...")
step = 0
N = config.diffusion.num_diffusion_timesteps # e.g. 1000
for epoch in range(args.epochs):
for i, batch_dict in enumerate(loader):
            # Each tensor in batch_dict has shape [1, B, C, H, W]: the default
            # collate adds a leading dim under DataLoader batch_size=1.
x_0 = batch_dict["x_data"].squeeze(0).to(device) # Data (Clean)
x_1 = batch_dict["x_noise"].squeeze(0).to(device) # Noise (Latent)
x_cond = batch_dict["x_cond"].squeeze(0).to(device) # Condition
B = x_0.shape[0]
            # Sample t uniformly in [0, 1)
t = torch.rand(B, device=device)
            # Linear interpolation x_t = (1 - t) * x_0 + t * x_1:
            # t=0 gives data (x_0), t=1 gives noise (x_1).
            # The regression target is the constant velocity
            # v = dx_t/dt = x_1 - x_0.
# Reshape t for broadcasting
t_expand = t.view(B, 1, 1, 1)
x_t = (1 - t_expand) * x_0 + t_expand * x_1
v_target = x_1 - x_0
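            # Note: with this parameterization, x_t - t_expand * v_target
            # equals x_0 exactly, so x_t - t_expand * v_pred gives the model's
            # one-step estimate of the clean data (handy for quick checks).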
            # The UNet reuses the diffusion timestep embedding, which expects
            # values on the discrete 0..N-1 scale of the VP-SDE config rather
            # than the continuous t in [0, 1), so rescale t to t * (N - 1).
t_input = t * (N - 1)
            # Forward pass: the conditional UNet takes the conditioning frames
            # concatenated with x_t along the channel dimension.
model_input = torch.cat([x_cond, x_t], dim=1)
v_pred = model(model_input, t_input)
            # Rectified-flow objective: MSE between predicted and target velocity
loss = torch.mean((v_pred - v_target) ** 2)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % 10 == 0:
print(f"Epoch {epoch}, Step {step}, Loss: {loss.item():.6f}")
writer.add_scalar("Loss/train", loss.item(), step)
step += 1
# Save checkpoint
if (epoch + 1) % 5 == 0 or epoch == 0:
save_path = os.path.join(args.output, f"reflow_ckpt_{epoch}.pth")
torch.save(model.state_dict(), save_path)
print(f"Saved checkpoint to {save_path}")
writer.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=True)
parser.add_argument("--resume", type=str, default="")
parser.add_argument("--data_dir_reflow", type=str, required=True)
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--output", type=str, default="results/reflow_train")
parser.add_argument("--seed", type=int, default=61)
parser.add_argument("--lr", type=float, default=1e-5)
args = parser.parse_args()
with open(os.path.join("configs", args.config), "r") as f:
config_dict = yaml.safe_load(f)
config = dict2namespace(config_dict)
    if args.lr is not None:
        config.optim.lr = args.lr
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
config.device = device
torch.manual_seed(args.seed)
np.random.seed(args.seed)
os.makedirs(args.output, exist_ok=True)
train_reflow(args, config)
if __name__ == "__main__":
main()
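
# Example invocation (paths and filenames are illustrative):
#   python train_reflow.py --config weather.yml \
#       --data_dir_reflow results/reflow_pairs \
#       --resume results/pretrained/ckpt.pth \
#       --output results/reflow_train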