HikariDawn committed
Commit 6f53b1b · 1 Parent(s): d759e44

docs: little update

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -138,7 +138,7 @@ vae = AutoencoderKLWan.from_pretrained(base_model_id, subfolder="vae", torch_dty
 print("Loading the pipeline!")
 pipe = WanImageToVideoPipeline.from_pretrained(base_model_id, transformer=transformer, vae=vae, torch_dtype=torch.bfloat16)
 pipe.to("cuda")
-pipe.enable_model_cpu_offload()
+# pipe.enable_model_cpu_offload()
 
 #####################################################################################################################################
 
@@ -327,14 +327,14 @@ def build_canvas(input_image_path, resized_height, resized_width, top_left_heigh
 
 
 
-def process_points(traj_list, num_frames=49):
+def process_points(traj_list, num_frames=81):
 
 
     if len(traj_list) < 2: # First point
         return [traj_list[0]] * num_frames
 
     elif len(traj_list) >= num_frames:
-        raise gr.Info("The number of trajectory points is more than 49 limits, we will do cropping!")
+        raise gr.Info("The number of trajectory points is more than the limits, we will do cropping!")
         skip = len(traj_list) // num_frames
         return traj_list[::skip][: num_frames - 1] + traj_list[-1:]
 
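
For context, a minimal standalone sketch of the resampling logic this commit retargets from 49 to 81 frames. It is an illustration, not the app's full implementation: gr.Info is swapped for print so the snippet runs outside Gradio, and the branch for trajectories shorter than num_frames (not shown in the diff) is only stubbed out.

# Minimal sketch of the trajectory resampling shown in the diff, runnable on its own.
# Assumptions: gr.Info is replaced by print, and the branch for trajectories shorter
# than num_frames (not visible in the diff) simply returns the input unchanged.
def process_points(traj_list, num_frames=81):
    if len(traj_list) < 2:  # Only the first point so far: hold it for every frame
        return [traj_list[0]] * num_frames
    elif len(traj_list) >= num_frames:
        print("The number of trajectory points is more than the limits, we will do cropping!")
        # Subsample evenly, then keep the last recorded point as the final frame
        skip = len(traj_list) // num_frames
        return traj_list[::skip][: num_frames - 1] + traj_list[-1:]
    return traj_list  # placeholder for the short-trajectory branch not shown in the diff

# Example: 200 drawn points are cropped down to exactly 81 frames
points = [(i, i) for i in range(200)]
assert len(process_points(points)) == 81

The other change, commenting out pipe.enable_model_cpu_offload(), leaves the whole pipeline resident on the GPU via pipe.to("cuda") rather than offloading idle submodules to the CPU, which in diffusers generally trades higher VRAM use for faster inference.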