robomaster2025 committed on
Commit 000e4e8 · verified · 1 Parent(s): e32a730

Upload folder using huggingface_hub
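The commit message indicates the folder was pushed with huggingface_hub; an upload of this kind is typically done with its upload_folder API. A minimal sketch under that assumption (the repo id and local path below are illustrative placeholders, not taken from this commit):

    from huggingface_hub import upload_folder

    # Push a local folder to a Hub model repo (repo_id and folder_path are placeholders)
    upload_folder(
        folder_path="./RoboMaster",
        repo_id="robomaster2025/RoboMaster",
        repo_type="model",
        commit_message="Upload folder using huggingface_hub",
    )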

Files changed (1)
  1. inference_eval.py +237 -0
inference_eval.py ADDED
@@ -0,0 +1,237 @@
import json
import os
import tqdm
import numpy as np
import torch
from diffusers import (AutoencoderKL, CogVideoXDDIMScheduler, DDIMScheduler,
                       DPMSolverMultistepScheduler,
                       EulerAncestralDiscreteScheduler, EulerDiscreteScheduler,
                       PNDMScheduler)
from transformers import T5EncoderModel, T5Tokenizer
from omegaconf import OmegaConf
from PIL import Image
import torch.nn.functional as F
from einops import rearrange
import cv2
import decord

from robomaster.models.transformer3d import CogVideoXTransformer3DModel
from robomaster.models.autoencoder_magvit import AutoencoderKLCogVideoX
from robomaster.pipeline.pipeline_cogvideox_inpaint import CogVideoX_Fun_Pipeline_Inpaint
from robomaster.utils.utils import get_image_to_video_latent, save_videos_grid
from utils import *

# Low GPU memory mode; use this when GPU memory is under 16GB
low_gpu_memory_mode = False

# Model paths
model_name = "ckpts/CogVideoX-Fun-V1.5-5b-InP"
transformer_path = "ckpts/RoboMaster"

# Choose the sampler from "Euler", "Euler A", "DPM++", "PNDM", "DDIM_Cog" and "DDIM_Origin"
sampler_name = "DDIM_Origin"

# To generate ultra-long videos, set partial_video_length to the length of each sub-video segment
partial_video_length = None
overlap_video_length = 4

# Use torch.float16 if the GPU does not support torch.bfloat16
# Some graphics cards, such as the V100 and 2080 Ti, do not support torch.bfloat16
weight_dtype = torch.bfloat16

# Configs
negative_prompt = "The video is not of a high quality, it has a low resolution. Watermark present in each frame. The background is solid. Strange body and strange trajectory. Distortion. "
guidance_scale = 6.0
seed = 43
num_inference_steps = 50
video_length = 37
fps = 12
validation_image_path = "eval_metrics/results/bridge_eval_gt"
save_path = "samples/bridge_eval_ours"

# Get transformer
transformer = CogVideoXTransformer3DModel.from_pretrained_2d(
    transformer_path,
    low_cpu_mem_usage=True,
    finetune_init=False,
).to(weight_dtype)

# Get VAE
vae = AutoencoderKLCogVideoX.from_pretrained(
    model_name,
    subfolder="vae"
).to(weight_dtype)

text_encoder = T5EncoderModel.from_pretrained(
    model_name, subfolder="text_encoder", torch_dtype=weight_dtype
)

# Get scheduler
Choosen_Scheduler = {
    "Euler": EulerDiscreteScheduler,
    "Euler A": EulerAncestralDiscreteScheduler,
    "DPM++": DPMSolverMultistepScheduler,
    "PNDM": PNDMScheduler,
    "DDIM_Cog": CogVideoXDDIMScheduler,
    "DDIM_Origin": DDIMScheduler,
}[sampler_name]
scheduler = Choosen_Scheduler.from_pretrained(
    model_name,
    subfolder="scheduler"
)

pipeline = CogVideoX_Fun_Pipeline_Inpaint.from_pretrained(
    model_name,
    vae=vae,
    text_encoder=text_encoder,
    transformer=transformer,
    scheduler=scheduler,
    torch_dtype=weight_dtype
)

if low_gpu_memory_mode:
    pipeline.enable_sequential_cpu_offload()
else:
    pipeline.enable_model_cpu_offload()

# To generate from text only, set validation_image_start = None and validation_image_end = None
validation_images = [validation_image for validation_image in sorted(os.listdir(validation_image_path)) if validation_image.endswith('.png')]
vae_scale_factor_spatial = (2 ** (len(vae.config.block_out_channels) - 1) if vae is not None else 8)
if not os.path.exists(save_path):
    os.makedirs(save_path, exist_ok=True)
generator = torch.Generator(device="cuda").manual_seed(seed)

for validation_image in tqdm.tqdm(validation_images):

    if os.path.exists(os.path.join(save_path, validation_image.replace('.png', '.mp4'))):
        continue

    validation_image_start = os.path.join(validation_image_path, validation_image)
    validation_image_end = None
    image = Image.open(validation_image_start).convert("RGB")
    sample_size_ori = (image.size[1], image.size[0])
    sample_size = (round(image.size[1] / 8) * 8, round(image.size[0] / 8) * 8)
    image = image.resize(sample_size)
    prompt_path = validation_image_start.replace('.png', '.txt')
    with open(prompt_path, 'r') as file:
        prompt = file.readline().strip()
    obj_tracking_path = os.path.join(validation_image_path, validation_image.replace('.png', '_obj.npy'))
    robot_tracking_path = os.path.join(validation_image_path, validation_image.replace('.png', '_robot.npy'))

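    # Align video_length with the VAE temporal compression ratio and the transformer's temporal patch size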
    video_length = int((video_length - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1
    latent_frames = (video_length - 1) // vae.config.temporal_compression_ratio + 1
    if video_length != 1 and transformer.config.patch_size_t is not None and latent_frames % transformer.config.patch_size_t != 0:
        additional_frames = transformer.config.patch_size_t - latent_frames % transformer.config.patch_size_t
        video_length += additional_frames * vae.config.temporal_compression_ratio
    input_video, input_video_mask, clip_image = get_image_to_video_latent(validation_image_start, validation_image_end, video_length=video_length, sample_size=sample_size)

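    # Process the object and robot trajectories for video_length frames and scale them to latent-space coordinates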
    points_obj = process_traj(obj_tracking_path, video_length, [sample_size_ori[0], sample_size_ori[1]])
    points_obj = torch.tensor(points_obj)
    points_obj = (points_obj / vae_scale_factor_spatial).int()

    points_robot = process_traj(robot_tracking_path, video_length, [sample_size_ori[0], sample_size_ori[1]])
    points_robot = torch.tensor(points_robot)
    points_robot = (points_robot / vae_scale_factor_spatial).int()

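    # Load the object mask and derive an approximate object diameter in latent units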
    mask_obj = torch.from_numpy(np.load(os.path.join(validation_image_path, validation_image.replace('.png', '_obj_mask.npy'))))
    diameter_obj = max(int(torch.sqrt(mask_obj.sum()) / vae_scale_factor_spatial), 2)

    with torch.no_grad():

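        # Encode the first frame with the VAE to obtain the object appearance latents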
        latents_obj = vae.encode((input_video[:, :, 0].unsqueeze(2) * 2 - 1).to(dtype=weight_dtype, device='cuda'))[0]
        latents_obj = latents_obj.sample()
        latents_obj = latents_obj * vae.config.scaling_factor

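        # Resize the object mask to the latent resolution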
        mask_obj = F.interpolate(
            mask_obj[None, None, None].float(),
            size=latents_obj.shape[2:],
            mode='trilinear',
            align_corners=False
        )

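        # Load the precomputed robot-arm latents and mask, and resize the mask to the latent resolution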
        ground_sam_robot_path = './robot'
        latents_robot = torch.load(os.path.join(ground_sam_robot_path, 'bridge.pth'))
        mask_robot = torch.from_numpy(np.load(os.path.join(ground_sam_robot_path, 'bridge_mask.npy')))
        diameter_robot = max(int(torch.sqrt(mask_robot.sum()) / 2 / vae_scale_factor_spatial), 2)
        latents_robot = latents_robot.to(device=latents_obj.device, dtype=weight_dtype)
        mask_robot = F.interpolate(
            mask_robot[None, None, None].float(),
            size=latents_robot.shape[2:],
            mode='trilinear',
            align_corners=False
        )

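        # Read the interaction start/end frame indices and convert them to latent frame indices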
        transit_start, transit_end = np.load(os.path.join(validation_image_path, validation_image.replace('.png', '_transit.npy')))
        video_path = os.path.join(validation_image_path, validation_image.replace('.png', '.mp4'))
        cap = cv2.VideoCapture(video_path)
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        ctx = decord.cpu(0)
        reader = decord.VideoReader(video_path, ctx=ctx, height=height, width=width)
        transit_start = int(transit_start * video_length / len(reader))
        transit_end = int(transit_end * video_length / len(reader))
        transit_start_latent = transit_start // vae.config.temporal_compression_ratio
        transit_end_latent = transit_end // vae.config.temporal_compression_ratio
        if transit_end >= (video_length - 3):
            transit_end_latent = latent_frames

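        # Compose the flow latents in three stages: robot motion before the interaction, object motion during it, and robot motion after it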
        # pre-interaction
        flow_latents = sample_flowlatents(
            latents_robot,
            torch.zeros_like(latents_obj).repeat(1, 1, latent_frames, 1, 1),
            mask_robot,
            points_robot,
            diameter_robot,
            0,
            transit_start_latent,
        )

        # interaction
        flow_latents = sample_flowlatents(
            latents_obj,
            flow_latents,
            mask_obj,
            points_obj,
            diameter_obj,
            transit_start_latent,
            transit_end_latent,
        )

        # post-interaction
        flow_latents = sample_flowlatents(
            latents_robot,
            flow_latents,
            mask_robot,
            points_robot,
            diameter_robot,
            transit_end_latent,
            latent_frames,
        )

        flow_latents = rearrange(flow_latents, "b c f h w -> b f c h w")

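        # Run the inpainting pipeline conditioned on the first frame, its mask and the composed flow latents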
        sample = pipeline(
            prompt,
            num_frames=video_length,
            negative_prompt=negative_prompt,
            height=sample_size[0],
            width=sample_size[1],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            video=input_video,
            mask_video=input_video_mask,
            flow_latents=flow_latents,
        ).videos

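        # Resize the generated video back to the original frame resolution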
        sample = F.interpolate(
            sample,
            size=torch.Size([video_length, sample_size_ori[0], sample_size_ori[1]]),
            mode='trilinear',
            align_corners=False
        )

        # Save the generated video and copy the prompt file alongside it
        video_chunk = (rearrange(sample[0], "c f h w -> f h w c").numpy() * 255).astype(np.uint8)
        save_video_name = os.path.join(save_path, os.path.basename(validation_image_start).split('.png')[0])
        save_images2video(video_chunk, save_video_name, fps=12)
        os.system(f'cp -r {prompt_path} {save_path}')
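As written, the script expects, for every X.png in validation_image_path, the sibling files X.txt (prompt), X_obj.npy and X_robot.npy (trajectories), X_obj_mask.npy, X_transit.npy and the ground-truth X.mp4, plus the precomputed robot latents in ./robot/bridge.pth and ./robot/bridge_mask.npy; generated videos are written to save_path. With the checkpoints placed under ckpts/ as configured above, it can be run directly:

    python inference_eval.py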