aducsdr committed on
Commit
04f69f6
·
verified ·
1 Parent(s): 9c8a744

Update aduc_framework/managers/seedvr_manager.py

Browse files
aduc_framework/managers/seedvr_manager.py CHANGED
@@ -1,12 +1,22 @@
1
  # managers/seedvr_manager.py
2
  #
3
- # Version: 6.0.0 (GPU Isolation Fix) - FINAL
4
- # Este código já está pronto para gerenciar um pool em quaisquer GPUs
5
- # que sejam designadas a ele pelo hardware_manager.
 
 
 
 
 
 
6
 
7
  import torch
8
- # ... (resto dos imports)
9
- import os, gc, logging, sys, subprocess, threading
 
 
 
 
10
  from pathlib import Path
11
  from urllib.parse import urlparse
12
  from torch.hub import download_url_to_file
@@ -15,20 +25,25 @@ from einops import rearrange
15
  import shutil
16
  from omegaconf import OmegaConf
17
  import yaml
 
 
18
  from ..tools.hardware_manager import hardware_manager
19
 
20
  logger = logging.getLogger(__name__)
21
 
 
22
  APP_ROOT = Path("/home/user/app")
23
  DEPS_DIR = APP_ROOT / "deps"
24
  SEEDVR_SPACE_DIR = DEPS_DIR / "SeedVR_Space"
25
  SEEDVR_SPACE_URL = "https://huggingface.co/spaces/ByteDance-Seed/SeedVR2-3B"
26
 
27
  class SeedVrWorker:
 
28
  def __init__(self, device_id: str):
29
  self.global_device_id = device_id
30
  self.local_device_name = 'cuda:0'
31
  self.gpu_index = self.global_device_id.split(':')[-1]
 
32
  self.runner = None
33
  self.is_initialized = False
34
  self.setup_complete = self._check_and_run_global_setup()
@@ -36,55 +51,79 @@ class SeedVrWorker:
36
 
37
  @staticmethod
38
  def _check_and_run_global_setup():
 
39
  setup_flag = DEPS_DIR / "seedvr.setup.complete"
40
- if str(APP_ROOT) not in sys.path: sys.path.insert(0, str(APP_ROOT))
41
- if setup_flag.exists(): return True
 
 
 
 
42
  logger.info("--- Iniciando Setup Global do SeedVR (primeira execução) ---")
 
43
  if not SEEDVR_SPACE_DIR.exists():
44
  DEPS_DIR.mkdir(exist_ok=True, parents=True)
45
  subprocess.run(["git", "clone", "--depth", "1", SEEDVR_SPACE_URL, str(SEEDVR_SPACE_DIR)], check=True)
 
46
  required_dirs = ["projects", "common", "models", "configs_3b", "configs_7b", "data"]
47
  for dirname in required_dirs:
48
- source, target = SEEDVR_SPACE_DIR / dirname, APP_ROOT / dirname
49
- if not target.exists(): shutil.copytree(source, target)
 
 
 
50
  try:
51
  import apex
52
  except ImportError:
53
  apex_url = 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl'
54
  apex_wheel_path = _load_file_from_url(url=apex_url, model_dir=str(DEPS_DIR))
55
  subprocess.run(f"pip install {apex_wheel_path}", check=True, shell=True)
 
56
  ckpt_dir = APP_ROOT / 'ckpts'
57
  ckpt_dir.mkdir(exist_ok=True)
58
  model_urls = {
59
  'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
60
  'dit_3b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
61
- 'dit_7b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-7B/resolve/main/seedvr2_ema_7b.pth',
62
  'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
63
  'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt'
64
  }
65
  for name, url in model_urls.items():
66
  _load_file_from_url(url=url, model_dir=str(ckpt_dir))
 
67
  setup_flag.touch()
68
  logger.info("--- Setup Global do SeedVR Concluído ---")
69
  return True
70
 
71
  def initialize_runner(self, model_version: str):
 
72
  if self.runner is not None: return
 
73
  os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index
 
74
  from projects.video_diffusion_sr.infer import VideoDiffusionInfer
75
  from common.config import load_config
 
76
  logger.info(f"Worker {self.global_device_id}: Inicializando runner... (Processo vê apenas {self.local_device_name})")
77
- config_path = APP_ROOT / (f'configs_{model_version.lower()}' / 'main.yaml')
78
- checkpoint_path = APP_ROOT / 'ckpts' / f'seedvr2_ema_{model_version.lower()}.pth'
 
 
 
 
 
79
  config = load_config(str(config_path))
80
  self.runner = VideoDiffusionInfer(config)
81
  OmegaConf.set_readonly(self.runner.config, False)
 
82
  self.runner.configure_dit_model(device=self.local_device_name, checkpoint=str(checkpoint_path))
83
  self.runner.configure_vae_model()
 
84
  self.is_initialized = True
85
  logger.info(f"Worker {self.global_device_id}: Runner pronto na VRAM.")
86
 
87
  def unload_runner(self):
 
88
  if self.runner is not None:
89
  del self.runner
90
  self.runner = None
@@ -92,12 +131,15 @@ class SeedVrWorker:
92
  torch.cuda.empty_cache()
93
  self.is_initialized = False
94
  logger.info(f"Worker {self.global_device_id}: Runner descarregado da VRAM.")
 
95
  if 'CUDA_VISIBLE_DEVICES' in os.environ:
96
  del os.environ['CUDA_VISIBLE_DEVICES']
97
 
98
  def process_video_internal(self, input_video_path, output_video_path, prompt, model_version, steps, seed):
 
99
  os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index
100
  device = torch.device(self.local_device_name)
 
101
  from common.seed import set_seed
102
  from data.image.transforms.divisible_crop import DivisibleCrop
103
  from data.image.transforms.na_resize import NaResize
@@ -105,41 +147,59 @@ class SeedVrWorker:
105
  from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
106
  from torchvision.transforms import Compose, Lambda, Normalize
107
  from torchvision.io.video import read_video
 
108
  set_seed(seed, same_across_ranks=True)
109
  self.runner.config.diffusion.timesteps.sampling.steps = steps
110
  self.runner.configure_diffusion()
 
111
  video_tensor = read_video(input_video_path, output_format="TCHW")[0] / 255.0
112
  res_h, res_w = video_tensor.shape[-2:]
113
  video_transform = Compose([
114
  NaResize(resolution=(res_h * res_w) ** 0.5, mode="area", downsample_only=False),
115
  Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
116
- DivisibleCrop((16, 16)), Normalize(0.5, 0.5), Rearrange("t c h w -> c t h w"),
 
 
117
  ])
118
  cond_latents = [video_transform(video_tensor.to(device))]
119
- self.runner.dit.to("cpu"); self.runner.vae.to(device)
 
120
  cond_latents = self.runner.vae_encode(cond_latents)
121
- self.runner.vae.to("cpu"); gc.collect(); torch.cuda.empty_cache(); self.runner.dit.to(device)
 
 
122
  pos_emb = torch.load(APP_ROOT / 'ckpts' / 'pos_emb.pt').to(device)
123
  neg_emb = torch.load(APP_ROOT / 'ckpts' / 'neg_emb.pt').to(device)
124
  text_embeds_dict = {"texts_pos": [pos_emb], "texts_neg": [neg_emb]}
 
125
  noises = [torch.randn_like(latent) for latent in cond_latents]
126
  conditions = [self.runner.get_condition(noise, latent_blur=latent, task="sr") for noise, latent in zip(noises, cond_latents)]
 
127
  with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
128
  video_tensors = self.runner.inference(noises=noises, conditions=conditions, dit_offload=True, **text_embeds_dict)
129
- self.runner.dit.to("cpu"); gc.collect(); torch.cuda.empty_cache(); self.runner.vae.to(device)
 
 
130
  samples = self.runner.vae_decode(video_tensors)
131
- final_sample, input_video_sample = samples[0], cond_latents[0]
 
132
  if final_sample.shape[1] < input_video_sample.shape[1]:
133
  input_video_sample = input_video_sample[:, :final_sample.shape[1]]
 
134
  final_sample = wavelet_reconstruction(rearrange(final_sample, "c t h w -> t c h w"), rearrange(input_video_sample, "c t h w -> t c h w"))
135
  final_sample = rearrange(final_sample, "t c h w -> t h w c")
136
  final_sample = final_sample.clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round()
137
  final_sample_np = final_sample.to(torch.uint8).cpu().numpy()
 
138
  mediapy.write_video(output_video_path, final_sample_np, fps=24)
139
- if 'CUDA_VISIBLE_DEVICES' in os.environ: del os.environ['CUDA_VISIBLE_DEVICES']
 
 
 
140
  return output_video_path
141
 
142
  class SeedVrPoolManager:
 
143
  def __init__(self, device_ids: list[str]):
144
  logger.info(f"SEEDVR POOL MANAGER: Criando workers para os dispositivos: {device_ids}")
145
  if not device_ids or 'cpu' in device_ids:
@@ -150,25 +210,30 @@ class SeedVrPoolManager:
150
  self.last_cleanup_thread = None
151
 
152
  def _cleanup_worker_thread(self, worker: SeedVrWorker):
 
153
  logger.info(f"SEEDVR CLEANUP THREAD: Iniciando limpeza de {worker.global_device_id} em background...")
154
  worker.unload_runner()
155
 
156
  def process_video(self, input_video_path: str, output_video_path: str, prompt: str,
157
- model_version: str = '7B', steps: int = 100, seed: int = 666,
158
- progress: gr.Progress = None) -> str:
159
  worker_to_use = None
160
  try:
161
  with self.lock:
162
  if self.last_cleanup_thread and self.last_cleanup_thread.is_alive():
163
  self.last_cleanup_thread.join()
 
164
  worker_to_use = self.workers[self.current_worker_index]
165
  previous_worker_index = (self.current_worker_index - 1 + len(self.workers)) % len(self.workers)
166
  worker_to_cleanup = self.workers[previous_worker_index]
 
167
  cleanup_thread = threading.Thread(target=self._cleanup_worker_thread, args=(worker_to_cleanup,))
168
  cleanup_thread.start()
169
  self.last_cleanup_thread = cleanup_thread
 
170
  worker_to_use.initialize_runner(model_version)
 
171
  self.current_worker_index = (self.current_worker_index + 1) % len(self.workers)
 
172
  logger.info(f"SEEDVR POOL MANAGER: Processando vídeo na GPU {worker_to_use.global_device_id}...")
173
  return worker_to_use.process_video_internal(
174
  input_video_path, output_video_path, prompt, model_version, steps, seed
@@ -185,13 +250,15 @@ def _load_file_from_url(url, model_dir='./', file_name=None):
185
  download_url_to_file(url, cached_file, hash_prefix=None, progress=True)
186
  return cached_file
187
 
 
188
  class SeedVrPlaceholder:
189
  def process_video(self, input_video_path, *args, **kwargs):
190
  logger.warning("SeedVR está desabilitado (gpus_required: 0). Pulando etapa de masterização HD.")
191
  return input_video_path
192
 
193
  try:
194
- with open("config.yaml", 'r') as f: config = yaml.safe_load(f)
 
195
  seedvr_gpus_required = config['specialists'].get('seedvr', {}).get('gpus_required', 0)
196
  seedvr_device_ids = hardware_manager.allocate_gpus('SeedVR', seedvr_gpus_required)
197
  if seedvr_gpus_required > 0 and 'cpu' not in seedvr_device_ids:
 
1
  # managers/seedvr_manager.py
2
  #
3
+ # Copyright (C) 2025 Carlos Rodrigues dos Santos
4
+ #
5
+ # Version: 6.0.0 (GPU Isolation Fix)
6
+ #
7
+ # Esta versão implementa a solução definitiva para os erros de dispositivo
8
+ # usando a variável de ambiente CUDA_VISIBLE_DEVICES. Cada worker agora opera
9
+ # em um ambiente completamente isolado, vendo apenas sua própria GPU.
10
+ # Isso força todo o código de terceiros a usar o dispositivo correto e
11
+ # elimina a necessidade de gerenciar manualmente o torch.distributed.
12
 
13
  import torch
14
+ import os
15
+ import gc
16
+ import logging
17
+ import sys
18
+ import subprocess
19
+ import threading
20
  from pathlib import Path
21
  from urllib.parse import urlparse
22
  from torch.hub import download_url_to_file
 
25
  import shutil
26
  from omegaconf import OmegaConf
27
  import yaml
28
+
29
+ # Imports relativos para o hardware_manager
30
  from ..tools.hardware_manager import hardware_manager
31
 
32
  logger = logging.getLogger(__name__)
33
 
34
+ # --- Caminhos Globais ---
35
  APP_ROOT = Path("/home/user/app")
36
  DEPS_DIR = APP_ROOT / "deps"
37
  SEEDVR_SPACE_DIR = DEPS_DIR / "SeedVR_Space"
38
  SEEDVR_SPACE_URL = "https://huggingface.co/spaces/ByteDance-Seed/SeedVR2-3B"
39
 
40
  class SeedVrWorker:
41
+ """Representa uma única instância do pipeline SeedVR em um dispositivo isolado."""
42
  def __init__(self, device_id: str):
43
  self.global_device_id = device_id
44
  self.local_device_name = 'cuda:0'
45
  self.gpu_index = self.global_device_id.split(':')[-1]
46
+
47
  self.runner = None
48
  self.is_initialized = False
49
  self.setup_complete = self._check_and_run_global_setup()
 
51
 
52
  @staticmethod
53
  def _check_and_run_global_setup():
54
+ """Executa o setup de arquivos uma única vez para toda a aplicação."""
55
  setup_flag = DEPS_DIR / "seedvr.setup.complete"
56
+ if str(APP_ROOT) not in sys.path:
57
+ sys.path.insert(0, str(APP_ROOT))
58
+
59
+ if setup_flag.exists():
60
+ return True
61
+
62
  logger.info("--- Iniciando Setup Global do SeedVR (primeira execução) ---")
63
+
64
  if not SEEDVR_SPACE_DIR.exists():
65
  DEPS_DIR.mkdir(exist_ok=True, parents=True)
66
  subprocess.run(["git", "clone", "--depth", "1", SEEDVR_SPACE_URL, str(SEEDVR_SPACE_DIR)], check=True)
67
+
68
  required_dirs = ["projects", "common", "models", "configs_3b", "configs_7b", "data"]
69
  for dirname in required_dirs:
70
+ source = SEEDVR_SPACE_DIR / dirname
71
+ target = APP_ROOT / dirname
72
+ if not target.exists():
73
+ shutil.copytree(source, target)
74
+
75
  try:
76
  import apex
77
  except ImportError:
78
  apex_url = 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl'
79
  apex_wheel_path = _load_file_from_url(url=apex_url, model_dir=str(DEPS_DIR))
80
  subprocess.run(f"pip install {apex_wheel_path}", check=True, shell=True)
81
+
82
  ckpt_dir = APP_ROOT / 'ckpts'
83
  ckpt_dir.mkdir(exist_ok=True)
84
  model_urls = {
85
  'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
86
  'dit_3b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
87
+ #'dit_7b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-7B/resolve/main/seedvr2_ema_7b.pth',
88
  'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
89
  'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt'
90
  }
91
  for name, url in model_urls.items():
92
  _load_file_from_url(url=url, model_dir=str(ckpt_dir))
93
+
94
  setup_flag.touch()
95
  logger.info("--- Setup Global do SeedVR Concluído ---")
96
  return True
97
 
98
  def initialize_runner(self, model_version: str):
99
+ """Carrega os modelos para a VRAM do dispositivo, usando um ambiente de GPU isolado."""
100
  if self.runner is not None: return
101
+
102
  os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index
103
+
104
  from projects.video_diffusion_sr.infer import VideoDiffusionInfer
105
  from common.config import load_config
106
+
107
  logger.info(f"Worker {self.global_device_id}: Inicializando runner... (Processo vê apenas {self.local_device_name})")
108
+
109
+ config_path_str = f'configs_{model_version.lower()}'
110
+ checkpoint_path_str = f'seedvr2_ema_{model_version.lower()}.pth'
111
+
112
+ config_path = APP_ROOT / config_path_str / 'main.yaml'
113
+ checkpoint_path = APP_ROOT / 'ckpts' / checkpoint_path_str
114
+
115
  config = load_config(str(config_path))
116
  self.runner = VideoDiffusionInfer(config)
117
  OmegaConf.set_readonly(self.runner.config, False)
118
+
119
  self.runner.configure_dit_model(device=self.local_device_name, checkpoint=str(checkpoint_path))
120
  self.runner.configure_vae_model()
121
+
122
  self.is_initialized = True
123
  logger.info(f"Worker {self.global_device_id}: Runner pronto na VRAM.")
124
 
125
  def unload_runner(self):
126
+ """Descarrega os modelos da VRAM e limpa o ambiente."""
127
  if self.runner is not None:
128
  del self.runner
129
  self.runner = None
 
131
  torch.cuda.empty_cache()
132
  self.is_initialized = False
133
  logger.info(f"Worker {self.global_device_id}: Runner descarregado da VRAM.")
134
+
135
  if 'CUDA_VISIBLE_DEVICES' in os.environ:
136
  del os.environ['CUDA_VISIBLE_DEVICES']
137
 
138
  def process_video_internal(self, input_video_path, output_video_path, prompt, model_version, steps, seed):
139
+ """Executa a inferência em um ambiente de GPU isolado."""
140
  os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu_index
141
  device = torch.device(self.local_device_name)
142
+
143
  from common.seed import set_seed
144
  from data.image.transforms.divisible_crop import DivisibleCrop
145
  from data.image.transforms.na_resize import NaResize
 
147
  from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
148
  from torchvision.transforms import Compose, Lambda, Normalize
149
  from torchvision.io.video import read_video
150
+
151
  set_seed(seed, same_across_ranks=True)
152
  self.runner.config.diffusion.timesteps.sampling.steps = steps
153
  self.runner.configure_diffusion()
154
+
155
  video_tensor = read_video(input_video_path, output_format="TCHW")[0] / 255.0
156
  res_h, res_w = video_tensor.shape[-2:]
157
  video_transform = Compose([
158
  NaResize(resolution=(res_h * res_w) ** 0.5, mode="area", downsample_only=False),
159
  Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
160
+ DivisibleCrop((16, 16)),
161
+ Normalize(0.5, 0.5),
162
+ Rearrange("t c h w -> c t h w"),
163
  ])
164
  cond_latents = [video_transform(video_tensor.to(device))]
165
+ self.runner.dit.to("cpu")
166
+ self.runner.vae.to(device)
167
  cond_latents = self.runner.vae_encode(cond_latents)
168
+ self.runner.vae.to("cpu"); gc.collect(); torch.cuda.empty_cache()
169
+ self.runner.dit.to(device)
170
+
171
  pos_emb = torch.load(APP_ROOT / 'ckpts' / 'pos_emb.pt').to(device)
172
  neg_emb = torch.load(APP_ROOT / 'ckpts' / 'neg_emb.pt').to(device)
173
  text_embeds_dict = {"texts_pos": [pos_emb], "texts_neg": [neg_emb]}
174
+
175
  noises = [torch.randn_like(latent) for latent in cond_latents]
176
  conditions = [self.runner.get_condition(noise, latent_blur=latent, task="sr") for noise, latent in zip(noises, cond_latents)]
177
+
178
  with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
179
  video_tensors = self.runner.inference(noises=noises, conditions=conditions, dit_offload=True, **text_embeds_dict)
180
+
181
+ self.runner.dit.to("cpu"); gc.collect(); torch.cuda.empty_cache()
182
+ self.runner.vae.to(device)
183
  samples = self.runner.vae_decode(video_tensors)
184
+ final_sample = samples[0]
185
+ input_video_sample = cond_latents[0] # Usar o latente de condição como base
186
  if final_sample.shape[1] < input_video_sample.shape[1]:
187
  input_video_sample = input_video_sample[:, :final_sample.shape[1]]
188
+
189
  final_sample = wavelet_reconstruction(rearrange(final_sample, "c t h w -> t c h w"), rearrange(input_video_sample, "c t h w -> t c h w"))
190
  final_sample = rearrange(final_sample, "t c h w -> t h w c")
191
  final_sample = final_sample.clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round()
192
  final_sample_np = final_sample.to(torch.uint8).cpu().numpy()
193
+
194
  mediapy.write_video(output_video_path, final_sample_np, fps=24)
195
+
196
+ if 'CUDA_VISIBLE_DEVICES' in os.environ:
197
+ del os.environ['CUDA_VISIBLE_DEVICES']
198
+
199
  return output_video_path
200
 
201
  class SeedVrPoolManager:
202
+ """Gerencia um pool de SeedVrWorkers para processamento em GPUs dedicadas."""
203
  def __init__(self, device_ids: list[str]):
204
  logger.info(f"SEEDVR POOL MANAGER: Criando workers para os dispositivos: {device_ids}")
205
  if not device_ids or 'cpu' in device_ids:
 
210
  self.last_cleanup_thread = None
211
 
212
  def _cleanup_worker_thread(self, worker: SeedVrWorker):
213
+ """Thread para descarregar o worker em segundo plano."""
214
  logger.info(f"SEEDVR CLEANUP THREAD: Iniciando limpeza de {worker.global_device_id} em background...")
215
  worker.unload_runner()
216
 
217
  def process_video(self, input_video_path: str, output_video_path: str, prompt: str,
218
+ model_version: str = '3B', steps: int = 100, seed: int = 666) -> str:
 
219
  worker_to_use = None
220
  try:
221
  with self.lock:
222
  if self.last_cleanup_thread and self.last_cleanup_thread.is_alive():
223
  self.last_cleanup_thread.join()
224
+
225
  worker_to_use = self.workers[self.current_worker_index]
226
  previous_worker_index = (self.current_worker_index - 1 + len(self.workers)) % len(self.workers)
227
  worker_to_cleanup = self.workers[previous_worker_index]
228
+
229
  cleanup_thread = threading.Thread(target=self._cleanup_worker_thread, args=(worker_to_cleanup,))
230
  cleanup_thread.start()
231
  self.last_cleanup_thread = cleanup_thread
232
+
233
  worker_to_use.initialize_runner(model_version)
234
+
235
  self.current_worker_index = (self.current_worker_index + 1) % len(self.workers)
236
+
237
  logger.info(f"SEEDVR POOL MANAGER: Processando vídeo na GPU {worker_to_use.global_device_id}...")
238
  return worker_to_use.process_video_internal(
239
  input_video_path, output_video_path, prompt, model_version, steps, seed
 
250
  download_url_to_file(url, cached_file, hash_prefix=None, progress=True)
251
  return cached_file
252
 
253
+ # --- Instanciação Singleton ---
254
class SeedVrPlaceholder:
    """No-op stand-in used when SeedVR is disabled (gpus_required: 0)."""

    def process_video(self, input_video_path, *args, **kwargs):
        """Skip HD mastering and hand the input video path straight back."""
        logger.warning("SeedVR está desabilitado (gpus_required: 0). Pulando etapa de masterização HD.")
        return input_video_path
258
 
259
  try:
260
+ with open("config.yaml", 'r') as f:
261
+ config = yaml.safe_load(f)
262
  seedvr_gpus_required = config['specialists'].get('seedvr', {}).get('gpus_required', 0)
263
  seedvr_device_ids = hardware_manager.allocate_gpus('SeedVR', seedvr_gpus_required)
264
  if seedvr_gpus_required > 0 and 'cpu' not in seedvr_device_ids: