# ===== app_demo.py - compaTAI =====
import torch
import cv2
import gradio as gr
from pathlib import Path

from utils.video_processor import VideoProcessor
from utils.config import PROCESSING, CHAOS_THRESHOLDS, CDMX_BEHAVIORS
import spaces


class DemoComponent:
    """Gradio demo wrapper that runs VideoProcessor over CDMX traffic videos.

    Builds the Blocks UI (`create_interface`), drives processing with robust
    GPU/CPU fallback (`process_video`), and renders a markdown report from
    the analytics dict returned by the processor.
    """

    def __init__(self):
        # Do NOT touch the GPU here: VideoProcessor.process carries
        # @spaces.GPU, and initializing CUDA at construction time would
        # double-initialize on ZeroGPU spaces.
        self.processor = VideoProcessor()

    # NOTE: @spaces.GPU was deliberately removed from this method to avoid
    # double initialization on ZeroGPU (the decorator lives on
    # VideoProcessor.process instead).
    def process_video(self, video_path, max_frames=30):
        """Process one video and return (output_path, stats_markdown, error).

        Optimized for ZeroGPU:
        - Robust CUDA error handling with automatic CPU fallback.
        - Strict validation of the input file and of the processor's result.
        - Compatible with memory-limited GPU spaces.

        Args:
            video_path: path to the input video (str or path-like; may be
                None when Gradio passes an empty input).
            max_frames: cap on the number of frames to analyze.

        Returns:
            (output_path, stats_markdown, error_message). On failure the
            first two are None/"" and the third carries a user-facing
            message; on success the third is "".
        """
        print(f"\n{'='*40}")
        print(f"[APP] Procesando video: {video_path} (max_frames={max_frames})")

        try:
            # BUG FIX: reject None/empty BEFORE str() — str(None) == "None"
            # is truthy and used to slip past this guard.
            if not video_path:
                return None, "", "❌ Video no encontrado o inválido"
            video_path = str(video_path)  # normalize path-like inputs
            if not Path(video_path).exists():
                return None, "", "❌ Video no encontrado o inválido"

            # GPU pre-check: torch may report CUDA available while the
            # device is not actually usable (common on shared spaces).
            gpu_available = torch.cuda.is_available()
            if gpu_available:
                try:
                    torch.zeros(1).cuda()  # minimal allocation as a probe
                except RuntimeError:
                    gpu_available = False
                    print("[WARNING] GPU detectada pero no accesible")

            # Main processing attempt, with a one-shot CPU fallback on
            # CUDA-specific runtime errors.
            try:
                result = self.processor.process(video_path, max_frames=max_frames)
            except RuntimeError as e:
                # Case-insensitive check covers both "CUDA" and "cuda"
                # spellings in driver/runtime messages.
                if "cuda" in str(e).lower():
                    print("[FALLBACK] Error de GPU, intentando con CPU")
                    # BUG FIX: TrafficDetector was referenced without any
                    # import (NameError precisely when the fallback fired).
                    # Imported lazily here so the GPU path pays no cost.
                    # TODO(review): confirm the module that exports
                    # TrafficDetector — assumed alongside VideoProcessor.
                    from utils.video_processor import TrafficDetector
                    self.processor.detector = TrafficDetector(device='cpu')
                    result = self.processor.process(video_path, max_frames=max_frames)
                else:
                    raise

            # Strict result validation: the processor must return a dict
            # with success=True.
            if not isinstance(result, dict):
                return None, "", "❌ Formato de resultado inválido"
            if not result.get("success", False):
                return None, "", f"❌ {result.get('error', 'Procesamiento fallido')}"

            # The output path key varies across processor versions; accept
            # the first known key present.
            output_path = next(
                (result[k] for k in ['output_path', 'video_path', 'output'] if k in result),
                None
            )
            if not output_path or not Path(output_path).exists():
                return None, "", "❌ Archivo de salida no generado"

            # Sanity-check that the produced file is a readable video.
            try:
                cap = cv2.VideoCapture(output_path)
                if not cap.isOpened():
                    return None, "", "❌ Video de salida corrupto"
                cap.release()
            except Exception as e:
                print(f"[WARNING] Error validando video: {str(e)}")
                return None, "", "❌ Error validando resultado"

            # Stats are best-effort: a processed video without metrics is
            # still a success.
            stats_md = self._generate_stats(result.get("analytics", {})) or \
                "⚠️ Video procesado (métricas no disponibles)"

            print(f"[SUCCESS] Procesamiento completado")
            print(f"{'='*40}\n")
            return output_path, stats_md, ""

        # NOTE: OutOfMemoryError subclasses RuntimeError, so it must be
        # caught first for the specific message to be reachable.
        except torch.cuda.OutOfMemoryError:
            return None, "", "🚨 Memoria insuficiente. Reduce los frames o el tamaño del video"
        except RuntimeError as e:
            print(f"[RUNTIME ERROR] {str(e)}")
            return None, "", f"❌ Error del sistema: {str(e)}"
        except Exception as e:
            print(f"[UNEXPECTED ERROR] {str(e)}")
            return None, "", f"❌ Error inesperado: {str(e)}"

    def _generate_stats(self, analytics):
        """Render the analytics dict as a markdown report.

        Defensive: uses .get() throughout so a partial analytics dict never
        raises KeyError.
        """
        frames = analytics.get('frames_processed', 0)
        total_objects = analytics.get('total_objects', 0)
        avg_per = analytics.get('avg_objects_per_frame', 0.0)
        avg_chaos = analytics.get('avg_chaos_score', 0.0)
        stats = f"""
📊 **compaTAI TRAFFIC ANALYSIS REPORT**

🎯 **Object Detection:**
• Frames processed: {frames}
• Total objects: {total_objects}
• Average per frame: {avg_per:.1f}

⚠️ **CDMX Chaos Level: {avg_chaos:.2f}/100**
{self._get_chaos_interpretation(avg_chaos)}

🇲🇽 **CDMX-Specific Behaviors Detected:**
{self._format_cdmx_behaviors(analytics.get('cdmx_behaviors', {}))}
""".strip()
        return stats

    def _format_cdmx_behaviors(self, behaviors):
        """Format detected behaviors as a markdown bullet list.

        Accepts a dict (behavior -> count), a list of behavior names, or —
        defensively — any other type, which is stringified as-is.
        """
        if not behaviors:
            return " - Ninguno detectado"
        if isinstance(behaviors, dict):
            return "\n".join([f" - {b}: {c}" for b, c in behaviors.items()])
        if isinstance(behaviors, list):
            return "\n".join([f" - {b}" for b in behaviors])
        return str(behaviors)

    def _get_chaos_interpretation(self, score):
        """Map a 0-100 chaos score to a human-readable severity label.

        Thresholds come from CHAOS_THRESHOLDS config; any lookup/compare
        failure (missing key, non-numeric score) degrades to "N/A".
        """
        try:
            if score < CHAOS_THRESHOLDS['low']:
                return "🟢 Orderly traffic - Current robotaxis might work"
            elif score < CHAOS_THRESHOLDS['medium']:
                return "🟡 Moderate chaos - Requires minor adaptations"
            elif score < CHAOS_THRESHOLDS['high']:
                return "🟠 CDMX-typical chaos - Requires specialized AI"
            else:
                return "🔴 Extreme chaos - Only advanced cultural AI can handle"
        except Exception:
            return "N/A"

    def create_interface(self):
        """Build and return the Gradio Blocks UI (not launched here)."""
        EXAMPLES_DIR = Path(__file__).parent / "examples"
        example_files = [str(f) for f in EXAMPLES_DIR.glob("*.mp4")]
        with gr.Blocks(title="CDMX Traffic Analysis") as interface:
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### 📹 Upload or select a CDMX traffic video")
                    video_input = gr.Video(label="🎥 Select video file")
                    if example_files:
                        gr.Examples(
                            examples=example_files,
                            inputs=[video_input],
                            label="🎞️ Try Example Videos"
                        )
                    analyze_btn = gr.Button("Analyze Traffic", variant="primary")
                with gr.Column():
                    gr.Markdown("### 📊 Analysis Results")
                    video_output = gr.Video(label="AI Analysis Results", height=400)
                    with gr.Accordion("📝 Detailed Report", open=False):
                        stats_output = gr.Markdown()
                    error_output = gr.Textbox(label="Status", visible=False)
            # Wrap in a lambda so max_frames is always supplied (avoids
            # Gradio signature-mismatch warnings).
            analyze_btn.click(
                fn=lambda video: self.process_video(video, PROCESSING.get('max_frames', 30)),
                inputs=[video_input],
                outputs=[video_output, stats_output, error_output]
            )
        return interface