from __future__ import annotations
import datetime
import html
from collections import deque
from threading import Lock
from typing import Any, Callable, Deque, Dict, Optional, Tuple
from queue import Queue, LifoQueue
import numpy as np
import os
import gradio as gr # type: ignore
from third_party_tools.text_to_audio_file import text_to_audio_file
from .cv_interface import CVInterface
from .render_plan_html import render_plan_html
should_narrate_events = os.getenv("SHOULD_NARRATE_EVENTS", "False").lower() == "true"
new_events_check_interval_seconds = 3
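# NOTE: this interval drives both the console-refresh timer and the
# analysis-result polling timer wired up in build_interface().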
class EastSyncInterface:
"""
EASTSYNC ENTERPRISE INTERFACE
Aesthetic: High Contrast / Dark Mode / Professional Analytics.
"""
SAMPLE_PROMPT = (
"PROJECT: Data Analytics Dashboard\n"
"SCOPE: Develop a real-time visualization layer for regional sales data.\n"
"TEAM: Data Science Team Alpha (2 Juniors, 1 Senior).\n"
"OBJECTIVE: Analyze current team capabilities and generate a training roadmap to close skill gaps."
)
def __init__(self):
self._action_log: Deque[str] = deque(maxlen=200)
self._action_log_lock = Lock()
# Queues for live audio narration
self.audio_queue: Queue[Tuple[int, np.ndarray]] = Queue()
        self.event_queue: LifoQueue[str] = LifoQueue()  # LIFO: the narrator picks up the newest event first
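        # A narrator worker (outside this module) is expected to drain event_queue,
        # synthesize speech, and push (sample_rate, samples) chunks onto audio_queue
        # for the streaming gr.Audio component. A minimal sketch, assuming a
        # hypothetical read_wav() decoder and that text_to_audio_file returns a
        # WAV path or None:
        #
        #   def narrator_loop(ui: "EastSyncInterface") -> None:
        #       while True:
        #           event = ui.event_queue.get()        # newest event first (LIFO)
        #           path = text_to_audio_file(event)    # may return None on failure
        #           if path is not None:
        #               rate, samples = read_wav(path)  # hypothetical helper
        #               ui.audio_queue.put((rate, samples))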
        self.init_message = (
            '<div class="console-line">'
            '<span class="console-timestamp">&gt;&gt;</span>'
            ' SYSTEM INITIALIZED. WAITING FOR PROJECT INPUT...'
            '</div>'
        )
self._app_css = self._compose_css()
self._cv_interface = CVInterface(self)
self._analysis_result: Optional[Any] = None # Store analysis result for async updates
self._analysis_error: Optional[str] = None # Store analysis error if any
self._analysis_running: bool = False # Track if analysis is currently running
self._cached_processing_state: Optional[str] = None # Cache processing state HTML
# Dynamic processing steps tracking
self._processing_steps: list[str] = [] # Current processing steps
self._processing_steps_lock = Lock()
self._processing_mode: Optional[str] = None # "project", "extract", or "match"
# ---------------------- HELPER METHODS ----------------------
def start_processing(self, mode: str):
"""Start processing mode and reset steps. Mode: 'project', 'extract', or 'match'."""
with self._processing_steps_lock:
self._processing_steps = []
self._processing_mode = mode
self._analysis_running = True
self._analysis_result = None
self._analysis_error = None
self._cached_processing_state = None
def stop_processing(self):
"""Stop processing mode and clear steps."""
with self._processing_steps_lock:
self._processing_mode = None
self._analysis_running = False
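    # Expected lifecycle (agent thread vs. UI timer):
    #   start_processing(mode)              -> UI click handler, before the agent runs
    #   register_agent_action(...)          -> agent thread, appends pipeline steps
    #   set_analysis_result()/..._error()   -> agent thread, on completion
    #   get_analysis_output()               -> UI timer, renders and calls stop_processing()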
def add_processing_step(self, step: str):
"""Add a new processing step to the dynamic list."""
with self._processing_steps_lock:
# Avoid duplicates
if step not in self._processing_steps:
self._processing_steps.append(step)
# Invalidate cache so next render picks up new steps
self._cached_processing_state = None
def get_processing_steps(self) -> list[str]:
"""Get current processing steps."""
with self._processing_steps_lock:
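            # Return a copy so callers can iterate without racing concurrent appends.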
return list(self._processing_steps)
def register_agent_action(self, action: str, args: Optional[Dict[str, Any]] = None):
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
with self._action_log_lock:
# Keep the high-contrast aesthetic but clean up the formatting
            msg = (
                f'<span class="console-timestamp">{timestamp}</span>'
                f' &gt;&gt; {html.escape(str(action))}'
            )
if args:
args_str = str(args)
if len(args_str) > 80:
args_str = args_str[:80] + "..."
msg += f' :: {html.escape(args_str)}'
            self._action_log.appendleft(f'<div class="console-line">{msg}</div>')
# Add to processing steps if we're in processing mode (check INSIDE lock to avoid race condition)
with self._processing_steps_lock:
if self._processing_mode is not None:
action_str = str(action)
if action_str not in self._processing_steps:
self._processing_steps.append(action_str)
# Push to event queue for narrator
self.event_queue.put_nowait(f"{action_str} {args if args else ''}")
self._cached_processing_state = None
def get_action_log_text(self) -> str:
with self._action_log_lock:
body = "".join(self._action_log) if self._action_log else self.init_message
        return f'<div class="console-wrapper">{body}</div>'
def clear_action_log(self) -> str:
with self._action_log_lock:
self._action_log.clear()
return self.get_action_log_text()
def render_analysis_result(self, result: Any) -> str:
"""Render the analysis result as HTML only. Audio is handled by narrator."""
html_out = render_plan_html(result)
return html_out
def set_analysis_result(self, result: Any):
"""Store analysis result for async display."""
self._analysis_result = result
self._analysis_error = None
def set_analysis_error(self, error: str):
"""Store analysis error for async display."""
self._analysis_error = error
self._analysis_result = None
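    # The two setters above run on the agent thread; get_analysis_output() below
    # runs on the Gradio timer, which is why results are handed over via
    # attributes rather than returned directly.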
def get_analysis_output(self) -> Optional[str]:
"""Get the current analysis output (result, error, or processing state).
Returns None if no update is needed."""
        # Check result/error first (these are set by the agent thread); consume
        # them so subsequent polls return None instead of re-rendering forever.
        if self._analysis_result is not None:
            result, self._analysis_result = self._analysis_result, None
            self.stop_processing()  # Mark as complete
            return self.render_analysis_result(result)
        elif self._analysis_error is not None:
            error, self._analysis_error = self._analysis_error, None
            self.stop_processing()  # Mark as complete
            return self.render_error_state(error)
# Check processing mode inside the lock for thread safety
with self._processing_steps_lock:
mode = self._processing_mode
is_running = self._analysis_running
# If not running and no result/error, don't update
if not is_running:
return None
# Render dynamic processing state based on mode
if mode == "project":
return self.render_project_processing_state()
elif mode in ("extract", "match"):
return self.render_processing_state(mode)
elif mode is not None:
return self.render_project_processing_state() # fallback
else:
return None # No processing mode set
def render_idle_state(self) -> str:
# The style is handled inside render_plan_html.py CSS for consistency.
return "// ENTER PROJECT DETAILS TO GENERATE ROADMAP
"
    def render_error_state(self, reason: str) -> str:
        # Error card built from the .error-* classes defined in _components_css().
        safe_reason = html.escape(reason)
        return f"""
        <div class="error-container">
            <div class="error-header">⚠ SYSTEM ERROR</div>
            <div class="error-body">{safe_reason}</div>
        </div>
        """
def reset_prompt_value(self) -> str:
return self.SAMPLE_PROMPT
    def render_project_processing_state(self) -> str:
        """Render animated processing state for project analysis with dynamic steps."""
        # Get the current (dynamic) processing steps
        current_steps = self.get_processing_steps()
        # Build the steps HTML: current step (in progress) on top,
        # completed steps below it, newest first.
        if current_steps:
            current_step = current_steps[-1]
            steps_html = f'<div class="step-line">⏳ {html.escape(current_step)}</div>'
            steps_html += "".join(
                f'<div class="step-line">✓ {html.escape(step)}</div>'
                for step in reversed(current_steps[:-1])
            )
        else:
            steps_html = '<div class="step-line">⏳ Initializing analysis...</div>'
        step_count = len(current_steps)
        # Markup uses .ent-header-label from _components_css(); "step-line" is a
        # placeholder class for the per-step styling.
        return f"""
        <div>
            <h3>🚀 INITIATING ANALYSIS</h3>
            <p>Analyzing project requirements and generating deployment plan...
               Monitor system logs for real-time updates.</p>
            <div class="ent-header-label">ANALYSIS PIPELINE: {step_count} step{"s" if step_count != 1 else ""}</div>
            {steps_html}
            <p>⏱️ Estimated Time: 45-90 seconds (depending on project complexity)</p>
        </div>
        """
def render_processing_state(self, mode: str = "extract") -> str:
"""Render animated processing state for CV analysis with dynamic steps."""
title = "📊 EXTRACTING SKILLS" if mode == "extract" else "🎯 ANALYZING CV + MATCHING PROJECTS"
# Get current processing steps (dynamic)
current_steps = self.get_processing_steps()
# Build steps HTML - show only actual steps that have been added
if current_steps:
steps_html = ""
# Current step (in progress) - with animation - ON TOP
current_step = current_steps[-1]
steps_html += f'⏳ {html.escape(current_step)}
'
# Previous steps (completed) - WHITE text - REVERSED (Newest first)
steps_html += "".join([
f'✓ {html.escape(step)}
'
for step in reversed(current_steps[:-1])
])
else:
steps_html = '⏳ Initializing...
'
step_count = len(current_steps) if current_steps else 0
return f"""
{title}
Processing document... Please monitor the system logs for real-time updates.
PROCESSING PIPELINE:
{step_count} step{"s" if step_count != 1 else ""}
{steps_html}
⏱️ Estimated Time: {('20-30 seconds' if mode == 'extract' else '30-45 seconds')}
"""
# ---------------------- TACTICAL CSS ----------------------
def _token_css(self) -> str:
return """
:root {
/* ARC RAIDERS SPECTRUM PALETTE */
--arc-red: #FF2A2A;
--arc-orange: #FF7F00;
--arc-yellow: #FFD400;
--arc-green: #55FF00;
--arc-cyan: #00FFFF;
/* SURFACES */
--bg-void: #090B10; /* Deep Black/Navy */
--bg-panel: #12141A; /* Slightly lighter panel */
--bg-card: #181B24; /* Card background */
/* TEXT - IMPROVED CONTRAST */
--text-main: #FFFFFF; /* Pure White for readability */
--text-dim: #AABBC9; /* Lighter grey for secondary text */
/* BORDERS */
--border-dim: #2A303C;
--border-bright: #FF7F00;
/* FONTS */
--font-header: "Inter", "Segoe UI", sans-serif;
--font-mono: "JetBrains Mono", "Consolas", monospace;
}
"""
def _base_typography_css(self) -> str:
return """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700;800&family=JetBrains+Mono:wght@400;500;700&display=swap');
body {
background-color: var(--bg-void);
color: var(--text-main);
font-family: var(--font-header);
font-size: 16px;
line-height: 1.5;
-webkit-font-smoothing: antialiased;
margin: 0;
padding: 0;
min-height: 100vh;
}
/* FORCE FULL HEIGHT ON GRADIO CONTAINERS */
.gradio-container {
max-width: 1920px !important;
padding: 0 !important;
min-height: 100vh !important;
background-color: var(--bg-void); /* Ensure bg extends even if content is short */
}
/* Fix for the prose/markdown wrapper messing up heights */
.prose {
max-width: none !important;
}
h1, h2, h3, h4 {
font-family: var(--font-header);
letter-spacing: -0.02em;
font-weight: 800;
color: white;
}
/* COMPONENT OVERRIDES */
.gr-button {
border-radius: 2px !important;
font-family: var(--font-header) !important;
font-weight: 700 !important;
text-transform: uppercase;
letter-spacing: 1px;
font-size: 14px !important;
padding: 10px 16px !important;
}
.gr-box, .gr-panel, .gr-group {
border-radius: 2px !important;
border: 1px solid var(--border-dim) !important;
background: var(--bg-panel) !important;
}
.gr-input, textarea {
background: #0D1017 !important;
border: 1px solid var(--border-dim) !important;
color: var(--text-main) !important;
font-family: var(--font-mono) !important;
font-size: 15px !important;
line-height: 1.6 !important;
}
.gr-form { background: transparent !important; }
.gr-block { background: transparent !important; border: none !important; }
span.svelte-1gfkn6j { font-size: 13px !important; font-weight: 600 !important; color: var(--arc-yellow) !important; }
"""
def _components_css(self) -> str:
return """
/* --- TOP SPECTRUM STRIPE --- */
.status-bar-spectrum {
height: 6px;
width: 100%;
background: linear-gradient(90deg,
var(--arc-red) 0%,
var(--arc-orange) 25%,
var(--arc-yellow) 50%,
var(--arc-green) 75%,
var(--arc-cyan) 100%);
box-shadow: 0 2px 15px rgba(255, 127, 0, 0.3);
}
/* --- BUTTON VARIANTS --- */
.btn-tac-primary {
background: var(--arc-orange) !important;
color: #000 !important;
border: 1px solid var(--arc-orange) !important;
}
.btn-tac-primary:hover {
background: #FF9500 !important;
box-shadow: 0 0 15px rgba(255, 127, 0, 0.5);
}
.btn-tac-secondary {
background: transparent !important;
border: 1px solid var(--border-dim) !important;
color: var(--text-dim) !important;
}
.btn-tac-secondary:hover {
border-color: var(--text-main) !important;
color: var(--text-main) !important;
background: rgba(255,255,255,0.05) !important;
}
/* --- LAYOUT PANELS --- */
/* Use fill_height=True in Gradio Blocks, but CSS reinforces it */
.main-container {
min-height: calc(100vh - 80px); /* Account for header height approx */
display: flex;
align-items: stretch;
}
.input-panel {
padding: 32px;
border-right: 1px solid var(--border-dim);
background: var(--bg-panel);
height: auto !important; /* Let it grow */
min-height: 100%;
}
.output-panel {
padding: 32px;
background: #0C0E14; /* Darker background for content */
height: auto !important;
min-height: 100%;
flex-grow: 1;
}
.ent-header-label {
font-size: 13px;
color: var(--arc-yellow);
text-transform: uppercase;
letter-spacing: 1.5px;
margin-bottom: 12px;
font-weight: 700;
display: flex;
align-items: center;
gap: 8px;
}
.ent-header-label::before {
content: "";
display: block;
width: 4px; height: 16px;
background: var(--arc-yellow);
box-shadow: 0 0 8px var(--arc-yellow);
}
/* --- ERROR STATE --- */
.error-container {
border: 1px solid var(--arc-red);
background: rgba(255, 42, 42, 0.05);
border-left: 4px solid var(--arc-red);
padding: 0;
margin-top: 20px;
font-family: var(--font-mono);
}
.error-header {
background: rgba(255, 42, 42, 0.1);
padding: 12px 20px;
color: var(--arc-red);
font-weight: 700;
border-bottom: 1px solid rgba(255, 42, 42, 0.2);
display: flex;
align-items: center;
gap: 10px;
letter-spacing: 1px;
}
.error-body {
padding: 20px;
color: var(--text-main);
font-size: 14px;
line-height: 1.6;
}
"""
def _console_css(self) -> str:
return """
.console-wrapper {
background: #08090D;
border: 1px solid var(--border-dim);
padding: 16px;
font-family: var(--font-mono);
font-size: 13px;
min-height: 300px;
max-height: 40vh;
overflow-y: auto;
color: var(--text-main);
}
.console-line {
margin-bottom: 8px;
border-bottom: 1px solid rgba(255,255,255,0.05);
padding-bottom: 4px;
line-height: 1.4;
}
.console-timestamp { color: var(--arc-cyan); margin-right: 8px; font-weight:600; }
.console-wrapper::-webkit-scrollbar { width: 8px; }
.console-wrapper::-webkit-scrollbar-track { background: #08090D; }
.console-wrapper::-webkit-scrollbar-thumb { background: var(--border-dim); border-radius: 4px; }
/* DISABLE GRADIO DEFAULT LOADING OVERLAY */
.generating {
display: none !important;
}
.pending {
opacity: 1 !important;
}
.eta-bar {
display: none !important;
}
/* LIVE NARRATION STYLING */
/* Target the audio component's container */
audio {
width: 100% !important;
background: #1A1D24 !important;
border-radius: 4px !important;
border: 1px solid rgba(255, 127, 0, 0.4) !important;
}
/* Style the audio player controls */
audio::-webkit-media-controls-panel {
background: linear-gradient(to bottom, rgba(30, 30, 40, 0.9), rgba(20, 20, 30, 0.95)) !important;
border-radius: 4px !important;
}
audio::-webkit-media-controls-play-button,
audio::-webkit-media-controls-mute-button {
border-radius: 50% !important;
}
audio::-webkit-media-controls-timeline {
border-radius: 2px !important;
height: 6px !important;
}
audio::-webkit-media-controls-current-time-display,
audio::-webkit-media-controls-time-remaining-display {
color: #FFA94D !important;
font-family: var(--font-mono) !important;
font-size: 12px !important;
font-weight: 600 !important;
text-shadow: 0 0 3px rgba(255, 127, 0, 0.4) !important;
}
/* Add glow effect to live narration container */
.live-narration-wrapper {
padding: 16px;
background: linear-gradient(135deg, rgba(26, 29, 36, 0.8), rgba(20, 23, 30, 0.9));
border: 2px solid var(--arc-orange);
border-radius: 4px;
box-shadow: 0 0 20px rgba(255, 127, 0, 0.3);
margin-bottom: 20px;
}
.live-narration-label {
color: var(--arc-orange);
background: rgb(18, 20, 26);
font-size: 12px;
font-weight: 700;
text-transform: uppercase;
letter-spacing: 1.5px;
padding-bottom: 8px;
display: flex;
align-items: center;
gap: 8px;
}
.live-narration-label::before {
content: "";
display: inline-block;
width: 8px;
height: 8px;
background: var(--arc-red);
border-radius: 50%;
animation: pulse-red 1.5s ease-in-out infinite;
}
@keyframes pulse-red {
0%, 100% { opacity: 1; box-shadow: 0 0 8px var(--arc-red); }
50% { opacity: 0.5; box-shadow: 0 0 4px var(--arc-red); }
}
"""
def _compose_css(self) -> str:
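        # Order matters: _token_css() defines the custom properties
        # (var(--arc-*), var(--bg-*), ...) that the later layers reference.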
return "\n".join([
self._token_css(),
self._base_typography_css(),
self._components_css(),
self._console_css(),
])
# --- UI Builders ---
    def _build_hero(self) -> str:
        # Hero banner. The spectrum stripe class comes from _components_css();
        # the inline layout styles here are best-effort.
        return """
        <div class="status-bar-spectrum"></div>
        <div style="display: flex; align-items: center; gap: 16px; padding: 20px 32px; border-bottom: 1px solid var(--border-dim);">
            <div style="font-size: 28px; font-weight: 800; color: var(--arc-orange);">E</div>
            <div>
                <h1 style="margin: 0; font-size: 22px;">EASTSYNC ENTERPRISE</h1>
                <div style="color: var(--text-dim); font-size: 12px; letter-spacing: 2px;">CAPABILITY INTELLIGENCE PLATFORM</div>
            </div>
            <div style="margin-left: auto; text-align: right; font-family: var(--font-mono); font-size: 12px;">
                <div style="color: var(--arc-green);">● SYSTEM ONLINE</div>
                <div style="color: var(--text-dim);">VER: 4.2.0-ENT</div>
            </div>
        </div>
        """
def build_interface(self, analyze_callback: Callable[[str], str], cancel_run_callback: Callable[[], None], start_audio_stream_callback: Callable[[], Any]) -> gr.Blocks:
theme = gr.themes.Base(
primary_hue="orange",
neutral_hue="slate",
)
# Use fill_height=True on Blocks to encourage full-screen layout
with gr.Blocks(theme=theme, css=self._app_css, title="EastSync Enterprise", fill_height=True) as demo:
gr.HTML(self._build_hero())
            # Live Narration with custom wrapper (the whole group is hidden when narration is disabled)
            with gr.Group(elem_classes=["live-narration-wrapper"], visible=should_narrate_events):
                gr.HTML('<div class="live-narration-label">🔴 LIVE AI NARRATION</div>')
                live_audio = gr.Audio(
                    label="",
                    streaming=True,
                    autoplay=True,
                    visible=should_narrate_events,
                    show_label=False,
                    elem_id="live-narrator-audio"
                )
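            # NOTE: with streaming=True, the callback wired to live_audio
            # (start_audio_stream_callback) is expected to yield
            # (sample_rate, numpy_array) chunks, i.e. the tuples audio_queue carries.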
with gr.Row(equal_height=True, elem_classes=["main-container"]):
# --- LEFT COLUMN: INPUTS ---
with gr.Column(scale=3, elem_classes=["input-panel"]) as mission_panel:
gr.HTML("")
input_box = gr.TextArea(
label="PROJECT REQUIREMENTS",
show_label=False,
value=self.SAMPLE_PROMPT,
lines=12,
placeholder="Define project scope, technical requirements, and current team composition..."
)
with gr.Row():
btn_run = gr.Button("GENERATE ROADMAP", elem_classes=["btn-tac-primary"])
with gr.Row():
btn_reset = gr.Button("RESET FORM", elem_classes=["btn-tac-secondary"])
with gr.Row():
btn_cancel = gr.Button("STOP ANALYSIS", elem_classes=["btn-tac-secondary"])
with gr.Row():
btn_cv = gr.Button("📄 CV ANALYSIS", elem_classes=["btn-tac-primary"])
gr.HTML("") # Flexible Spacer
gr.HTML("")
console = gr.HTML(self.get_action_log_text())
# --- LEFT COLUMN: CV UPLOAD (hidden by default) ---
with gr.Column(scale=3, elem_classes=["input-panel"], visible=False) as cv_upload_panel:
gr.HTML("")
gr.HTML("""
📊 EXTRACT SKILLS
Parse CV to identify technical skills, experience, certifications.
🎯 EXTRACT + MATCH
Parse CV, rank projects, identify skill gaps.
""")
cv_file_input = gr.File(
label="SELECT CV FILE",
file_types=[".pdf", ".docx", ".doc"],
type="filepath"
)
with gr.Row():
btn_process_cv = gr.Button("📊 EXTRACT SKILLS", elem_classes=["btn-tac-secondary"])
btn_process_cv_match = gr.Button("🎯 EXTRACT + MATCH PROJECTS", elem_classes=["btn-tac-primary"])
with gr.Row():
btn_close_cv = gr.Button("← BACK", elem_classes=["btn-tac-secondary"])
gr.HTML("")
gr.HTML("")
console_cv = gr.HTML(self.get_action_log_text())
# --- RIGHT COLUMN: OUTPUT ---
with gr.Column(scale=7, elem_classes=["output-panel"]):
output_display = gr.HTML(self.render_idle_state())
# --- Event Bindings ---
# Project Analysis
def start_project_analysis():
self.start_processing("project")
return self.render_project_processing_state()
# Trigger audio stream independently so it doesn't block analysis
btn_run.click(start_audio_stream_callback, outputs=live_audio)
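            # Chain: render the processing state immediately (queue=False), clear and
            # re-render the console, run the blocking analysis, then do a final
            # console refresh once analyze_callback returns.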
btn_run.click(
start_project_analysis,
outputs=output_display,
queue=False
).then(
self.clear_action_log, outputs=console, queue=False
).then(
                self.get_action_log_text, outputs=console
).then(
analyze_callback, inputs=input_box, outputs=output_display
).then(
self.get_action_log_text, outputs=console
)
btn_reset.click(self.reset_prompt_value, outputs=input_box)
# CV Button - Toggle panels
def show_cv_interface():
return (
gr.update(visible=False), # Hide mission_panel
gr.update(visible=True), # Show cv_upload_panel
self._cv_interface.render_cv_upload_interface() # Update output_display
)
btn_cv.click(
show_cv_interface,
outputs=[mission_panel, cv_upload_panel, output_display]
)
# Close CV Section - Back to mission panel
def close_cv_interface():
return (
gr.update(visible=True), # Show mission_panel
gr.update(visible=False), # Hide cv_upload_panel
self.render_idle_state() # Reset output_display
)
btn_close_cv.click(
close_cv_interface,
outputs=[mission_panel, cv_upload_panel, output_display]
)
# Standard CV analysis (no project matching)
def start_cv_extract():
self.start_processing("extract")
return self.render_processing_state("extract")
def finish_cv_processing():
self.stop_processing()
return self.get_action_log_text()
btn_process_cv.click(
start_cv_extract,
outputs=output_display,
queue=False
).then(
self.clear_action_log, outputs=console_cv, queue=False
).then(
self._cv_interface.process_cv_upload,
inputs=cv_file_input,
outputs=output_display
).then(
finish_cv_processing, outputs=console_cv
)
# CV + Project Matching
def start_cv_match():
self.start_processing("match")
return self.render_processing_state("match")
btn_process_cv_match.click(
start_cv_match,
outputs=output_display,
queue=False
).then(
self.clear_action_log, outputs=console_cv, queue=False
).then(
self._cv_interface.process_cv_with_matching,
inputs=cv_file_input,
outputs=output_display
).then(
finish_cv_processing, outputs=console_cv
)
btn_cancel.click(cancel_run_callback)
# Live log updates (both consoles)
def update_both_consoles():
log_text = self.get_action_log_text()
return (log_text, log_text) # Return same log for both consoles
gr.Timer(new_events_check_interval_seconds).tick(update_both_consoles, outputs=[console, console_cv])
            # Poll for analysis-result updates on the same timer interval
            def poll_analysis_result():
                """Return fresh output when the analysis has produced something;
                otherwise return a no-op gr.update() so the display is untouched."""
                output = self.get_analysis_output()
                return output if output is not None else gr.update()
            gr.Timer(new_events_check_interval_seconds).tick(poll_analysis_result, outputs=output_display)
return demo