import os

import gradio as gr
import openai

# --------------- PRESETS & FORMATS ---------------

PRESETS = {
    "Executive Summary": {
        "system_role": (
            "You are an executive briefing assistant for senior leaders. "
            "You distill complex material into clear, non-fluffy summaries that support decisions."
        ),
        "user_hint": "Paste a long report, transcript, or document here and I'll summarize it.",
    },
    "Polished Email Reply": {
        "system_role": (
            "You are a professional communications assistant. "
            "You write clear, concise, and respectful business emails suitable for executives."
        ),
        "user_hint": "Paste the email you received and notes on how you want to respond.",
    },
    "Meeting Notes → Action Plan": {
        "system_role": (
            "You transform messy meeting notes into a crisp, action-oriented summary."
        ),
        "user_hint": "Paste rough notes or bullet points from a meeting, even if they are messy.",
    },
    "Idea Generator / Brainstorm": {
        "system_role": (
            "You are a creative strategist. You generate structured ideas with next steps."
        ),
        "user_hint": "Describe what you want to build, improve, or launch. I'll propose ideas.",
    },
}

# How each preset should format its output in Markdown
PRESET_FORMAT_SPECS = {
    "Executive Summary": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Executive Summary`
- Then add `### Context` with 2–4 sentences.
- Add `### Key Points` as a bullet list (•) with the most important insights.
- Add `### Recommendations` as a numbered list (1., 2., 3.) with concrete actions.
- Use **bold** for critical terms and *italics* for nuance where helpful.
""",
    "Polished Email Reply": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Email Draft`
- Then include:
  - `**Subject:** ...`
  - A realistic greeting line (e.g., `Hi Alex,`)
  - Body in short paragraphs (2–4 sentences each).
  - Optional bullet points if listing items.
  - A professional closing (e.g., `Best,` or `Kind regards,`) and a placeholder name.
- Do NOT include meta-comments about the email. Only output the email content.
""",
    "Meeting Notes → Action Plan": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Meeting Summary`
- Then sections:
  - `### Context` – 2–3 sentences on what the meeting was about.
  - `### Key Decisions` – bullet list.
  - `### Action Items` – numbered list, each item formatted like:
    `1. **Owner:** Name (or Generic Owner) — *Due:* date or timeframe — Task description`
  - `### Risks & Open Questions` – bullets with risks, blockers, or unknowns.
""",
    "Idea Generator / Brainstorm": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Idea Set`
- For each idea:
  - `### Idea N — Short Title`
  - Bullet list:
    - `**Overview:** ...`
    - `**Why it matters:** ...`
    - `**Next steps (1–3):** ...`
- Aim for 3–5 strong ideas rather than many weak ones.
""",
}

BASE_SYSTEM = """
You are the ZEN Promptboard Pro assistant.

Rules:
- Always respond in GitHub-flavored Markdown.
- Follow any provided "Output format spec" exactly, especially headings and bullet structure.
- Make the output immediately usable and ready to send or paste into a document.
- Never explain what you are doing; do NOT include meta-instructions in the final output.
"""

# --------------- CORE CHAT LOGIC ---------------

def run_completion(api_key, model, preset_name, system_prompt, user_prompt, history):
    """
    Core call to the OpenAI Chat Completions API.

    - Does NOT throw on a missing API key; returns a friendly message instead.
    - Uses model-agnostic parameters (no temperature / max_tokens) to avoid 400s.
    - Injects preset-specific formatting rules so the output is richly structured.
    """
    history = history or []

    if not api_key:
        msg = "❌ No API key found. Paste your key and click **Save Key** first."
        # Chatbot "tuples" entries are (user_message, assistant_message); show notices on the assistant side.
        history.append((user_prompt or None, msg))
        return history, msg

    if not (user_prompt or "").strip():
        msg = "⚠️ Please enter a prompt or paste some content before running."
        history.append((None, msg))
        return history, msg

    openai.api_key = api_key.strip()

    # Build the system message by combining the base rules, the preset role, and the preset format spec.
    system_parts = [BASE_SYSTEM]

    if preset_name in PRESETS:
        system_parts.append(f"Preset role description:\n{PRESETS[preset_name]['system_role']}")
        fmt = PRESET_FORMAT_SPECS.get(preset_name, "")
        if fmt:
            system_parts.append(fmt)

    if system_prompt.strip():
        system_parts.append(
            "Additional system instructions from the user (apply after all above rules):\n"
            + system_prompt.strip()
        )

    final_system = "\n\n".join(system_parts)

    messages = [
        {"role": "system", "content": final_system},
        {"role": "user", "content": user_prompt.strip()},
    ]

    try:
        # Minimal API call: no temperature / max_* params to avoid model quirks
        response = openai.chat.completions.create(
            model=model.strip(),
            messages=messages,
        )
        answer = (response.choices[0].message.content or "").strip()
        if not answer:
            answer = "⚠️ The model returned an empty response. Try again with more context."
    except Exception as e:
        answer = f"❌ Error while calling the model:\n\n`{e}`"

    # Append one (user, assistant) pair so the Chatbot renders the turn correctly.
    history.append((user_prompt, answer))
    return history, answer
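
# For reference only: a minimal sketch of the same call using the explicit
# client object that openai>=1.0 also provides. The key, model, and prompt
# values below are placeholders, not part of this app's flow:
#
#   from openai import OpenAI
#   client = OpenAI(api_key="sk-...")
#   response = client.chat.completions.create(
#       model="gpt-5",
#       messages=[{"role": "user", "content": "Summarize this report ..."}],
#   )
#   print(response.choices[0].message.content)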
# --------------- HELPERS ---------------

def save_api_key(raw_key: str):
    cleaned = (raw_key or "").strip()
    if not cleaned:
        raise gr.Error("🔐 Please paste a valid API key before saving.")
    if not cleaned.startswith("sk-"):
        gr.Info("Key saved, but it doesn’t start with 'sk-'. Double-check your provider format.")
    return cleaned, gr.update(value="", placeholder="API key saved ✔")


def load_preset(preset_name: str):
    if not preset_name or preset_name not in PRESETS:
        return gr.update(), gr.update()
    preset = PRESETS[preset_name]
    return preset["system_role"], preset["user_hint"]


# --------------- UI ---------------

CUSTOM_CSS = """
#zen-root {
    background: radial-gradient(circle at top left, #020617 0, #020617 40%, #000000 100%);
    color: #f9fafb;
    font-family: 'Inter', system-ui, -apple-system, BlinkMacSystemFont, sans-serif;
    min-height: 100vh;
    padding-bottom: 2rem;
}
.zen-card {
    border-radius: 20px;
    background: linear-gradient(135deg, rgba(15,23,42,0.95), rgba(15,23,42,0.7));
    border: 1px solid rgba(148,163,184,0.45);
    backdrop-filter: blur(22px);
    box-shadow: 0 24px 60px rgba(15,23,42,0.95), 0 0 0 1px rgba(15,23,42,0.85);
}
.zen-header {
    padding: 1.8rem 2rem;
    margin-bottom: 1.5rem;
}
.zen-pill {
    border-radius: 9999px;
    padding: 0.25rem 0.9rem;
    border: 1px solid rgba(129,140,248,0.7);
    color: #a5b4fc;
    font-size: 0.75rem;
    text-transform: uppercase;
    letter-spacing: 0.12em;
}
.zen-title {
    font-size: 1.9rem;
    font-weight: 700;
    letter-spacing: -0.03em;
}
.zen-subtitle {
    font-size: 0.9rem;
    color: #9ca3af;
}
#zen-output-card {
    padding: 1.3rem 1.6rem;
}
#zen-output-card .markdown-body h2 {
    margin-top: 0.2rem;
}
#zen-output-card .markdown-body h3 {
    margin-top: 0.9rem;
}
#zen-meta-card {
    padding: 0.75rem 1rem;
    font-size: 0.85rem;
    color: #e5e7eb;
    border-bottom: 1px solid rgba(148,163,184,0.35);
}
"""

with gr.Blocks(
    css=CUSTOM_CSS,
    elem_id="zen-root",
    fill_height=True,
    title="ZEN Promptboard Pro — GPT-5",
) as demo:
    # Header
    gr.Markdown(
        """
<div class="zen-card zen-header">
  <span class="zen-pill">ZEN VANGUARD • GPT-5 WORKSTATION</span>
  <div class="zen-title">Professional AI Promptboard</div>
  <div class="zen-subtitle">
    Transform unstructured content into structured, ready-to-use executive artifacts — summaries, action plans, emails, and idea sets.
  </div>
</div>
""", ) api_key_state = gr.State("") with gr.Row(equal_height=True): # LEFT: Controls with gr.Column(scale=1): with gr.Group(elem_classes=["zen-card"], elem_id="zen-left-card"): gr.Markdown("#### 1. Connect to your model") api_key_input = gr.Textbox( label="API Key", placeholder="Paste your GPT-style key here (not stored on server)", type="password", ) with gr.Row(): save_btn = gr.Button("Save Key", variant="primary", scale=1) key_status = gr.Markdown("Key not saved yet.") model_name = gr.Textbox( label="Model ID", value="gpt-5", info="Default: gpt-5. You can switch to gpt-4o, gpt-4.1-mini, etc.", ) gr.Markdown("---") gr.Markdown("#### 2. Choose a workflow") preset_radio = gr.Radio( label="Quick presets", choices=list(PRESETS.keys()), interactive=True, ) with gr.Group(elem_classes=["zen-card"]): gr.Markdown("#### 3. System behavior (optional)") system_box = gr.Textbox( label="Additional system / role instructions", placeholder="Optional: refine how the AI behaves (e.g., 'Write at director level, avoid buzzwords.')", lines=4, ) # RIGHT: Workspace with gr.Column(scale=2): with gr.Group(elem_classes=["zen-card"]): gr.Markdown("#### 4. Your content") user_box = gr.Textbox( label="Prompt / Notes / Source Content", placeholder="Paste meeting notes, drafts, transcripts, or describe what you want.", lines=9, ) run_btn = gr.Button("Run Prompt", variant="primary") with gr.Group(elem_classes=["zen-card"], elem_id="zen-output-wrapper"): meta_md = gr.Markdown( "Ready. Choose a preset and run your first prompt.", elem_id="zen-meta-card", ) with gr.Tab("Formatted Output"): output_md = gr.Markdown( "Output will appear here.", elem_id="zen-output-card", ) with gr.Tab("History"): history_chat = gr.Chatbot( label="Conversation History", height=320, type="tuples", # explicit, no deprecation warning show_copy_button=True, ) # --------------- WIRES / EVENTS --------------- def _save_and_ack(key: str): stored_key, placeholder_update = save_api_key(key) status_msg = "✅ API key stored for this session." return stored_key, status_msg, placeholder_update save_btn.click( _save_and_ack, inputs=[api_key_input], outputs=[api_key_state, key_status, api_key_input], ) preset_radio.change( fn=load_preset, inputs=[preset_radio], outputs=[system_box, user_box], ) def _run(api_state, model, preset_name, system, user, history): history, answer = run_completion( api_key=api_state, model=model, preset_name=preset_name, system_prompt=system, user_prompt=user, history=history, ) # Meta card text: which preset + which model preset_label = preset_name or "Custom" meta_text = f"**Preset:** {preset_label}  ·  **Model:** `{model.strip() or 'gpt-5'}`" return meta_text, history, answer run_btn.click( _run, inputs=[api_key_state, model_name, preset_radio, system_box, user_box, history_chat], outputs=[meta_md, history_chat, output_md], ) if __name__ == "__main__": demo.launch()