import os

import gradio as gr
import openai

# --------------- PRESETS & FORMATS ---------------

PRESETS = {
    "Executive Summary": {
        "system_role": (
            "You are an executive briefing assistant for senior leaders. "
            "You distill complex material into clear, non-fluffy summaries that support decisions."
        ),
        "user_hint": "Paste a long report, transcript, or document here and I'll summarize it.",
    },
    "Polished Email Reply": {
        "system_role": (
            "You are a professional communications assistant. "
            "You write clear, concise, and respectful business emails suitable for executives."
        ),
        "user_hint": "Paste the email you received and notes on how you want to respond.",
    },
    "Meeting Notes → Action Plan": {
        "system_role": (
            "You transform messy meeting notes into a crisp, action-oriented summary."
        ),
        "user_hint": "Paste rough notes or bullet points from a meeting, even if they are messy.",
    },
    "Idea Generator / Brainstorm": {
        "system_role": (
            "You are a creative strategist. You generate structured ideas with next steps."
        ),
        "user_hint": "Describe what you want to build, improve, or launch. I'll propose ideas.",
    },
}

# How each preset should format its output in Markdown
PRESET_FORMAT_SPECS = {
    "Executive Summary": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Executive Summary`
- Then add `### Context` with 2–4 sentences.
- Add `### Key Points` as a bullet list (•) with the most important insights.
- Add `### Recommendations` as a numbered list (1., 2., 3.) with concrete actions.
- Use **bold** for critical terms and *italics* for nuance where helpful.
""",
    "Polished Email Reply": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Email Draft`
- Then include:
  - `**Subject:** ...`
  - A realistic greeting line (e.g., `Hi Alex,`)
  - Body in short paragraphs (2–4 sentences each).
  - Optional bullet points if listing items.
  - A professional closing (e.g., `Best,` or `Kind regards,`) and a placeholder name.
- Do NOT include meta-comments about the email. Only output the email content.
""",
    "Meeting Notes → Action Plan": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Meeting Summary`
- Then sections:
  - `### Context` – 2–3 sentences on what the meeting was about.
  - `### Key Decisions` – bullet list.
  - `### Action Items` – numbered list, each item formatted like:
    `1. **Owner:** Name (or Generic Owner) — *Due:* date or timeframe — Task description`
  - `### Risks & Open Questions` – bullets with risks, blockers, or unknowns.
""",
    "Idea Generator / Brainstorm": """
Output format spec (use GitHub-flavored Markdown):
- Start with: `## Idea Set`
- For each idea:
  - `### Idea N — Short Title`
  - Bullet list:
    - `**Overview:** ...`
    - `**Why it matters:** ...`
    - `**Next steps (1–3):** ...`
- Aim for 3–5 strong ideas rather than many weak ones.
""",
}

BASE_SYSTEM = """
You are the ZEN Promptboard Pro assistant.

Rules:
- Always respond in GitHub-flavored Markdown.
- Follow any provided "Output format spec" exactly, especially headings and bullet structure.
- Make the output immediately usable and ready to send or paste into a document.
- Never explain what you are doing; do NOT include meta-instructions in the final output.
"""


# --------------- CORE CHAT LOGIC ---------------

def run_completion(api_key, model, preset_name, system_prompt, user_prompt, history):
    """
    Core call to the OpenAI Chat Completions API.

    - Does NOT throw on missing API key; returns a friendly message instead.
    - Uses model-agnostic parameters (no temperature / max_tokens) to avoid 400s.
    - Injects preset-specific formatting rules so output is richly structured.
    """
    history = history or []

    if not api_key:
        msg = "❌ No API key found. Paste your key and click **Save Key** first."
        history.append(("System", msg))
        return history, msg

    if not user_prompt.strip():
        msg = "⚠️ Please enter a prompt or paste some content before running."
        history.append(("System", msg))
        return history, msg

    openai.api_key = api_key.strip()

    # Build system message combining base, preset role, and preset formatting
    system_parts = [BASE_SYSTEM]

    if preset_name in PRESETS:
        system_parts.append(f"Preset role description:\n{PRESETS[preset_name]['system_role']}")

    fmt = PRESET_FORMAT_SPECS.get(preset_name, "")
    if fmt:
        system_parts.append(fmt)

    if system_prompt.strip():
        system_parts.append(
            "Additional system instructions from the user (apply after all above rules):\n"
            + system_prompt.strip()
        )

    final_system = "\n\n".join(system_parts)

    messages = [
        {"role": "system", "content": final_system},
        {"role": "user", "content": user_prompt.strip()},
    ]

    try:
        # Minimal API call: no temperature / max_* params to avoid model quirks
        response = openai.chat.completions.create(
            model=model.strip(),
            messages=messages,
        )
        answer = (response.choices[0].message.content or "").strip()
        if not answer:
            answer = "⚠️ The model returned an empty response. Try again with more context."
    except Exception as e:
        answer = f"❌ Error while calling the model:\n\n`{e}`"

    history.append(("You", user_prompt))
    history.append(("Assistant", answer))
    return history, answer


# --------------- HELPERS ---------------

def save_api_key(raw_key: str):
    cleaned = (raw_key or "").strip()
    if not cleaned:
        raise gr.Error("🔐 Please paste a valid API key before saving.")
    if not cleaned.startswith("sk-"):
        gr.Info("Key saved, but it doesn’t start with 'sk-'. Double-check your provider format.")
    return cleaned, gr.update(value="", placeholder="API key saved ✔")


def load_preset(preset_name: str):
    if not preset_name or preset_name not in PRESETS:
        return gr.update(), gr.update()
    preset = PRESETS[preset_name]
    return preset["system_role"], preset["user_hint"]


# --------------- UI ---------------

CUSTOM_CSS = """
#zen-root {
    background: radial-gradient(circle at top left, #020617 0, #020617 40%, #000000 100%);
    color: #f9fafb;
    font-family: 'Inter', system-ui, -apple-system, BlinkMacSystemFont, sans-serif;
    min-height: 100vh;
    padding-bottom: 2rem;
}
.zen-card {
    border-radius: 20px;
    background: linear-gradient(135deg, rgba(15,23,42,0.95), rgba(15,23,42,0.7));
    border: 1px solid rgba(148,163,184,0.45);
    backdrop-filter: blur(22px);
    box-shadow: 0 24px 60px rgba(15,23,42,0.95), 0 0 0 1px rgba(15,23,42,0.85);
}
.zen-header {
    padding: 1.8rem 2rem;
    margin-bottom: 1.5rem;
}
.zen-pill {
    border-radius: 9999px;
    padding: 0.25rem 0.9rem;
    border: 1px solid rgba(129,140,248,0.7);
    color: #a5b4fc;
    font-size: 0.75rem;
    text-transform: uppercase;
    letter-spacing: 0.12em;
}
.zen-title {
    font-size: 1.9rem;
    font-weight: 700;
    letter-spacing: -0.03em;
}
.zen-subtitle {
    font-size: 0.9rem;
    color: #9ca3af;
}
#zen-output-card {
    padding: 1.3rem 1.6rem;
}
#zen-output-card .markdown-body h2 {
    margin-top: 0.2rem;
}
#zen-output-card .markdown-body h3 {
    margin-top: 0.9rem;
}
#zen-meta-card {
    padding: 0.75rem 1rem;
    font-size: 0.85rem;
    color: #e5e7eb;
    border-bottom: 1px solid rgba(148,163,184,0.35);
}
"""

with gr.Blocks(
    css=CUSTOM_CSS,
    elem_id="zen-root",
    fill_height=True,
    title="ZEN Promptboard Pro — GPT-5",
) as demo:
    # Header
    gr.Markdown(
        """