ZENLLC committed on
Commit
a221f71
·
verified ·
1 Parent(s): 61e4b36

Update app.py

Files changed (1)
  1. app.py +407 -197
app.py CHANGED
@@ -1,6 +1,5 @@
import json
import textwrap
- import math
from typing import Dict, Any, List, Tuple

import gradio as gr
@@ -8,21 +7,31 @@ import requests
import matplotlib.pyplot as plt
from matplotlib.figure import Figure

- # ============================================================
- # LLM CALLER - GPT-4.1 ONLY
- # ============================================================

def call_chat_completion(
    api_key: str,
    base_url: str,
    system_prompt: str,
    user_prompt: str,
-     model: str = "gpt-4.1",
-     max_completion_tokens: int = 2000,
) -> str:

    if not api_key:
-         raise ValueError("Missing API key.")

    url = base_url.rstrip("/") + "/v1/chat/completions"
@@ -31,7 +40,8 @@ def call_chat_completion(
        "Content-Type": "application/json",
    }

-     payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_prompt},
@@ -40,338 +50,538 @@ def call_chat_completion(
        "max_completion_tokens": max_completion_tokens,
    }

-     resp = requests.post(url, headers=headers, json=payload, timeout=60)

-     # Fallback for providers requiring max_tokens
    if resp.status_code == 400 and "max_completion_tokens" in resp.text:
-         payload["max_tokens"] = max_completion_tokens
-         payload.pop("max_completion_tokens", None)
-         resp = requests.post(url, headers=headers, json=payload, timeout=60)

    if resp.status_code != 200:
        raise RuntimeError(
-             f"LLM API Error {resp.status_code}:\n{resp.text[:400]}"
        )

    data = resp.json()
    try:
        return data["choices"][0]["message"]["content"]
-     except:
-         raise RuntimeError(f"Malformed response:\n\n{json.dumps(data, indent=2)}")


- # ============================================================
- # SOP PROMPT + JSON PARSER
- # ============================================================

SOP_SYSTEM_PROMPT = """
- You are an expert process engineer. Produce SOPs as JSON using:

{
-   "title": "",
-   "purpose": "",
-   "scope": "",
-   "definitions": [],
-   "roles": [{"name": "", "responsibilities": []}],
-   "prerequisites": [],
  "steps": [
    {
      "step_number": 1,
-       "title": "",
-       "description": "",
-       "owner_role": "",
-       "inputs": [],
-       "outputs": []
    }
  ],
-   "escalation": [],
-   "metrics": [],
-   "risks": [],
-   "versioning": {"version": "1.0","owner": "","last_updated": ""}
}
-
- Return ONLY JSON.
"""

- def build_user_prompt(title, desc, industry, tone, detail):
    return f"""
- SOP Title: {title}
- Context: {desc}
- Industry: {industry}
- Tone: {tone}
- Detail Level: {detail}
- Audience: mid-career professionals.
"""

- def parse_sop_json(raw: str) -> Dict[str, Any]:
-     txt = raw.strip()
    if txt.startswith("```"):
-         txt = txt.split("```")[1]

    first = txt.find("{")
    last = txt.rfind("}")
-     return json.loads(txt[first:last+1])


def sop_to_markdown(sop: Dict[str, Any]) -> str:

-     def bullet(items):
        if not items:
-             return "_None provided._"
        return "\n".join(f"- {i}" for i in items)

-     md = []
-     md.append(f"# {sop.get('title','Untitled SOP')}\n")

-     md.append("## 1. Purpose\n" + sop.get("purpose","N/A"))
-     md.append("## 2. Scope\n" + sop.get("scope","N/A"))

-     md.append("## 3. Definitions\n" + bullet(sop.get("definitions", [])))

-     md.append("## 4. Roles & Responsibilities")
    for r in sop.get("roles", []):
-         md.append(f"### {r.get('name','Role')}")
        md.append(bullet(r.get("responsibilities", [])))

-     md.append("## 5. Prerequisites\n" + bullet(sop.get("prerequisites", [])))

-     md.append("## 6. Procedure")
-     for st in sop.get("steps", []):
-         md.append(f"### Step {st['step_number']}: {st['title']}")
-         md.append(f"**Owner:** {st['owner_role']}")
-         md.append(st["description"])
-         md.append("**Inputs:**\n" + bullet(st["inputs"]))
-         md.append("**Outputs:**\n" + bullet(st["outputs"]))

-     md.append("## 7. Escalation\n" + bullet(sop.get("escalation", [])))
-     md.append("## 8. Metrics\n" + bullet(sop.get("metrics")))
-     md.append("## 9. Risks\n" + bullet(sop.get("risks")))

    v = sop.get("versioning", {})
-     md.append("## 10. Version Control")
-     md.append(f"- Version: {v.get('version','1.0')}")
-     md.append(f"- Owner: {v.get('owner','N/A')}")
-     md.append(f"- Last Updated: {v.get('last_updated','N/A')}")

    return "\n\n".join(md)

- # ============================================================
- # 🆕 PERFECTED DIAGRAM - AUTO-SIZE CARDS
- # ============================================================

def create_sop_steps_figure(sop: Dict[str, Any]) -> Figure:
    steps = sop.get("steps", [])
    if not steps:
-         fig, ax = plt.subplots(figsize=(6,2))
-         ax.text(0.5,0.5,"No steps to visualize.",ha="center",va="center")
        ax.axis("off")
        return fig

-     # Dynamically compute figure height based on text amount
-     total_height = 0
-     block_heights = []

-     for st in steps:
-         desc_lines = textwrap.wrap(st["description"], width=65)
-         num_lines = 2 + len(desc_lines)  # title + owner + description lines
-         block_h = 0.35 * num_lines
-         block_heights.append(block_h)
-         total_height += block_h + 0.3  # spacing

-     fig_height = min(18, max(5, total_height))
-     fig, ax = plt.subplots(figsize=(10, fig_height))

-     y = total_height

-     for idx, st in enumerate(steps):
-         title = st["title"]
-         owner = st["owner_role"]
-         desc_lines = textwrap.wrap(st["description"], width=70)
-         block_h = block_heights[idx]

-         x0, x1 = 0.05, 0.95

        ax.add_patch(
            plt.Rectangle(
-                 (x0, y - block_h),
-                 x1 - x0,
-                 block_h,
                fill=False,
-                 linewidth=1.7
            )
        )

-         # Number box
-         nbw = 0.08
        ax.add_patch(
            plt.Rectangle(
-                 (x0, y - block_h),
-                 nbw,
-                 block_h,
                fill=False,
-                 linewidth=1.5
            )
        )

        ax.text(
-             x0 + nbw/2,
-             y - block_h/2,
-             str(st["step_number"]),
-             ha="center", va="center",
-             fontsize=13, fontweight="bold"
        )

-         text_x = x0 + nbw + 0.02

        # Title
-         ax.text(text_x, y - 0.2,
-                 title,
-                 fontsize=12,
-                 fontweight="bold",
-                 ha="left", va="top")
-
-         # Owner
-         ax.text(text_x, y - 0.45,
                f"Owner: {owner}",
                fontsize=10,
                style="italic",
-                 ha="left", va="top")

        # Description (wrapped)
-         text_y = y - 0.75
-         for line in desc_lines:
-             ax.text(text_x, text_y, line, fontsize=9, ha="left", va="top")
-             text_y -= 0.28
-
-         y -= (block_h + 0.3)

    ax.axis("off")
    fig.tight_layout()
    return fig

- # ============================================================
- # SAMPLE SCENARIOS
- # ============================================================

- SAMPLES = {
-     "Volunteer Onboarding": {
-         "title": "Volunteer Onboarding",
-         "description": "Create SOP for onboarding volunteers: background checks, orientation, training, placement.",
-         "industry": "Nonprofit"
    },
    "Remote Employee Onboarding": {
        "title": "Remote Employee Onboarding",
-         "description": "SOP for remote hires including IT setup, HR docs, culture onboarding.",
-         "industry": "HR"
    },
-     "IT Outage Response": {
-         "title": "IT Outage Response",
-         "description": "Major outage response: detection, triage, escalation, comms, restoration, post-mortem.",
-         "industry": "IT"
    },
}

- def load_sample(name):
-     if name not in SAMPLES:
        return "", "", "General"
-     s = SAMPLES[name]
    return s["title"], s["description"], s["industry"]

- # ============================================================
285
- # MAIN GENERATOR
286
- # ============================================================
287
-
288
- def generate_sop(
289
- api_key_state,
290
- api_key_input,
291
- base_url,
292
- model,
293
- title,
294
- desc,
295
- industry,
296
- tone,
297
- detail
298
- ):
299
 
 
 
 
 
 
 
 
 
 
 
 
 
300
  api_key = api_key_input or api_key_state
301
  if not api_key:
302
- return ("⚠️ Enter an API key.",
303
- "",
304
- create_sop_steps_figure({"steps": []}),
305
- api_key_state)
 
 
306
 
307
- try:
308
- user_prompt = build_user_prompt(title, desc, industry, tone, detail)
 
 
309
 
 
310
  raw = call_chat_completion(
311
  api_key=api_key,
312
  base_url=base_url,
 
313
  system_prompt=SOP_SYSTEM_PROMPT,
314
  user_prompt=user_prompt,
315
- model="gpt-4.1", # πŸ”₯ Forced stable model
316
- max_completion_tokens=2000
317
  )
318
 
319
  sop = parse_sop_json(raw)
320
  md = sop_to_markdown(sop)
321
  fig = create_sop_steps_figure(sop)
322
- json_out = json.dumps(sop, indent=2)
323
 
 
324
  return md, json_out, fig, api_key
325
 
326
  except Exception as e:
327
- return (f"❌ Error generating SOP:\n{e}",
328
- "",
329
- create_sop_steps_figure({"steps": []}),
330
- api_key_state)
 
 
331
 
332
 
333
- # ============================================================
334
- # GRADIO UI
335
- # ============================================================
336
 
337
  with gr.Blocks(title="ZEN Simple SOP Builder") as demo:
338
-
339
- gr.Markdown("""
340
  # 🧭 ZEN Simple SOP Builder
341
- Generate clean SOPs + auto diagrams using **GPT-4.1**.
342
- """)
 
 
 
 
 
 
 
 
 
343
 
344
  api_key_state = gr.State("")
345
 
346
  with gr.Row():
347
  with gr.Column(scale=1):
348
- api_input = gr.Textbox("API Key", type="password")
349
- base_url = gr.Textbox("Base URL", value="https://api.openai.com")
350
- model_name = gr.Textbox("Model (GPT-4.1 only)", value="gpt-4.1")
351
 
352
- sample = gr.Dropdown("Sample SOP", choices=list(SAMPLES.keys()))
353
- load_btn = gr.Button("Load Sample")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
354
 
355
  with gr.Column(scale=2):
356
- title = gr.Textbox("SOP Title")
357
- desc = gr.Textbox("Description", lines=5)
358
- industry = gr.Textbox("Industry", value="General")
359
- tone = gr.Dropdown("Tone", ["Professional","Executive","Supportive"], value="Professional")
360
- detail = gr.Dropdown("Detail Level", ["Standard","High detail","Checklist"], value="Standard")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
361
 
362
- gen_btn = gr.Button("πŸš€ Generate SOP", variant="primary")
363
 
364
- sop_md = gr.Markdown()
365
- sop_json = gr.Code(language="json")
366
- sop_fig = gr.Plot()
367
 
368
- load_btn.click(load_sample, sample, [title, desc, industry])
 
 
 
 
 
 
 
 
 
 
 
 
 
369
 
370
- gen_btn.click(
371
- generate_sop,
372
- [api_key_state, api_input, base_url, model_name, title, desc, industry, tone, detail],
373
- [sop_md, sop_json, sop_fig, api_key_state],
 
374
  )
375
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
376
  if __name__ == "__main__":
377
  demo.launch()
 
import json
import textwrap
from typing import Dict, Any, List, Tuple

import gradio as gr
import requests
import matplotlib.pyplot as plt
from matplotlib.figure import Figure

+
+ # -----------------------------
+ # LLM CALL HELPERS
+ # -----------------------------

def call_chat_completion(
    api_key: str,
    base_url: str,
+     model: str,
    system_prompt: str,
    user_prompt: str,
+     max_completion_tokens: int = 1800,
) -> str:
+     """
+     OpenAI-compatible ChatCompletion caller.
+
+     - Uses `max_completion_tokens` (new OpenAI spec).
+     - Falls back to `max_tokens` for providers that still expect it.
+     - No temperature param (some models only allow default).
+     """
    if not api_key:
+         raise ValueError("API key is required.")
+
+     if not base_url:
+         base_url = "https://api.openai.com"

    url = base_url.rstrip("/") + "/v1/chat/completions"

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }

+     # Primary payload using max_completion_tokens
+     new_payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        "max_completion_tokens": max_completion_tokens,
    }

+     resp = requests.post(url, headers=headers, json=new_payload, timeout=60)

+     # If the provider doesn't support `max_completion_tokens`, retry with legacy `max_tokens`
    if resp.status_code == 400 and "max_completion_tokens" in resp.text:
+         legacy_payload = {
+             "model": model,
+             "messages": [
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": user_prompt},
+             ],
+             "max_tokens": max_completion_tokens,
+         }
+         resp = requests.post(url, headers=headers, json=legacy_payload, timeout=60)

    if resp.status_code != 200:
        raise RuntimeError(
+             f"LLM API error: {resp.status_code} - {resp.text[:400]}"
        )

    data = resp.json()
    try:
        return data["choices"][0]["message"]["content"]
+     except Exception as e:
+         raise RuntimeError(f"Unexpected LLM response format: {e}\n\n{data}")
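+ # Usage note: generate_sop_ui() further down calls this helper with the values collected
+ # from the Gradio form (model name, base URL, system/user prompts) and max_completion_tokens=1800.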
 
+ # -----------------------------
+ # SOP GENERATION LOGIC
+ # -----------------------------

SOP_SYSTEM_PROMPT = """
+ You are an expert operations consultant and technical writer.
+
+ You generate clear, professional, implementation-ready Standard Operating Procedures (SOPs).
+
+ You MUST respond strictly as JSON using this schema:

{
+   "title": "string",
+   "purpose": "string",
+   "scope": "string",
+   "definitions": ["string", ...],
+   "roles": [
+     {
+       "name": "string",
+       "responsibilities": ["string", ...]
+     }
+   ],
+   "prerequisites": ["string", ...],
  "steps": [
    {
      "step_number": 1,
+       "title": "string",
+       "description": "string",
+       "owner_role": "string",
+       "inputs": ["string", ...],
+       "outputs": ["string", ...]
    }
  ],
+   "escalation": ["string", ...],
+   "metrics": ["string", ...],
+   "risks": ["string", ...],
+   "versioning": {
+     "version": "1.0",
+     "owner": "string",
+     "last_updated": "string"
+   }
}
"""
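+ # Note: the prompt demands raw JSON, but parse_sop_json() below still strips markdown
+ # code fences defensively in case the model wraps its output in a fenced ```json block.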
 
+
+ def build_user_prompt(
+     sop_title: str,
+     description: str,
+     industry: str,
+     tone: str,
+     detail_level: str,
+ ) -> str:
    return f"""
+ Process Title: {sop_title or "Untitled SOP"}
+ Context: {description or "N/A"}
+ Industry: {industry or "General"}
+ Tone: {tone or "Professional"}
+ Detail Level: {detail_level or "Standard"}
+
+ Audience: Mid-career professionals.
"""

+ def parse_sop_json(raw_text: str) -> Dict[str, Any]:
+     """Clean model output and extract JSON."""
+     txt = raw_text.strip()
+
+     # Strip markdown fences if present
    if txt.startswith("```"):
+         parts = txt.split("```")
+         txt = next((p for p in parts if "{" in p), parts[-1])

+     # Extract JSON between first '{' and last '}'
    first = txt.find("{")
    last = txt.rfind("}")
+     if first != -1 and last != -1:
+         txt = txt[first:last + 1]
+
+     return json.loads(txt)
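+ # If the model output is not valid JSON, json.loads() raises json.JSONDecodeError,
+ # which generate_sop_ui() catches and surfaces as an error message in the UI.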
 
def sop_to_markdown(sop: Dict[str, Any]) -> str:
+     """Format JSON SOP into Markdown."""

+     def bullet(items: List[str]) -> str:
        if not items:
+             return "_None specified._"
        return "\n".join(f"- {i}" for i in items)

+     md: List[str] = []
+
+     md.append(f"# {sop.get('title', 'Standard Operating Procedure')}\n")

+     md.append("## 1. Purpose")
+     md.append(sop.get("purpose", "N/A"))

+     md.append("\n## 2. Scope")
+     md.append(sop.get("scope", "N/A"))

+     md.append("\n## 3. Definitions")
+     md.append(bullet(sop.get("definitions", [])))
+
+     md.append("\n## 4. Roles & Responsibilities")
    for r in sop.get("roles", []):
+         name = r.get("name", "Role")
+         md.append(f"### {name}")
        md.append(bullet(r.get("responsibilities", [])))

+     md.append("\n## 5. Prerequisites")
+     md.append(bullet(sop.get("prerequisites", [])))
+
+     md.append("\n## 6. Procedure (Step-by-Step)")
+     for step in sop.get("steps", []):
+         md.append(f"### Step {step.get('step_number', '?')}: {step.get('title', 'Step')}")
+         md.append(f"**Owner:** {step.get('owner_role', 'N/A')}")
+         md.append(step.get("description", ""))
+         md.append("**Inputs:**")
+         md.append(bullet(step.get("inputs", [])))
+         md.append("**Outputs:**")
+         md.append(bullet(step.get("outputs", [])))

+     md.append("\n## 7. Escalation")
+     md.append(bullet(sop.get("escalation", [])))

+     md.append("\n## 8. Metrics & Success Criteria")
+     md.append(bullet(sop.get("metrics", [])))
+
+     md.append("\n## 9. Risks & Controls")
+     md.append(bullet(sop.get("risks", [])))

    v = sop.get("versioning", {})
+     md.append("\n## 10. Version Control")
+     md.append(f"- Version: {v.get('version', '1.0')}")
+     md.append(f"- Owner: {v.get('owner', 'N/A')}")
+     md.append(f"- Last Updated: {v.get('last_updated', 'N/A')}")

    return "\n\n".join(md)

+ # -----------------------------
+ # INFOGRAPHIC / DATA VISUAL
+ # -----------------------------

def create_sop_steps_figure(sop: Dict[str, Any]) -> Figure:
+     """
+     Create a clearer, more readable infographic-style figure
+     showing the SOP steps as stacked cards.
+
+     - Large, legible fonts
+     - Number block on the left
+     - Wrapped description text
+     """
+
    steps = sop.get("steps", [])
+
+     # Empty state
    if not steps:
+         fig, ax = plt.subplots(figsize=(7, 2))
+         ax.text(
+             0.5,
+             0.5,
+             "No steps available to visualize.",
+             ha="center",
+             va="center",
+             fontsize=12,
+         )
        ax.axis("off")
+         fig.tight_layout()
        return fig

+     n = len(steps)
+
+     # Figure height scales with number of steps (capped)
+     fig_height = min(14, max(4, 1.6 * n))
+     fig, ax = plt.subplots(figsize=(9, fig_height))

+     # Coordinate system: y from 0 (bottom) to n (top)
+     ax.set_xlim(0, 1)
+     ax.set_ylim(0, n)

+     card_top_margin = 0.25
+     card_bottom_margin = 0.25
+     card_height = 1 - (card_top_margin + card_bottom_margin)

+     for idx, step in enumerate(steps):
+         # y coordinate from top down
+         row_top = n - idx - card_top_margin
+         row_bottom = row_top - card_height
+         center_y = (row_top + row_bottom) / 2

+         step_number = step.get("step_number", idx + 1)
+         title = step.get("title", f"Step {step_number}")
+         owner = step.get("owner_role", "")
+         desc = step.get("description", "")

+         # Wrap description into multiple lines
+         desc_wrapped = textwrap.fill(desc, width=80)
+
+         # Card rectangle (full width)
+         card_x0 = 0.03
+         card_x1 = 0.97
+         card_width = card_x1 - card_x0

        ax.add_patch(
            plt.Rectangle(
+                 (card_x0, row_bottom),
+                 card_width,
+                 card_height,
                fill=False,
+                 linewidth=1.6,
            )
        )

+         # Number block on the left
+         num_block_width = 0.08
+         num_block_x0 = card_x0
+         num_block_y0 = row_bottom
+         num_block_height = card_height
+
        ax.add_patch(
            plt.Rectangle(
+                 (num_block_x0, num_block_y0),
+                 num_block_width,
+                 num_block_height,
                fill=False,
+                 linewidth=1.4,
            )
        )

        ax.text(
+             num_block_x0 + num_block_width / 2,
+             center_y,
+             str(step_number),
+             ha="center",
+             va="center",
+             fontsize=12,
+             fontweight="bold",
        )

+         # Text block (title, owner, description)
+         text_x0 = num_block_x0 + num_block_width + 0.02

        # Title
+         ax.text(
+             text_x0,
+             row_top - 0.08,
+             title,
+             ha="left",
+             va="top",
+             fontsize=12,
+             fontweight="bold",
+         )
+
+         # Owner line (optional)
+         if owner:
+             ax.text(
+                 text_x0,
+                 row_top - 0.28,
                f"Owner: {owner}",
+                 ha="left",
+                 va="top",
                fontsize=10,
                style="italic",
+             )
+             desc_y = row_top - 0.48
+         else:
+             desc_y = row_top - 0.3

        # Description (wrapped)
+         ax.text(
+             text_x0,
+             desc_y,
+             desc_wrapped,
+             ha="left",
+             va="top",
+             fontsize=9,
+         )

    ax.axis("off")
    fig.tight_layout()
    return fig

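+ # Layout note: each step occupies one unit of vertical space on the y-axis; because
+ # card_height is less than 1, a small gap is left between consecutive cards, and the
+ # axis itself is hidden so only the cards and their text are visible.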
 
+ # -----------------------------
+ # SAMPLE PRESETS
+ # -----------------------------

+ SAMPLE_SOPS: Dict[str, Dict[str, str]] = {
+     "Volunteer Onboarding Workflow": {
+         "title": "Volunteer Onboarding Workflow",
+         "description": (
+             "Create a clear SOP for onboarding new volunteers at a youth-serving "
+             "nonprofit. Include background checks, orientation, training, and site placement."
+         ),
+         "industry": "Nonprofit / Youth Development",
    },
    "Remote Employee Onboarding": {
        "title": "Remote Employee Onboarding",
+         "description": (
+             "Design a remote onboarding SOP for new employees in a hybrid org, "
+             "covering IT setup, HR paperwork, culture onboarding, and 30-60-90 day milestones."
+         ),
+         "industry": "General / HR",
    },
+     "IT Outage Incident Response": {
+         "title": "IT Outage Incident Response",
+         "description": (
+             "Create an SOP for responding to major IT outages affecting multiple sites, "
+             "including triage, communication, escalation, and post-mortem."
+         ),
+         "industry": "IT / Operations",
    },
}

+
+ def load_sample(sample_name: str) -> Tuple[str, str, str]:
+     if not sample_name or sample_name not in SAMPLE_SOPS:
        return "", "", "General"
+     s = SAMPLE_SOPS[sample_name]
    return s["title"], s["description"], s["industry"]

+ # -----------------------------
+ # MAIN HANDLER (CALLED BY UI)
+ # -----------------------------

+ def generate_sop_ui(
+     api_key_state: str,
+     api_key_input: str,
+     base_url: str,
+     model: str,
+     sop_title: str,
+     description: str,
+     industry: str,
+     tone: str,
+     detail_level: str,
+ ) -> Tuple[str, str, Figure, str]:
+     """Gradio event handler: generate SOP + JSON + figure."""
    api_key = api_key_input or api_key_state
    if not api_key:
+         return (
+             "⚠️ Please enter an API key.",
+             "",
+             create_sop_steps_figure({"steps": []}),
+             api_key_state,
+         )

+     if not model:
+         model = "gpt-4.1-mini"
+
+     user_prompt = build_user_prompt(sop_title, description, industry, tone, detail_level)

+     try:
        raw = call_chat_completion(
            api_key=api_key,
            base_url=base_url,
+             model=model,
            system_prompt=SOP_SYSTEM_PROMPT,
            user_prompt=user_prompt,
+             max_completion_tokens=1800,
        )

        sop = parse_sop_json(raw)
        md = sop_to_markdown(sop)
        fig = create_sop_steps_figure(sop)
+         json_out = json.dumps(sop, indent=2, ensure_ascii=False)

+         # Save key into session state
        return md, json_out, fig, api_key

    except Exception as e:
+         return (
+             f"❌ Error generating SOP:\n\n{e}",
+             "",
+             create_sop_steps_figure({"steps": []}),
+             api_key_state,
+         )
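+ # The four return values (markdown, JSON, figure, api_key) must stay in the same order
+ # as the `outputs` list wired to generate_button.click() in the UI section below.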
 
+ # -----------------------------
+ # GRADIO UI
+ # -----------------------------

with gr.Blocks(title="ZEN Simple SOP Builder") as demo:
+     gr.Markdown(
+         """
# 🧭 ZEN Simple SOP Builder
+
+ Generate clean, professional Standard Operating Procedures (SOPs) from a short description.
+ Perfect for mid-career professionals who need clarity, structure, and ownership, fast.
+
+ 1. Configure your API settings
+ 2. Describe the process you want to document
+ 3. Generate a full SOP + visual flow of the steps
+
+ > Your API key stays in this browser session and is not logged to disk.
+         """
+     )

    api_key_state = gr.State("")
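+     # gr.State holds the key for this browser session only; generate_sop_ui() returns it
+     # back into this state so later clicks can reuse the key without re-entering it.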
 
    with gr.Row():
        with gr.Column(scale=1):
+             gr.Markdown("### Step 1 - API & Model Settings")

+             api_key_input = gr.Textbox(
+                 label="LLM API Key",
+                 placeholder="Enter your API key (OpenAI or compatible provider)",
+                 type="password",
+             )
+
+             base_url = gr.Textbox(
+                 label="Base URL",
+                 value="https://api.openai.com",
+                 placeholder="e.g. https://api.openai.com or your custom endpoint",
+             )
+
+             model_name = gr.Textbox(
+                 label="Model Name",
+                 value="gpt-4.1-mini",
+                 placeholder="e.g. gpt-4.1, gpt-4o, deepseek-chat, mistral-large, etc.",
+             )
+
+             gr.Markdown("### Step 2 - Try a Sample Scenario")
+             sample_dropdown = gr.Dropdown(
+                 label="Sample SOPs",
+                 choices=list(SAMPLE_SOPS.keys()),
+                 value=None,
+                 info="Optional: load a predefined example.",
+             )
+             load_button = gr.Button("Load Sample into Form")

        with gr.Column(scale=2):
+             gr.Markdown("### Step 3 - Describe Your SOP")
+
+             sop_title = gr.Textbox(
+                 label="SOP Title",
+                 placeholder="e.g. Volunteer Onboarding Workflow, IT Outage Response",
+             )
+
+             description = gr.Textbox(
+                 label="Describe the process / context",
+                 placeholder="What should this SOP cover? Who is it for? Any constraints?",
+                 lines=6,
+             )
+
+             industry = gr.Textbox(
+                 label="Industry / Domain",
+                 value="General",
+                 placeholder="e.g. Nonprofit, HR, Education, Healthcare, IT",
+             )
+
+             tone = gr.Dropdown(
+                 label="Tone",
+                 choices=["Professional", "Executive", "Supportive", "Direct", "Compliance-focused"],
+                 value="Professional",
+             )
+
+             detail_level = gr.Dropdown(
+                 label="Detail Level",
+                 choices=["Standard", "High detail", "Checklist-style", "Overview only"],
+                 value="Standard",
+             )

+     generate_button = gr.Button("🚀 Generate SOP", variant="primary")

+     gr.Markdown("### Step 4 - Results")

+     with gr.Row():
+         with gr.Column(scale=3):
+             sop_output = gr.Markdown(
+                 label="Generated SOP",
+                 value="Your SOP will appear here.",
+             )
+         with gr.Column(scale=2):
+             sop_json_output = gr.Code(
+                 label="Raw SOP JSON (for automation / export)",
+                 language="json",
+             )
+
+     gr.Markdown("### Visual Flow of Steps")
+     sop_figure = gr.Plot(label="SOP Steps Diagram")

+     # Wire up events
+     load_button.click(
+         fn=load_sample,
+         inputs=[sample_dropdown],
+         outputs=[sop_title, description, industry],
    )

+     generate_button.click(
+         fn=generate_sop_ui,
+         inputs=[
+             api_key_state,
+             api_key_input,
+             base_url,
+             model_name,
+             sop_title,
+             description,
+             industry,
+             tone,
+             detail_level,
+         ],
+         outputs=[sop_output, sop_json_output, sop_figure, api_key_state],
+     )
+
+
if __name__ == "__main__":
    demo.launch()