ZENLLC committed
Commit 566c8e3 · verified · 1 parent: a221f71

Update app.py

Files changed (1):
  1. app.py +159 -189
app.py CHANGED
@@ -8,9 +8,9 @@ import matplotlib.pyplot as plt
 from matplotlib.figure import Figure
 
 
-# -----------------------------
-# LLM CALL HELPERS
-# -----------------------------
 
 def call_chat_completion(
     api_key: str,
@@ -18,14 +18,14 @@ def call_chat_completion(
     model: str,
     system_prompt: str,
     user_prompt: str,
-    max_completion_tokens: int = 1800,
 ) -> str:
     """
-    OpenAI-compatible ChatCompletion caller.
 
-    - Uses `max_completion_tokens` (new OpenAI spec).
-    - Falls back to `max_tokens` for providers that still expect it.
-    - No temperature param (some models only allow default).
     """
     if not api_key:
         raise ValueError("API key is required.")
@@ -40,8 +40,7 @@ def call_chat_completion(
         "Content-Type": "application/json",
     }
 
-    # Primary payload using max_completion_tokens
-    new_payload = {
         "model": model,
         "messages": [
             {"role": "system", "content": system_prompt},
@@ -50,19 +49,13 @@ def call_chat_completion(
         "max_completion_tokens": max_completion_tokens,
     }
 
-    resp = requests.post(url, headers=headers, json=new_payload, timeout=60)
 
-    # If provider doesn't support `max_completion_tokens`, try legacy `max_tokens`
     if resp.status_code == 400 and "max_completion_tokens" in resp.text:
-        legacy_payload = {
-            "model": model,
-            "messages": [
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": user_prompt},
-            ],
-            "max_tokens": max_completion_tokens,
-        }
-        resp = requests.post(url, headers=headers, json=legacy_payload, timeout=60)
 
     if resp.status_code != 200:
         raise RuntimeError(
@@ -73,19 +66,15 @@ def call_chat_completion(
     try:
         return data["choices"][0]["message"]["content"]
     except Exception as e:
-        raise RuntimeError(f"Unexpected LLM response format: {e}\n\n{data}")
 
 
-# -----------------------------
-# SOP GENERATION LOGIC
-# -----------------------------
 
 SOP_SYSTEM_PROMPT = """
-You are an expert operations consultant and technical writer.
-
-You generate clear, professional, implementation-ready Standard Operating Procedures (SOPs).
-
-You MUST respond strictly as JSON using this schema:
 
 {
   "title": "string",
@@ -118,8 +107,9 @@ You MUST respond strictly as JSON using this schema:
     "last_updated": "string"
   }
 }
-"""
 
 
 def build_user_prompt(
     sop_title: str,
@@ -129,43 +119,42 @@ def build_user_prompt(
     detail_level: str,
 ) -> str:
     return f"""
-Process Title: {sop_title or "Untitled SOP"}
 Context: {description or "N/A"}
 Industry: {industry or "General"}
 Tone: {tone or "Professional"}
 Detail Level: {detail_level or "Standard"}
-
-Audience: Mid-career professionals.
-"""
 
 
 def parse_sop_json(raw_text: str) -> Dict[str, Any]:
-    """Clean model output and extract JSON."""
     txt = raw_text.strip()
 
-    # Strip markdown fences if present
     if txt.startswith("```"):
         parts = txt.split("```")
-        txt = next((p for p in parts if "{" in p), parts[-1])
 
-    # Extract JSON between first '{' and last '}'
     first = txt.find("{")
     last = txt.rfind("}")
-    if first != -1 and last != -1:
-        txt = txt[first:last + 1]
 
     return json.loads(txt)
 
 
 def sop_to_markdown(sop: Dict[str, Any]) -> str:
-    """Format JSON SOP into Markdown."""
 
-    def bullet(items: List[str]) -> str:
         if not items:
-            return "_None specified._"
         return "\n".join(f"- {i}" for i in items)
 
-    md: List[str] = []
 
     md.append(f"# {sop.get('title', 'Standard Operating Procedure')}\n")
 
@@ -179,10 +168,9 @@ def sop_to_markdown(sop: Dict[str, Any]) -> str:
     md.append(bullet(sop.get("definitions", [])))
 
     md.append("\n## 4. Roles & Responsibilities")
-    for r in sop.get("roles", []):
-        name = r.get("name", "Role")
-        md.append(f"### {name}")
-        md.append(bullet(r.get("responsibilities", [])))
 
     md.append("\n## 5. Prerequisites")
     md.append(bullet(sop.get("prerequisites", [])))
@@ -200,10 +188,10 @@ def sop_to_markdown(sop: Dict[str, Any]) -> str:
     md.append("\n## 7. Escalation")
     md.append(bullet(sop.get("escalation", [])))
 
-    md.append("\n## 8. Metrics & Success Criteria")
     md.append(bullet(sop.get("metrics", [])))
 
-    md.append("\n## 9. Risks & Controls")
     md.append(bullet(sop.get("risks", [])))
 
     v = sop.get("versioning", {})
@@ -215,182 +203,166 @@ def sop_to_markdown(sop: Dict[str, Any]) -> str:
     return "\n\n".join(md)
 
 
-# -----------------------------
-# INFOGRAPHIC / DATA VISUAL
-# -----------------------------
 
 def create_sop_steps_figure(sop: Dict[str, Any]) -> Figure:
     """
-    Create a clearer, more readable infographic-style figure
-    showing the SOP steps as stacked cards.
-
-    - Large, legible fonts
-    - Number block on the left
-    - Wrapped description text
     """
 
     steps = sop.get("steps", [])
-
-    # Empty state
     if not steps:
         fig, ax = plt.subplots(figsize=(7, 2))
-        ax.text(
-            0.5,
-            0.5,
-            "No steps available to visualize.",
-            ha="center",
-            va="center",
-            fontsize=12,
-        )
         ax.axis("off")
         fig.tight_layout()
         return fig
 
-    n = len(steps)
-
-    # Figure height scales with number of steps (capped)
-    fig_height = min(14, max(4, 1.6 * n))
-    fig, ax = plt.subplots(figsize=(9, fig_height))
-
-    # Coordinate system: y from 0 (bottom) to n (top)
     ax.set_xlim(0, 1)
-    ax.set_ylim(0, n)
 
-    card_top_margin = 0.25
-    card_bottom_margin = 0.25
-    card_height = 1 - (card_top_margin + card_bottom_margin)
 
-    for idx, step in enumerate(steps):
-        # y coordinate from top down
-        row_top = n - idx - card_top_margin
-        row_bottom = row_top - card_height
-        center_y = (row_top + row_bottom) / 2
 
-        step_number = step.get("step_number", idx + 1)
-        title = step.get("title", f"Step {step_number}")
-        owner = step.get("owner_role", "")
-        desc = step.get("description", "")
-
-        # Wrap description into multiple lines
-        desc_wrapped = textwrap.fill(desc, width=80)
-
-        # Card rectangle (full width)
-        card_x0 = 0.03
-        card_x1 = 0.97
-        card_width = card_x1 - card_x0
 
         ax.add_patch(
             plt.Rectangle(
-                (card_x0, row_bottom),
-                card_width,
-                card_height,
                 fill=False,
-                linewidth=1.6,
             )
         )
 
-        # Number block on the left
-        num_block_width = 0.08
-        num_block_x0 = card_x0
-        num_block_y0 = row_bottom
-        num_block_height = card_height
-
         ax.add_patch(
             plt.Rectangle(
-                (num_block_x0, num_block_y0),
-                num_block_width,
-                num_block_height,
                 fill=False,
-                linewidth=1.4,
             )
         )
 
         ax.text(
-            num_block_x0 + num_block_width / 2,
-            center_y,
-            str(step_number),
             ha="center",
             va="center",
-            fontsize=12,
             fontweight="bold",
         )
 
-        # Text block (title, owner, description)
-        text_x0 = num_block_x0 + num_block_width + 0.02
 
         # Title
         ax.text(
-            text_x0,
-            row_top - 0.08,
-            title,
             ha="left",
             va="top",
             fontsize=12,
             fontweight="bold",
         )
 
-        # Owner line (optional)
         if owner:
             ax.text(
-                text_x0,
-                row_top - 0.28,
                 f"Owner: {owner}",
                 ha="left",
                 va="top",
                 fontsize=10,
                 style="italic",
             )
-            desc_y = row_top - 0.48
         else:
-            desc_y = row_top - 0.3
 
         # Description (wrapped)
-        ax.text(
-            text_x0,
-            desc_y,
-            desc_wrapped,
-            ha="left",
-            va="top",
-            fontsize=9,
-        )
 
     ax.axis("off")
     fig.tight_layout()
     return fig
 
 
-# -----------------------------
-# SAMPLE PRESETS
-# -----------------------------
 
 SAMPLE_SOPS: Dict[str, Dict[str, str]] = {
-    "Volunteer Onboarding Workflow": {
-        "title": "Volunteer Onboarding Workflow",
-        "description": (
-            "Create a clear SOP for onboarding new volunteers at a youth-serving "
-            "nonprofit. Include background checks, orientation, training, and site placement."
-        ),
         "industry": "Nonprofit / Youth Development",
     },
     "Remote Employee Onboarding": {
         "title": "Remote Employee Onboarding",
-        "description": (
-            "Design a remote onboarding SOP for new employees in a hybrid org, "
-            "covering IT setup, HR paperwork, culture onboarding, and 30-60-90 day milestones."
-        ),
-        "industry": "General / HR",
     },
-    "IT Outage Incident Response": {
         "title": "IT Outage Incident Response",
-        "description": (
-            "Create an SOP for responding to major IT outages affecting multiple sites, "
-            "including triage, communication, escalation, and post-mortem."
-        ),
         "industry": "IT / Operations",
     },
 }
 
-
 def load_sample(sample_name: str) -> Tuple[str, str, str]:
     if not sample_name or sample_name not in SAMPLE_SOPS:
         return "", "", "General"
@@ -398,33 +370,32 @@ def load_sample(sample_name: str) -> Tuple[str, str, str]:
     return s["title"], s["description"], s["industry"]
 
 
-# -----------------------------
-# MAIN HANDLER (CALLED BY UI)
-# -----------------------------
 
 def generate_sop_ui(
     api_key_state: str,
     api_key_input: str,
     base_url: str,
-    model: str,
     sop_title: str,
     description: str,
     industry: str,
     tone: str,
     detail_level: str,
 ) -> Tuple[str, str, Figure, str]:
-    """Gradio event handler: generate SOP + JSON + figure."""
     api_key = api_key_input or api_key_state
     if not api_key:
         return (
-            "⚠️ Please enter an API key.",
             "",
             create_sop_steps_figure({"steps": []}),
             api_key_state,
         )
 
-    if not model:
-        model = "gpt-4.1-mini"
 
     user_prompt = build_user_prompt(sop_title, description, industry, tone, detail_level)
 
@@ -435,7 +406,7 @@ def generate_sop_ui(
             model=model,
             system_prompt=SOP_SYSTEM_PROMPT,
             user_prompt=user_prompt,
-            max_completion_tokens=1800,
         )
 
         sop = parse_sop_json(raw)
@@ -443,8 +414,7 @@ def generate_sop_ui(
         fig = create_sop_steps_figure(sop)
         json_out = json.dumps(sop, indent=2, ensure_ascii=False)
 
-        # Save key into session state
-        return md, json_out, fig, api_key
 
     except Exception as e:
         return (
@@ -455,65 +425,65 @@ def generate_sop_ui(
         )
 
 
-# -----------------------------
-# GRADIO UI
-# -----------------------------
 
 with gr.Blocks(title="ZEN Simple SOP Builder") as demo:
     gr.Markdown(
         """
 # 🧭 ZEN Simple SOP Builder
 
-Generate clean, professional Standard Operating Procedures (SOPs) from a short description.
-Perfect for mid-career professionals who need clarity, structure, and ownership — fast.
-
-1. Configure your API settings
-2. Describe the process you want to document
-3. Generate a full SOP + visual flow of the steps
 
-> Your API key stays in this browser session and is not logged to disk.
         """
     )
 
     api_key_state = gr.State("")
 
     with gr.Row():
         with gr.Column(scale=1):
             gr.Markdown("### Step 1 — API & Model Settings")
 
             api_key_input = gr.Textbox(
                 label="LLM API Key",
-                placeholder="Enter your API key (OpenAI or compatible provider)",
                 type="password",
             )
 
             base_url = gr.Textbox(
                 label="Base URL",
                 value="https://api.openai.com",
-                placeholder="e.g. https://api.openai.com or your custom endpoint",
             )
 
             model_name = gr.Textbox(
                 label="Model Name",
-                value="gpt-4.1-mini",
-                placeholder="e.g. gpt-4.1, gpt-4o, deepseek-chat, mistral-large, etc.",
             )
 
-            gr.Markdown("### Step 2 — Try a Sample Scenario")
 
             sample_dropdown = gr.Dropdown(
-                label="Sample SOPs",
                 choices=list(SAMPLE_SOPS.keys()),
                 value=None,
-                info="Optional: load a predefined example.",
             )
             load_button = gr.Button("Load Sample into Form")
 
         with gr.Column(scale=2):
-            gr.Markdown("### Step 3 — Describe Your SOP")
 
             sop_title = gr.Textbox(
                 label="SOP Title",
-                placeholder="e.g. Volunteer Onboarding Workflow, IT Outage Response",
            )
 
             description = gr.Textbox(
@@ -542,13 +512,13 @@ Perfect for mid-career professionals who need clarity, structure, and ownership
 
     generate_button = gr.Button("🚀 Generate SOP", variant="primary")
 
-    gr.Markdown("### Step 4 — Results")
 
     with gr.Row():
         with gr.Column(scale=3):
             sop_output = gr.Markdown(
-                label="Generated SOP",
-                value="Your SOP will appear here.",
             )
         with gr.Column(scale=2):
             sop_json_output = gr.Code(
@@ -556,10 +526,10 @@ Perfect for mid-career professionals who need clarity, structure, and ownership
                 language="json",
             )
 
-    gr.Markdown("### Visual Flow of Steps")
     sop_figure = gr.Plot(label="SOP Steps Diagram")
 
-    # Wire up events
    load_button.click(
         fn=load_sample,
         inputs=[sample_dropdown],
 
 from matplotlib.figure import Figure
 
 
+# ============================================================
+# LLM CALLER (GPT-4.1 BY DEFAULT)
+# ============================================================
 
 def call_chat_completion(
     api_key: str,

     model: str,
     system_prompt: str,
     user_prompt: str,
+    max_completion_tokens: int = 2000,
 ) -> str:
     """
+    OpenAI-compatible chat completion call.
 
+    - Uses new-style `max_completion_tokens` (for GPT-4.1, GPT-4o, etc.)
+    - Falls back to `max_tokens` if the provider doesn't support it.
+    - No temperature / top_p to avoid incompatibility with some models.
     """
     if not api_key:
         raise ValueError("API key is required.")

         "Content-Type": "application/json",
     }
 
+    payload = {
         "model": model,
         "messages": [
             {"role": "system", "content": system_prompt},

         "max_completion_tokens": max_completion_tokens,
     }
 
+    resp = requests.post(url, headers=headers, json=payload, timeout=60)
 
+    # Fallback for providers still expecting `max_tokens`
     if resp.status_code == 400 and "max_completion_tokens" in resp.text:
+        payload.pop("max_completion_tokens", None)
+        payload["max_tokens"] = max_completion_tokens
+        resp = requests.post(url, headers=headers, json=payload, timeout=60)
 
     if resp.status_code != 200:
         raise RuntimeError(

     try:
         return data["choices"][0]["message"]["content"]
     except Exception as e:
+        raise RuntimeError(f"Unexpected LLM response format: {e}\n\n{json.dumps(data, indent=2)}") from e
 
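A minimal usage sketch of the helper above (illustration only, not part of the commit): the parameter between `api_key` and `model` is not visible in these hunks and is assumed here to be `base_url`; the key and model values are placeholders.

import os

reply = call_chat_completion(
    api_key=os.environ.get("OPENAI_API_KEY", "sk-placeholder"),  # placeholder key source
    base_url="https://api.openai.com",  # assumed parameter name (not shown in the hunk)
    model="gpt-4.1",
    system_prompt="Reply with a single JSON object.",
    user_prompt='Return {"ok": true} and nothing else.',
    max_completion_tokens=200,
)
print(reply)  # raw assistant message content

The helper sends `max_completion_tokens` first and retries once with legacy `max_tokens` only when the provider returns a 400 that mentions the rejected field, so OpenAI-compatible endpoints on either convention are covered by a single call.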
 
+# ============================================================
+# SOP PROMPT + PARSING
+# ============================================================
 
 SOP_SYSTEM_PROMPT = """
+You are an expert process engineer. Produce SOPs strictly as JSON with this schema:
 
 {
   "title": "string",

     "last_updated": "string"
   }
 }
 
+Return ONLY JSON. No explanation or commentary.
+"""
 
 def build_user_prompt(
     sop_title: str,

     detail_level: str,
 ) -> str:
     return f"""
+SOP Title: {sop_title or "Untitled SOP"}
 Context: {description or "N/A"}
 Industry: {industry or "General"}
 Tone: {tone or "Professional"}
 Detail Level: {detail_level or "Standard"}
+Audience: mid-career professionals who need clarity and accountability.
+""".strip()
 
 
 def parse_sop_json(raw_text: str) -> Dict[str, Any]:
+    """Extract JSON from LLM output, stripping code fences if present."""
     txt = raw_text.strip()
 
     if txt.startswith("```"):
         parts = txt.split("```")
+        # choose the first part that looks like JSON
+        txt = next((p for p in parts if "{" in p and "}" in p), parts[-1])
 
     first = txt.find("{")
     last = txt.rfind("}")
+    if first == -1 or last == -1:
+        raise ValueError("No JSON object detected in model output.")
+    txt = txt[first:last + 1]
 
     return json.loads(txt)
 
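A small sketch of what the parser now accepts (illustration only): a code-fenced reply is reduced to the JSON object between the first `{` and the last `}`, and a reply with no braces raises `ValueError` instead of failing inside `json.loads`.

fenced_reply = """```json
{"title": "Demo SOP", "steps": []}
```"""

sop = parse_sop_json(fenced_reply)
assert sop["title"] == "Demo SOP"

try:
    parse_sop_json("Sorry, I cannot produce JSON.")
except ValueError as err:
    print(err)  # "No JSON object detected in model output."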
 
 def sop_to_markdown(sop: Dict[str, Any]) -> str:
+    """Render the SOP JSON as a readable Markdown document."""
 
+    def bullet(items):
         if not items:
+            return "_None provided._"
         return "\n".join(f"- {i}" for i in items)
 
+    md = []
 
     md.append(f"# {sop.get('title', 'Standard Operating Procedure')}\n")
 

     md.append(bullet(sop.get("definitions", [])))
 
     md.append("\n## 4. Roles & Responsibilities")
+    for role in sop.get("roles", []):
+        md.append(f"### {role.get('name', 'Role')}")
+        md.append(bullet(role.get("responsibilities", [])))
 
     md.append("\n## 5. Prerequisites")
     md.append(bullet(sop.get("prerequisites", [])))

     md.append("\n## 7. Escalation")
     md.append(bullet(sop.get("escalation", [])))
 
+    md.append("\n## 8. Metrics")
     md.append(bullet(sop.get("metrics", [])))
 
+    md.append("\n## 9. Risks")
     md.append(bullet(sop.get("risks", [])))
 
     v = sop.get("versioning", {})

     return "\n\n".join(md)
 
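A minimal rendering sketch (illustration only; it exercises only keys visible in these hunks and assumes the sections outside the hunks use the same `.get(...)` defaults):

demo_sop = {
    "title": "Demo SOP",
    "roles": [
        {"name": "Coordinator", "responsibilities": ["Owns the intake checklist"]},
    ],
    "metrics": ["Median onboarding time"],
    "risks": [],  # renders as "_None provided._"
}
print(sop_to_markdown(demo_sop))
# Expected fragments: "# Demo SOP", "## 4. Roles & Responsibilities", "### Coordinator", "## 8. Metrics"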
 
+# ============================================================
+# IMPROVED DIAGRAM — AUTO-SIZED CARDS, NO OVERFLOW
+# ============================================================
 
 def create_sop_steps_figure(sop: Dict[str, Any]) -> Figure:
     """
+    Draw each step as a stacked card with:
+    - dynamic height based on description length
+    - number block on the left
+    - title + owner + wrapped description inside card
     """
 
     steps = sop.get("steps", [])
 
     if not steps:
         fig, ax = plt.subplots(figsize=(7, 2))
+        ax.text(0.5, 0.5, "No steps available to visualize.", ha="center", va="center")
         ax.axis("off")
         fig.tight_layout()
         return fig
 
+    # First pass: determine required height for each card
+    card_heights = []
+    total_height = 0.0
+
+    for step in steps:
+        desc_lines = textwrap.wrap(step.get("description", ""), width=70)
+        # base height (title + owner) + 0.3 per line of description
+        base = 1.0  # title + owner + padding
+        per_line = 0.32
+        h = base + per_line * max(len(desc_lines), 1)
+        h += 0.3  # bottom padding
+        card_heights.append(h)
+        total_height += h
+
+    # Add spacing between cards
+    spacing = 0.4
+    total_height += spacing * (len(steps) + 1)
+
+    fig_height = min(20, max(5, total_height))
+    fig, ax = plt.subplots(figsize=(10, fig_height))
     ax.set_xlim(0, 1)
+    ax.set_ylim(0, total_height)
 
+    y = total_height - spacing  # start from top
 
+    for step, h in zip(steps, card_heights):
+        y_bottom = y - h
+        y_top = y
 
+        # Card boundaries
+        x0 = 0.05
+        x1 = 0.95
 
+        # Draw outer card
         ax.add_patch(
             plt.Rectangle(
+                (x0, y_bottom),
+                x1 - x0,
+                h,
                 fill=False,
+                linewidth=1.8,
             )
         )
 
+        # Number block
+        num_block_w = 0.08
         ax.add_patch(
             plt.Rectangle(
+                (x0, y_bottom),
+                num_block_w,
+                h,
                 fill=False,
+                linewidth=1.6,
             )
         )
 
+        # Step number text in the center of the number block
         ax.text(
+            x0 + num_block_w / 2,
+            y_bottom + h / 2,
+            str(step.get("step_number", "?")),
             ha="center",
             va="center",
+            fontsize=13,
             fontweight="bold",
         )
 
+        # Text area start
+        text_x = x0 + num_block_w + 0.02
 
         # Title
         ax.text(
+            text_x,
+            y_top - 0.25,
+            step.get("title", ""),
             ha="left",
             va="top",
             fontsize=12,
             fontweight="bold",
         )
 
+        # Owner
+        owner = step.get("owner_role", "")
         if owner:
+            owner_y = y_top - 0.55
             ax.text(
+                text_x,
+                owner_y,
                 f"Owner: {owner}",
                 ha="left",
                 va="top",
                 fontsize=10,
                 style="italic",
             )
         else:
+            owner_y = y_top - 0.5
 
         # Description (wrapped)
+        desc_lines = textwrap.wrap(step.get("description", ""), width=70)
+        desc_y = owner_y - 0.4
+        for line in desc_lines:
+            ax.text(
+                text_x,
+                desc_y,
+                line,
+                ha="left",
+                va="top",
+                fontsize=9,
+            )
+            desc_y -= 0.3  # vertical spacing per line
+
+        y = y_bottom - spacing  # move down for next card
 
     ax.axis("off")
     fig.tight_layout()
     return fig
 
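The sizing pass above follows a simple rule: wrap the description at 70 characters, then height = 1.0 + 0.32 per wrapped line + 0.3 bottom padding, with 0.4 units of spacing around cards. A worked check (illustration only):

import textwrap

desc = "x" * 150                       # hypothetical 150-character description
lines = textwrap.wrap(desc, width=70)  # -> 3 wrapped lines
h = 1.0 + 0.32 * max(len(lines), 1) + 0.3
print(len(lines), round(h, 2))         # 3 2.26 -> one card is about 2.26 y-units tall

For five such steps the total height is about 5 × 2.26 + 0.4 × 6 = 13.7 units, which stays under the 20-unit figure cap.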
 
+# ============================================================
+# SAMPLE SCENARIOS
+# ============================================================
 
 SAMPLE_SOPS: Dict[str, Dict[str, str]] = {
+    "Volunteer Onboarding": {
+        "title": "Volunteer Onboarding",
+        "description": "Onboard new volunteers including application review, background checks, orientation, training, and site placement.",
         "industry": "Nonprofit / Youth Development",
     },
     "Remote Employee Onboarding": {
         "title": "Remote Employee Onboarding",
+        "description": "Design a remote onboarding SOP for hybrid employees including IT setup, HR paperwork, and culture onboarding.",
+        "industry": "HR / General",
     },
+    "IT Outage Response": {
         "title": "IT Outage Incident Response",
+        "description": "Major outage response SOP including detection, triage, escalation, communication, restoration, and post-mortem.",
         "industry": "IT / Operations",
     },
 }
 
 def load_sample(sample_name: str) -> Tuple[str, str, str]:
     if not sample_name or sample_name not in SAMPLE_SOPS:
         return "", "", "General"

     return s["title"], s["description"], s["industry"]
 
+# ============================================================
+# MAIN HANDLER FOR GRADIO
+# ============================================================
 
 def generate_sop_ui(
     api_key_state: str,
     api_key_input: str,
     base_url: str,
+    model_name: str,
     sop_title: str,
     description: str,
     industry: str,
     tone: str,
     detail_level: str,
 ) -> Tuple[str, str, Figure, str]:
+
     api_key = api_key_input or api_key_state
     if not api_key:
         return (
+            "⚠️ Please enter your API key in the left panel.",
             "",
             create_sop_steps_figure({"steps": []}),
             api_key_state,
         )
 
+    model = model_name or "gpt-4.1"
 
     user_prompt = build_user_prompt(sop_title, description, industry, tone, detail_level)
 

             model=model,
             system_prompt=SOP_SYSTEM_PROMPT,
             user_prompt=user_prompt,
+            max_completion_tokens=2000,
         )
 
         sop = parse_sop_json(raw)

         fig = create_sop_steps_figure(sop)
         json_out = json.dumps(sop, indent=2, ensure_ascii=False)
 
+        return md, json_out, fig, api_key  # persist key in session state
 
     except Exception as e:
         return (

         )
 
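The handler returns `(markdown, json, figure, api_key)`, so its outputs are meant to land on the Markdown panel, the JSON panel, the plot, and the `gr.State` that persists the key. The actual `generate_button.click` wiring lies beyond the end of this diff; a hypothetical hookup consistent with that return shape (component names taken from the UI section below; the `industry`, `tone`, and `detail_level` components are assumed to sit near the `description` box) would be:

# Hypothetical wiring, not shown in this diff.
generate_button.click(
    fn=generate_sop_ui,
    inputs=[
        api_key_state, api_key_input, base_url, model_name,
        sop_title, description, industry, tone, detail_level,
    ],
    outputs=[sop_output, sop_json_output, sop_figure, api_key_state],
)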
 
+# ============================================================
+# GRADIO UI
+# ============================================================
 
 with gr.Blocks(title="ZEN Simple SOP Builder") as demo:
     gr.Markdown(
         """
 # 🧭 ZEN Simple SOP Builder
 
+Generate clean, professional Standard Operating Procedures (SOPs) from a short description,
+plus an auto-generated visual diagram of the steps.
 
+Powered by your own API key (GPT-4.1 by default).
         """
     )
 
     api_key_state = gr.State("")
 
     with gr.Row():
+        # LEFT COLUMN — API + Samples
         with gr.Column(scale=1):
             gr.Markdown("### Step 1 — API & Model Settings")
 
             api_key_input = gr.Textbox(
                 label="LLM API Key",
+                placeholder="Enter your OpenAI (or compatible) API key",
                 type="password",
             )
 
             base_url = gr.Textbox(
                 label="Base URL",
                 value="https://api.openai.com",
+                placeholder="e.g. https://api.openai.com or custom OpenAI-compatible endpoint",
             )
 
             model_name = gr.Textbox(
                 label="Model Name",
+                value="gpt-4.1",
+                placeholder="e.g. gpt-4.1, gpt-4o, etc.",
             )
 
+            gr.Markdown("### Load a Sample SOP")
+
             sample_dropdown = gr.Dropdown(
+                label="Sample scenarios",
                 choices=list(SAMPLE_SOPS.keys()),
                 value=None,
+                info="Optional: load a ready-made example to test the tool.",
             )
+
             load_button = gr.Button("Load Sample into Form")
 
+        # RIGHT COLUMN — SOP Description
         with gr.Column(scale=2):
+            gr.Markdown("### Step 2 — Describe the SOP")
 
             sop_title = gr.Textbox(
                 label="SOP Title",
+                placeholder="e.g. Volunteer Onboarding Workflow",
             )
 
             description = gr.Textbox(
 
     generate_button = gr.Button("🚀 Generate SOP", variant="primary")
 
+    gr.Markdown("### Step 3 — Generated SOP")
 
     with gr.Row():
         with gr.Column(scale=3):
             sop_output = gr.Markdown(
+                label="SOP (Markdown)",
+                value="Your SOP will appear here after generation.",
             )
         with gr.Column(scale=2):
             sop_json_output = gr.Code(

                 language="json",
             )
 
+    gr.Markdown("### Step 4 — Visual Workflow Diagram")
     sop_figure = gr.Plot(label="SOP Steps Diagram")
 
+    # Wire up actions
     load_button.click(
         fn=load_sample,
         inputs=[sample_dropdown],