retvq committed
Commit d1f54a1 · verified · 1 Parent(s): 5744830

Update app.py

Files changed (1):
  1. app.py +63 -25
app.py CHANGED
@@ -25,6 +25,10 @@ def generate_question_paper(
     num_sets,
     progress=gr.Progress()
 ):
+    # Add timeout protection
+    import time
+    start_time = time.time()
+
     if not pdf_files or len(pdf_files) == 0:
         return "❌ Please upload at least one PDF file."
 
@@ -139,28 +143,45 @@ Do not output conversational text. Output ONLY the exam paper in a well-formatte
     response = ""
     token_count = 0
     max_tokens = 2500 # Increased for longer papers
+    last_update_time = time.time()
+
+    try:
+        for message in client.chat_completion(
+            messages=messages,
+            model="meta-llama/Llama-3.2-3B-Instruct",
+            max_tokens=max_tokens,
+            temperature=0.7,
+            stream=True,
+            timeout=120, # 2 minute timeout per request
+        ):
+            # Check total timeout
+            if time.time() - start_time > 300: # 5 minute total timeout
+                return f"⏱️ Request timeout. Please try with:\n- Fewer PDF files\n- Fewer questions\n- Fewer sets\n\nPartial output:\n{response}"
+
+            if hasattr(message, 'choices') and len(message.choices) > 0:
+                if hasattr(message.choices[0], 'delta') and hasattr(message.choices[0].delta, 'content'):
+                    response += message.choices[0].delta.content or ""
+                    token_count += 1
+
+                    # Update progress every 50 tokens to reduce overhead
+                    if token_count % 50 == 0 or time.time() - last_update_time > 2:
+                        # Calculate progress within this set (70-95% range divided by number of sets)
+                        set_start = 0.70 + (set_num - 1) * 0.30 / num_sets
+                        set_range = 0.25 / num_sets # 25% of total progress for generation
+                        generation_progress = min((token_count / max_tokens), 1.0)
+                        current_progress = set_start + (generation_progress * set_range)
+                        percentage = int(generation_progress * 100)
+
+                        # Update with dynamic percentage
+                        progress(current_progress,
+                                 desc=f"✍️ Generating Question Paper Set {set_num}/{num_sets}... {percentage}%")
+                        last_update_time = time.time()
 
-    for message in client.chat_completion(
-        messages=messages,
-        model="meta-llama/Llama-3.2-3B-Instruct",
-        max_tokens=max_tokens,
-        temperature=0.7,
-        stream=True,
-    ):
-        if hasattr(message, 'choices') and len(message.choices) > 0:
-            if hasattr(message.choices[0], 'delta') and hasattr(message.choices[0].delta, 'content'):
-                response += message.choices[0].delta.content or ""
-                token_count += 1
-                # Calculate progress within this set (70-95% range divided by number of sets)
-                set_start = 0.70 + (set_num - 1) * 0.30 / num_sets
-                set_range = 0.25 / num_sets # 25% of total progress for generation
-                generation_progress = min((token_count / max_tokens), 1.0)
-                current_progress = set_start + (generation_progress * set_range)
-                percentage = int(generation_progress * 100)
-
-                # Update with dynamic percentage
-                progress(current_progress,
-                         desc=f"✍️ Generating Question Paper Set {set_num}/{num_sets}... {percentage}%")
+    except Exception as e:
+        if response:
+            return f"⚠️ Generation interrupted: {str(e)}\n\nPartial output for Set {set_num}:\n{response}"
+        else:
+            raise e
 
     progress(0.70 + set_num * 0.30 / num_sets,
              desc=f"✅ Set {set_num}/{num_sets} generated successfully!")
@@ -247,7 +268,10 @@ with gr.Blocks(title="AI Question Paper Generator") as demo:
         """)
 
         with gr.Column(scale=2):
-            output = gr.Markdown(label="Generated Question Paper(s)")
+            output = gr.Markdown(
+                label="Generated Question Paper(s)",
+                value="👋 Upload PDF files and configure settings to generate question papers..."
+            )
 
     btn.click(
         fn=generate_question_paper,
@@ -258,18 +282,32 @@ with gr.Blocks(title="AI Question Paper Generator") as demo:
            long_difficulty, long_count,
            num_sets
        ],
-       outputs=output
+       outputs=output,
+       show_progress="full"
    )
 
    gr.Markdown("""
-    ---
+    ---
    **Features:**
    - ✅ Multiple PDF support (up to 5 files)
    - ✅ Separate difficulty control for each question type
    - ✅ Customizable question count per section
    - ✅ Generate 1-3 unique question paper sets
    - ✅ Automatic answer key generation for MCQs
+   - ✅ Queue system for concurrent users
+
+   **Performance Tips:**
+   - For faster results: Use 1-2 PDFs, fewer questions, single set
+   - If timeout occurs: Reduce number of questions or sets
+   - Queue position will be shown when multiple users are active
    """)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.queue(
+        max_size=20, # Maximum queue size
+        default_concurrency_limit=2 # Allow 2 concurrent users
+    )
+    demo.launch(
+        show_error=True,
+        share=False
+    )
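
Note: the streaming changes in this commit combine three safeguards: a per-request timeout on the inference call, a 5-minute wall-clock cap on the whole generation, and progress updates throttled to every 50 tokens or 2 seconds, with partial output returned on failure. The following is a minimal, self-contained sketch of that pattern in isolation, not the app's own code: fake_stream(), collect_with_guard() and report are illustrative stand-ins, while the 300 s / 50-token / 2 s thresholds mirror the values in the diff.

import time

def fake_stream(n=500):
    # Stand-in for a streaming chat-completion response.
    for i in range(n):
        yield f"token{i} "

def collect_with_guard(stream, max_tokens=500, total_timeout=300, report=print):
    start_time = time.time()
    last_update_time = start_time
    response = ""
    token_count = 0
    try:
        for chunk in stream:
            # Abort but keep the partial output if the whole job runs too long.
            if time.time() - start_time > total_timeout:
                return f"Timeout. Partial output:\n{response}"
            response += chunk
            token_count += 1
            # Throttle progress reporting: every 50 tokens or every 2 seconds.
            if token_count % 50 == 0 or time.time() - last_update_time > 2:
                report(f"{int(min(token_count / max_tokens, 1.0) * 100)}%")
                last_update_time = time.time()
    except Exception as exc:
        # Surface whatever was generated before the failure, as the commit does.
        if response:
            return f"Interrupted: {exc}\nPartial output:\n{response}"
        raise
    return response

print(collect_with_guard(fake_stream())[:40])

Returning the partial text instead of raising keeps a long multi-set run from losing everything when one request stalls, which is what the "⏱️ Request timeout" and "⚠️ Generation interrupted" branches above do.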
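
Note: the new launch block only takes effect together with Gradio's queue. The sketch below shows the same queue(max_size=20, default_concurrency_limit=2) and launch(show_error=True, share=False) settings in a stripped-down Blocks app, assuming Gradio 4.x, whose Blocks.queue() accepts max_size and default_concurrency_limit; slow_echo is a hypothetical stand-in for generate_question_paper.

import time
import gradio as gr

def slow_echo(text, progress=gr.Progress()):
    for _ in progress.tqdm(range(5)):
        time.sleep(1)  # pretend to do work so queueing is observable
    return f"Echo: {text}"

with gr.Blocks(title="Queue demo") as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Markdown(value="Waiting for input...")
    btn = gr.Button("Run")
    btn.click(fn=slow_echo, inputs=inp, outputs=out, show_progress="full")

if __name__ == "__main__":
    # Up to 20 requests may wait in the queue; at most 2 run concurrently.
    demo.queue(max_size=20, default_concurrency_limit=2)
    demo.launch(show_error=True, share=False)

With default_concurrency_limit=2, at most two generations run at once and additional clicks wait in the queue (up to 20), which is what the "Queue position will be shown when multiple users are active" tip in the UI text refers to.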