retvq committed
Commit 0a1d5c9 · verified · 1 Parent(s): 5132517

Update app.py

Files changed (1):
  1. app.py (+23 -3)

app.py CHANGED
@@ -17,7 +17,7 @@ if not HF_TOKEN:
 client = InferenceClient(token=HF_TOKEN)
 
 # --- 2. The Core Logic ---
-def generate_question_paper(pdf_file, difficulty, num_questions):
+def generate_question_paper(pdf_file, difficulty, num_questions, progress=gr.Progress()):
     if not pdf_file:
         return "❌ Please upload a PDF file first."
 
@@ -26,27 +26,34 @@ def generate_question_paper(pdf_file, difficulty, num_questions):
 
     try:
         # A. Load PDF
+        progress(0, desc="📄 PDF file uploaded, accessing file...")
         loader = PyPDFLoader(pdf_file.name)
         pages = loader.load()
 
         if not pages:
             return "❌ Error: Could not extract text from PDF. Please ensure it's a valid PDF with text content."
 
+        progress(0.15, desc="✅ PDF loaded successfully, extracting text...")
+
         # B. Split Text
         text_splitter = RecursiveCharacterTextSplitter(
             chunk_size=1000,
             chunk_overlap=100
         )
         chunks = text_splitter.split_documents(pages)
+        progress(0.3, desc="📝 Text extracted, preparing embeddings...")
 
         # C. Vector Store (FAISS)
         embeddings = FastEmbedEmbeddings()
+        progress(0.4, desc="🧠 Creating knowledge base...")
         vector_store = FAISS.from_documents(chunks, embeddings)
+        progress(0.5, desc="✅ Knowledge base ready, analyzing content...")
 
         # D. Retrieve Context
         retriever = vector_store.as_retriever(search_kwargs={"k": 7})
         context_docs = retriever.invoke("Key concepts and definitions")
         context_text = "\n\n".join([doc.page_content for doc in context_docs])
+        progress(0.6, desc="🎯 Key concepts identified, activating AI model...")
 
         # E. Create Prompt
         prompt = f"""You are an expert academic examiner. Create a formal Question Paper based ONLY on the context provided below.
@@ -66,9 +73,11 @@ INSTRUCTIONS:
 Do not output conversational text. Output ONLY the exam paper in a well-formatted structure."""
 
         # F. Generate using chat completion with a supported model
+        progress(0.7, desc="🤖 AI model activated, generating questions...")
         messages = [{"role": "user", "content": prompt}]
 
         response = ""
+        token_count = 0
         for message in client.chat_completion(
             messages=messages,
             model="meta-llama/Llama-3.2-3B-Instruct",
@@ -79,16 +88,21 @@ Do not output conversational text. Output ONLY the exam paper in a well-formatte
             if hasattr(message, 'choices') and len(message.choices) > 0:
                 if hasattr(message.choices[0], 'delta') and hasattr(message.choices[0].delta, 'content'):
                     response += message.choices[0].delta.content or ""
+                    token_count += 1
+                    # Update progress from 70% to 95% during generation
+                    progress_val = min(0.7 + (token_count / 2000) * 0.25, 0.95)
+                    progress(progress_val, desc=f"✍️ Generating question paper... {int((progress_val - 0.7) / 0.25 * 100)}%")
 
+        progress(1.0, desc="✅ Question paper generated successfully!")
         return response
 
     except Exception as e:
         return f"❌ Error: {str(e)}\n\nPlease check:\n1. PDF is valid and contains text\n2. HF_TOKEN is correctly set in Space secrets\n3. Try again or contact support"
 
 # --- 3. The UI ---
-with gr.Blocks(title="AI Question Paper Generator") as demo:
+with gr.Blocks(title="AI Question Paper Generator", theme=gr.themes.Soft(primary_hue="blue")) as demo:
     gr.Markdown("# 📄 AI Question Paper Generator")
-    gr.Markdown("Powered by **Fine Tuned Llama 3.2 3B**")
+    gr.Markdown("Powered by **Llama 3.2 3B** via Hugging Face Inference API")
     gr.Markdown("⚡ Fast • 🎯 Accurate • 📚 Context-Aware")
 
     with gr.Row():
@@ -128,5 +142,11 @@ with gr.Blocks(title="AI Question Paper Generator") as demo:
         outputs=output
     )
 
+    gr.Markdown("""
+    ---
+    **Note:** Set `HF_TOKEN` in your Space's Settings → Repository secrets.
+    Get your token from https://huggingface.co/settings/tokens
+    """)
+
 if __name__ == "__main__":
     demo.launch()
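Note on the progress API this commit adopts: Gradio injects a live tracker whenever a handler declares a `progress=gr.Progress()` default, and calling the tracker with a float in [0, 1] plus a `desc` updates the bar shown on the output component. A minimal, self-contained sketch of the pattern, assuming a recent Gradio 4.x release; the handler and component names below are illustrative, not taken from app.py:

import time
import gradio as gr

def slow_task(text, progress=gr.Progress()):
    # Gradio injects a tracker for this argument; each call moves
    # the progress bar rendered on the output component.
    progress(0, desc="Starting...")
    for i in range(10):
        time.sleep(0.2)  # stand-in for real work
        progress((i + 1) / 10, desc=f"Step {i + 1}/10")
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    gr.Button("Run").click(slow_task, inputs=inp, outputs=out)

if __name__ == "__main__":
    demo.launch()

Because the tracker is a plain default argument, the same handler still runs outside Gradio if a caller passes its own no-op callable, which keeps the core logic testable.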
 
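The generation loop streams deltas from `InferenceClient.chat_completion` and maps the running chunk count onto the 70% to 95% band of the bar. A sketch of that consumption pattern, assuming `stream=True` sits among the call arguments elided from the hunk (current huggingface_hub streaming output does expose `choices[0].delta.content`); the 2000-chunk denominator mirrors the commit's own heuristic, not a library constant, and the token is a placeholder:

from huggingface_hub import InferenceClient

client = InferenceClient(token="hf_xxx")  # placeholder token

response = ""
token_count = 0
progress_val = 0.7
for chunk in client.chat_completion(
    messages=[{"role": "user", "content": "Write one exam question."}],
    model="meta-llama/Llama-3.2-3B-Instruct",
    max_tokens=512,
    stream=True,  # yields incremental deltas instead of one message
):
    if not chunk.choices:
        continue
    response += chunk.choices[0].delta.content or ""
    token_count += 1
    # Same mapping as the commit: chunk N of an assumed ~2000 moves
    # the bar linearly from 0.70 toward a 0.95 ceiling.
    progress_val = min(0.7 + (token_count / 2000) * 0.25, 0.95)

print(f"final mapped progress: {progress_val:.2f}")
print(response)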
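For context, steps A-D of the unchanged code form a small retrieval pipeline: load the PDF, chunk it, index the chunks in FAISS, then pull the nearest chunks for a fixed probe query. A standalone sketch under current langchain packaging; the import paths are assumptions (langchain has moved these modules between releases) and `sample.pdf` is a placeholder:

from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter

# A. Load the PDF into one Document per page
pages = PyPDFLoader("sample.pdf").load()

# B. Split into ~1000-character chunks with 100 characters of overlap
chunks = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=100
).split_documents(pages)

# C. Embed the chunks and index them in an in-memory FAISS store
vector_store = FAISS.from_documents(chunks, FastEmbedEmbeddings())

# D. Retrieve the 7 chunks closest to a fixed probe query
retriever = vector_store.as_retriever(search_kwargs={"k": 7})
docs = retriever.invoke("Key concepts and definitions")
context_text = "\n\n".join(doc.page_content for doc in docs)
print(context_text[:500])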