retvq committed on
Commit
c43d61a
Β·
verified Β·
1 Parent(s): 7ef2d1a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -27
app.py CHANGED
@@ -4,23 +4,17 @@ from langchain_community.document_loaders import PyPDFLoader
4
  from langchain_text_splitters import RecursiveCharacterTextSplitter
5
  from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
6
  from langchain_community.vectorstores import FAISS
7
- from langchain_huggingface import HuggingFaceEndpoint
8
  from langchain_core.prompts import ChatPromptTemplate
9
 
10
- # --- 1. Model Setup using HF Inference API ---
11
- # Get the HF token from environment variables (set in Space secrets)
12
  HF_TOKEN = os.environ.get("HF_TOKEN", "")
13
 
14
  if not HF_TOKEN:
15
  print("⚠️ Warning: HF_TOKEN not set. The app may not work properly.")
16
- print("Please add your Hugging Face token in Space Settings > Repository secrets")
17
 
18
- llm = HuggingFaceEndpoint(
19
- repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
20
- temperature=0.7,
21
- max_new_tokens=2000,
22
- huggingfacehub_api_token=HF_TOKEN
23
- )
24
 
25
  # --- 2. The Core Logic ---
26
  def generate_question_paper(pdf_file, difficulty, num_questions):
@@ -54,11 +48,11 @@ def generate_question_paper(pdf_file, difficulty, num_questions):
54
  context_docs = retriever.invoke("Key concepts and definitions")
55
  context_text = "\n\n".join([doc.page_content for doc in context_docs])
56
 
57
- # E. Prompt
58
- template = """You are an expert academic examiner. Create a formal Question Paper based ONLY on the context provided below.
59
 
60
  CONTEXT:
61
- {context}
62
 
63
  INSTRUCTIONS:
64
  - Difficulty: {difficulty}
@@ -71,25 +65,23 @@ INSTRUCTIONS:
71
 
72
  Do not output conversational text. Output ONLY the exam paper in a well-formatted structure."""
73
 
74
- prompt = ChatPromptTemplate.from_template(template)
75
-
76
- # F. Generate
77
- chain = prompt | llm
78
- response = chain.invoke({
79
- "context": context_text,
80
- "difficulty": difficulty,
81
- "num_questions": num_questions
82
- })
83
 
84
  return response
85
 
86
  except Exception as e:
87
- return f"❌ Error processing PDF: {str(e)}\n\nPlease check:\n1. PDF is valid and contains text\n2. HF_TOKEN is correctly set\n3. You have access to Llama 3"
88
 
89
  # --- 3. The UI ---
90
- with gr.Blocks(title="AI Question Paper Generator") as demo:
91
  gr.Markdown("# πŸ“„ AI Question Paper Generator")
92
- gr.Markdown("Powered by **Llama 3 (8B)** via Hugging Face Inference API")
93
  gr.Markdown("⚑ Fast β€’ 🎯 Accurate β€’ πŸ“š Context-Aware")
94
 
95
  with gr.Row():
@@ -131,8 +123,8 @@ with gr.Blocks(title="AI Question Paper Generator") as demo:
131
 
132
  gr.Markdown("""
133
  ---
134
- **Note:** This app requires a Hugging Face token with access to Llama 3.
135
- Set `HF_TOKEN` in your Space's repository secrets.
136
  """)
137
 
138
  if __name__ == "__main__":
 
4
  from langchain_text_splitters import RecursiveCharacterTextSplitter
5
  from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
6
  from langchain_community.vectorstores import FAISS
7
+ from huggingface_hub import InferenceClient
8
  from langchain_core.prompts import ChatPromptTemplate
9
 
10
+ # --- 1. Model Setup using HF Inference Client ---
 
11
  HF_TOKEN = os.environ.get("HF_TOKEN", "")
12
 
13
  if not HF_TOKEN:
14
  print("⚠️ Warning: HF_TOKEN not set. The app may not work properly.")
 
15
 
16
+ # Use InferenceClient directly instead of LangChain wrapper
17
+ client = InferenceClient(token=HF_TOKEN)
 
 
 
 
18
 
19
  # --- 2. The Core Logic ---
20
  def generate_question_paper(pdf_file, difficulty, num_questions):
 
48
  context_docs = retriever.invoke("Key concepts and definitions")
49
  context_text = "\n\n".join([doc.page_content for doc in context_docs])
50
 
51
+ # E. Create Prompt
52
+ prompt = f"""You are an expert academic examiner. Create a formal Question Paper based ONLY on the context provided below.
53
 
54
  CONTEXT:
55
+ {context_text}
56
 
57
  INSTRUCTIONS:
58
  - Difficulty: {difficulty}
 
65
 
66
  Do not output conversational text. Output ONLY the exam paper in a well-formatted structure."""
67
 
68
+ # F. Generate using Mistral (no license needed and works better)
69
+ response = client.text_generation(
70
+ prompt,
71
+ model="mistralai/Mistral-7B-Instruct-v0.3",
72
+ max_new_tokens=2000,
73
+ temperature=0.7,
74
+ )
 
 
75
 
76
  return response
77
 
78
  except Exception as e:
79
+ return f"❌ Error: {str(e)}\n\nPlease check:\n1. PDF is valid and contains text\n2. HF_TOKEN is correctly set in Space secrets\n3. Try again or contact support"
80
 
81
  # --- 3. The UI ---
82
+ with gr.Blocks(title="AI Question Paper Generator", theme=gr.themes.Soft(primary_hue="blue")) as demo:
83
  gr.Markdown("# πŸ“„ AI Question Paper Generator")
84
+ gr.Markdown("Powered by **Mistral 7B** via Hugging Face Inference API")
85
  gr.Markdown("⚑ Fast β€’ 🎯 Accurate β€’ πŸ“š Context-Aware")
86
 
87
  with gr.Row():
 
123
 
124
  gr.Markdown("""
125
  ---
126
+ **Note:** Set `HF_TOKEN` in your Space's Settings β†’ Repository secrets.
127
+ Get your token from https://huggingface.co/settings/tokens
128
  """)
129
 
130
  if __name__ == "__main__":