from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.tools import tool
from config import EMBEDDING_MODEL_NAME
from langchain_core.runnables import RunnableConfig
from typing import List, Dict, Any
from lancedb_service import lancedb_service
from scenario_analysis_service import scenario_service
from enhanced_search_service import enhanced_search_service
import logging
import json
import asyncio
logger = logging.getLogger("voicebot")
def generate_role_based_checklist(query: str, content: str) -> list:
"""Generate role-specific checklists based on query and content"""
query_lower = query.lower()
content_lower = content.lower() if content else ""
# Pension Beneficiaries & Claimants
if any(phrase in query_lower for phrase in ['pension eligibility', 'pension documents', 'how to apply pension', 'pension application']):
return [
"Verify service eligibility (minimum 10 years qualifying service)",
"Gather required documents (service book, PPO, identity proof)",
"Check for any departmental proceedings or vigilance cases",
"Apply through proper channel 6 months before retirement",
"Follow up with pension disbursing authority for processing"
]
elif any(phrase in query_lower for phrase in ['family pension', 'widow pension', 'dependent pension']):
return [
"Obtain death certificate and service documents of deceased employee",
"Submit family pension application with nominee details",
"Provide proof of relationship and dependency",
"Get certificate from employer about last drawn salary",
"Register with pension disbursing bank for regular payments"
]
elif 'pension calculation' in query_lower or 'pension amount' in query_lower:
return [
"Collect last pay certificate with basic pay and DA details",
"Calculate qualifying service excluding breaks/suspensions",
"Apply pension formula: (Last pay Γ service years) Γ· 70",
"Check for minimum pension ceiling and DA applicability",
"Verify commutation options if considering lump sum"
]
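# Illustrative arithmetic for the formula above (assumed figures, for explanation only):
# a last pay of ₹50,000 with 25 years of qualifying service gives (50,000 × 25) ÷ 70 ≈ ₹17,857 per month.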
# Procurement Officers & Bidders
elif any(phrase in query_lower for phrase in ['tender process', 'bid submission', 'procurement threshold']):
return [
"Verify procurement threshold limits and delegation of powers",
"Check MSME purchase preference and price benefits applicable",
"Ensure technical specifications are non-discriminatory",
"Follow mandatory e-procurement process through GeM/portal",
"Maintain proper documentation for audit trail"
]
elif any(phrase in query_lower for phrase in ['msme relaxation', 'msme benefits']):
return [
"Verify MSME registration certificate validity",
"Apply 15% price preference for MSME quotes",
"Check exemption from EMD (Earnest Money Deposit)",
"Ensure MSME gets advance payment facility if applicable",
"Follow tender splitting norms for MSME participation"
]
elif any(phrase in query_lower for phrase in ['gem portal', 'vendor registration']):
return [
"Complete vendor registration on Government e-Marketplace",
"Upload all required business documents and certificates",
"Get product/service catalog approved by GeM",
"Maintain competitive pricing and service ratings",
"Respond promptly to buyer inquiries and orders"
]
# Finance Staff
elif any(phrase in query_lower for phrase in ['sanctioning authority', 'financial approval', 'expenditure sanction']):
return [
"Verify delegated financial powers and approval limits",
"Check budget provision and availability of funds",
"Ensure compliance with financial rules and procedures",
"Obtain necessary pre-audit clearance if required",
"Maintain proper accounting and audit trail"
]
elif any(phrase in query_lower for phrase in ['budget allocation', 'fund release']):
return [
"Verify budget allocation in approved estimates",
"Check fund availability in treasury/bank account",
"Ensure proper budget head classification",
"Follow fund release schedule and priority guidelines",
"Update budget utilization registers promptly"
]
elif any(phrase in query_lower for phrase in ['audit compliance', 'financial audit']):
return [
"Maintain all vouchers and supporting documents",
"Ensure transactions are recorded in proper registers",
"Respond to audit queries within stipulated time",
"Implement audit recommendations and report compliance",
"Conduct internal audit and review before external audit"
]
# Leadership & Policymakers
elif any(phrase in query_lower for phrase in ['policy impact', 'scenario analysis']):
return [
"Gather baseline data and impact measurement parameters",
"Conduct stakeholder consultation and feedback analysis",
"Prepare cost-benefit analysis for different scenarios",
"Assess implementation feasibility and resource requirements",
"Develop monitoring and evaluation framework"
]
elif any(phrase in query_lower for phrase in ['evidence pack', 'policy brief']):
return [
"Compile relevant policy documents and legal framework",
"Gather statistical data and trend analysis",
"Include comparative analysis from other states/countries",
"Prepare executive summary with key recommendations",
"Ensure all sources are cited and verifiable"
]
# General categories with enhanced checklists
elif "pension" in query_lower:
return [
"Check eligibility criteria and service requirements",
"Collect required documents (service book, PPO, ID proof)",
"Obtain necessary approvals and clearances",
"Submit application through proper channel",
"Follow up with pension office for processing status"
]
elif any(word in query_lower for word in ["procurement", "tender", "bid"]):
return [
"Review procurement guidelines and threshold limits",
"Check MSME relaxations and price preferences",
"Prepare comprehensive bid documents",
"Ensure compliance with technical specifications",
"Submit bid through approved e-procurement platform"
]
elif any(word in query_lower for word in ["finance", "budget", "expenditure"]):
return [
"Verify financial delegation and approval limits",
"Check budget provision and fund availability",
"Ensure compliance with treasury and accounting rules",
"Maintain proper documentation for audit",
"Update financial registers and reports"
]
elif "leave" in query_lower:
return [
"Check leave balance and entitlement",
"Follow prescribed application procedure",
"Obtain necessary approvals from competent authority",
"Arrange work coverage during leave period",
"Update attendance records upon return"
]
else:
return [
"Review relevant policy guidelines and procedures",
"Consult with appropriate authorities if needed",
"Ensure compliance with applicable rules",
"Maintain proper documentation",
"Seek clarification for any doubts"
]
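# Example (illustrative): generate_role_based_checklist("how to apply pension", "") matches the first
# pension branch above and returns its five-step eligibility/documents checklist.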
# Fallback content for when database is empty
FALLBACK_CONTENT = {
"pension": """Pension is a regular payment made during a person's retirement from an investment fund. For government employees in India, pension includes:
1. Basic Pension: Calculated based on last drawn salary and years of service
2. Dearness Relief (DR): Additional amount to counter inflation
3. Medical Benefits: Healthcare coverage post-retirement
4. Family Pension: Benefits for family members
Key features:
- Minimum 10 years service for qualification
- Monthly payment to retired employees
- Pension amount revised periodically for inflation""",
"da_increment": """Dearness Allowance (DA) is paid to government employees to offset inflation impact.
DA 6% Increment Impact:
- DA revised twice yearly (January and July)
- Based on Consumer Price Index (AICPI)
- 6% increase adds significant monthly income
- Example: ₹50,000 basic salary gets ₹3,000 additional per month
- Pensioners get corresponding Dearness Relief increase
- Applicable across all government pay scales""",
"rajasthan": """Rajasthan government employees have comprehensive retirement benefits:
1. Old Pension Scheme (OPS): Restored for all employees in 2022
- 50% of last drawn salary after 33 years
- Family pension available
2. Pension Processing:
- Apply 6 months before retirement
- 3-6 months processing time
- Monthly credit via NEFT
3. Benefits include pension, gratuity, and provident fund
4. Enhanced benefits for teachers and staff""",
"procurement": """Government Procurement Guidelines:
1. Threshold Limits:
- Goods: ₹25,000 to ₹25 lakh (departmental purchase committee)
- Works: ₹1 lakh to ₹5 crore (various committees)
- Services: As per delegation of powers
2. MSME Benefits:
- 15% price preference in competitive bids
- Exemption from EMD (Earnest Money Deposit)
- No tender fee for MSME enterprises
- Advance payment facility available
3. GeM Portal Usage:
- Mandatory for central government purchases
- Direct purchase up to ₹5 lakh
- Rate contract for common items""",
"finance": """Financial Management Guidelines:
1. Sanctioning Authority:
- As per delegation of financial powers
- Budget provision must be available
- Pre-audit clearance where required
2. Documentation:
- All expenditure must have proper vouchers
- Budget registers to be maintained
- Audit trail for all transactions
3. Treasury Rules:
- Follow prescribed payment procedures
- Maintain cash book and other registers
- Submit periodic returns and statements"""
}
def get_fallback_content(query: str) -> List[Dict[str, Any]]:
"""Return fallback content when database search fails"""
query_lower = query.lower()
results = []
if any(word in query_lower for word in ["pension", "retirement"]):
content = FALLBACK_CONTENT["pension"]
results.append({
"clause_text": content,
"summary": "Government pension benefits including basic pension, dearness relief, and medical benefits for retired employees.",
"role_checklist": [
"Check eligibility (minimum 10 years service)",
"Gather required documents (service book, ID proof)",
"Apply 6 months before retirement",
"Follow up with pension office for processing"
],
"source_title": "Government Pension Guide",
"clause_id": "PENSION_001",
"date": "2024",
"url": "",
"score": 0.9
})
if any(word in query_lower for word in ["da", "dearness", "allowance", "increment", "6%"]):
content = FALLBACK_CONTENT["da_increment"]
results.append({
"clause_text": content,
"summary": "Dearness Allowance is revised twice yearly to offset inflation impact on government employee salaries.",
"role_checklist": [
"Check current DA percentage rates",
"Calculate impact on monthly salary",
"Verify automatic application in pay slip",
"Understand revision schedule (January & July)"
],
"source_title": "DA Increment Guidelines",
"clause_id": "DA_001",
"date": "2024",
"url": "",
"score": 0.9
})
if any(word in query_lower for word in ["rajasthan", "state"]):
content = FALLBACK_CONTENT["rajasthan"]
results.append({
"clause_text": content,
"summary": "Rajasthan government restored Old Pension Scheme (OPS) in 2022 with enhanced retirement benefits.",
"role_checklist": [
"Verify OPS eligibility and coverage",
"Apply 6 months before retirement date",
"Collect all required service documents",
"Track processing status through pension office"
],
"source_title": "Rajasthan Pension Rules",
"clause_id": "RAJ_001",
"date": "2024",
"url": "",
"score": 0.9
})
# If no specific match, return query-relevant response
if not results:
# Generate a more intelligent fallback based on the query
if any(word in query_lower for word in ["salary", "pay", "increment", "grade"]):
fallback_text = f"I understand you're asking about '{query}'. While I don't have specific documents loaded for this query, government pay and salary matters typically involve pay commission recommendations, grade pay structures, and periodic revisions. For accurate information about your specific query, please consult the latest government circulars or contact your administrative office."
checklist = [
"Check latest pay commission guidelines",
"Consult current government circulars",
"Contact administrative/accounts office",
"Verify with official government portals"
]
elif any(word in query_lower for word in ["leave", "holiday", "casual", "earned"]):
fallback_text = f"Regarding your query about '{query}', government leave rules typically cover casual leave, earned leave, medical leave, and other statutory leaves. Each type has specific eligibility criteria and application procedures."
checklist = [
"Check leave balance and entitlement",
"Follow proper application procedure",
"Obtain necessary approvals",
"Maintain leave records"
]
elif any(word in query_lower for word in ["audit", "financial", "budget", "expenditure", "accounts"]):
fallback_text = f"Regarding your query about '{query}', government financial audits and accounts are typically maintained at departmental and central levels. Financial audits cover budget utilization, expenditure patterns, and compliance with financial rules. For specific audit reports, you would need to access official government finance portals or contact the concerned audit department."
checklist = [
"Contact Controller and Auditor General (CAG) office",
"Check government finance portals for audit reports",
"Request specific financial year audit documents",
"Verify with concerned department's accounts section"
]
elif any(word in query_lower for word in ["training", "development", "skill", "course"]):
fallback_text = f"Regarding your query about '{query}', government training and development programs are designed to enhance employee capabilities. These include induction training, skill development courses, leadership programs, and specialized technical training through various government training institutes."
checklist = [
"Check available training programs in your department",
"Contact training institutes for course details",
"Apply for relevant skill development programs",
"Utilize online learning platforms like iGOT Karmayogi"
]
else:
fallback_text = f"I understand you're asking about '{query}'. While I don't have specific documents loaded for this query, I can help with government policies, pension rules, allowances, and administrative procedures. Please try rephrasing your question or ask about specific government benefits."
checklist = [
"Clarify your specific query area",
"Check if documents are available in system",
"Contact relevant government office",
"Try alternative search terms"
]
results.append({
"clause_text": fallback_text,
"summary": f"General guidance for query: {query}",
"role_checklist": checklist,
"source_title": "Voice Bot Assistant",
"clause_id": "ASSIST_001",
"date": "2024",
"url": "",
"score": 0.7
})
return results[:2] # Return max 2 fallback documents
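# Example (illustrative): get_fallback_content("rajasthan pension status") matches both the "pension"
# and "rajasthan" branches above and returns those two fallback documents (capped at two).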
# Setup embedding model
embedding_model = HuggingFaceEmbeddings(
model_name=EMBEDDING_MODEL_NAME,
model_kwargs={
"device": "cpu",
"trust_remote_code": True
},
encode_kwargs={
"normalize_embeddings": True
}
)
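# Note: normalize_embeddings=True produces unit-length vectors, so cosine and dot-product ranking
# coincide downstream. Quick sketch (illustrative): embedding_model.embed_query("pension rules")
# returns a list[float] whose dimension is fixed by EMBEDDING_MODEL_NAME.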
async def get_user_knowledge_bases(userid: str) -> List[str]:
"""Get all knowledge bases for a user"""
try:
return await lancedb_service.get_user_knowledge_bases(userid)
except Exception as e:
logger.error(f"β Error fetching knowledge bases: {e}")
return []
async def get_kb_documents(user_id: str, kb_name: str):
"""Get all documents in a knowledge base"""
try:
return await lancedb_service.get_kb_documents(user_id, kb_name)
except Exception as e:
logger.error(f"β Error fetching documents: {e}")
return []
async def delete_document_from_kb(user_id: str, kb_name: str, filename: str):
"""Delete a document from knowledge base"""
try:
return await lancedb_service.delete_document_from_kb(user_id, kb_name, filename)
except Exception as e:
logger.error(f"β Error deleting document: {e}")
return False
async def search_documents_async(query: str, limit: int = 5) -> List[Dict[str, Any]]:
"""
Enhanced async search for documents in government knowledge base (1500+ docs).
Uses advanced search strategies to find the most relevant documents.
Returns a list of documents with content for compatibility with existing code.
"""
try:
# Use enhanced search service for better results with large document collections
logger.info(f"π Enhanced search for: '{query}' (limit: {limit})")
# First try enhanced search (specifically good for pension queries)
results = await enhanced_search_service.search_with_fallback(query, limit)
if results:
logger.info(f"β
Enhanced search found {len(results)} documents")
return results
# Fallback to original logic with enhanced query
knowledge_bases = ["government_docs"] # Default
query_lower = query.lower()
# Enhance query for better relevance based on category
enhanced_query = query
# Role-specific query enhancement
# Pension Beneficiary queries
if any(word in query_lower for word in ["pension eligibility", "pension documents", "how to apply pension", "pension certificate"]):
enhanced_query = f"{query} pension eligibility documents application process beneficiary requirements"
elif any(word in query_lower for word in ["family pension", "widow pension", "dependent pension"]):
enhanced_query = f"{query} family pension eligibility widow dependent benefits"
elif any(word in query_lower for word in ["pension calculation", "pension amount", "pension formula"]):
enhanced_query = f"{query} pension calculation formula amount computation service years"
# Procurement Officer queries
elif any(word in query_lower for word in ["tender process", "bid submission", "procurement threshold"]):
enhanced_query = f"{query} procurement tender bidding process thresholds guidelines"
elif any(word in query_lower for word in ["msme relaxation", "msme benefits", "small scale industry"]):
enhanced_query = f"{query} msme relaxation benefits procurement small scale industry"
elif any(word in query_lower for word in ["gem portal", "vendor registration", "vendor empanelment"]):
enhanced_query = f"{query} gem portal vendor registration empanelment process"
# Finance Staff queries
elif any(word in query_lower for word in ["sanctioning authority", "financial approval", "expenditure sanction"]):
enhanced_query = f"{query} sanctioning authority financial approval expenditure delegation"
elif any(word in query_lower for word in ["budget allocation", "fund release", "treasury"]):
enhanced_query = f"{query} budget allocation fund release treasury rules procedures"
elif any(word in query_lower for word in ["audit compliance", "financial audit", "audit report"]):
enhanced_query = f"{query} audit compliance financial audit reporting procedures"
# Leadership/Policymaker queries
elif any(word in query_lower for word in ["policy impact", "scenario analysis", "comparative analysis"]):
enhanced_query = f"{query} policy impact scenario analysis comparison evidence"
elif any(word in query_lower for word in ["evidence pack", "policy brief", "decision support"]):
enhanced_query = f"{query} evidence pack policy brief decision support documentation"
# General category queries
elif "pension" in query_lower:
if any(word in query_lower for word in ["changes", "impact", "rules"]):
enhanced_query = f"{query} pension rules retirement benefits modifications"
elif "calculation" in query_lower or "formula" in query_lower:
enhanced_query = f"{query} pension calculation retirement benefits formula"
elif any(word in query_lower for word in ["old age", "elderly", "senior", "old"]):
enhanced_query = f"{query} pension retirement benefits elderly old age senior citizen"
# Leave queries
elif any(word in query_lower for word in ["leave", "casual", "earned"]):
enhanced_query = f"{query} leave rules entitlement policy"
# Allowance queries
elif any(word in query_lower for word in ["allowance", "da", "dearness"]):
enhanced_query = f"{query} allowance rates dearness increment"
# Procurement queries
elif any(word in query_lower for word in ["tender", "procurement", "bid"]):
enhanced_query = f"{query} procurement tender bidding process"
# Medical queries
elif any(word in query_lower for word in ["medical", "health", "reimbursement"]):
enhanced_query = f"{query} medical health reimbursement cghs"
# Transfer queries
elif any(word in query_lower for word in ["transfer", "posting"]):
enhanced_query = f"{query} transfer posting policy rules"
# Audit and financial queries
elif any(word in query_lower for word in ["audit", "financial", "budget", "expenditure", "accounts", "finance"]):
enhanced_query = f"{query} audit financial budget expenditure accounts"
# Training and development queries
elif any(word in query_lower for word in ["training", "development", "course", "skill"]):
enhanced_query = f"{query} training development skill course capacity building"
# Salary and pay queries
elif any(word in query_lower for word in ["salary", "pay", "grade", "scale"]):
enhanced_query = f"{query} salary pay grade scale compensation"
logger.info(f"π Enhanced query: '{enhanced_query}' (original: '{query}')")
# Temporarily disable Rajasthan documents table due to vector dimension mismatch
# if any(keyword in query_lower for keyword in ["rajasthan", "pension", "circular", "pay", "rules"]):
# # Use separate table for Rajasthan documents
# return await search_rajasthan_documents_async(query, limit)
all_docs = []
for kb in knowledge_bases:
try:
logger.info(f"π Searching in knowledge base: {kb} for query: '{enhanced_query}'")
docs = await lancedb_service.similarity_search(enhanced_query, "system", kb, k=limit*2) # Get more docs for filtering
if docs:
logger.info(f"β
Found {len(docs)} documents in {kb}")
all_docs.extend(docs)
else:
logger.warning(f"β οΈ No documents found in knowledge base {kb}")
except Exception as e:
logger.error(f"β Search failed for knowledge base {kb}: {e}")
continue
if not all_docs:
logger.warning(f"π No documents found in database for query: '{query}', using fallback content")
return get_fallback_content(query)
# SIMPLIFIED: Trust the semantic search results - minimal filtering
filtered_docs = []
for doc in all_docs:
# Handle different document object types
if hasattr(doc, 'page_content'):
content = doc.page_content.lower()
metadata = getattr(doc, 'metadata', {})
else:
content = str(doc).lower()
metadata = {}
# Start with base relevance score from LanceDB
relevance_score = getattr(doc, 'score', 0.7) # Higher base score
# Simple keyword matching boost for government documents
government_keywords = ['pension', 'retirement', 'government', 'rules', 'policy', 'allowance', 'benefits', 'service']
query_keywords = query_lower.split()
# Boost score for keyword matches
for keyword in query_keywords:
if keyword in content:
relevance_score += 0.2 # Small boost per matching keyword
# Extra boost for exact government keyword matches
for gov_keyword in government_keywords:
if gov_keyword in query_lower and gov_keyword in content:
relevance_score += 0.3
# VERY PERMISSIVE THRESHOLD - trust the semantic search
threshold = -0.5 # Accept almost all documents returned by semantic search
if relevance_score > threshold:
logger.info(f"β
Document PASSED filter: score {relevance_score:.2f} > threshold {threshold}")
# Add relevance score to document
if hasattr(doc, 'metadata'):
doc.metadata['relevance_score'] = relevance_score
filtered_docs.append(doc)
else:
logger.info(f"β Document FAILED filter: score {relevance_score:.2f} <= threshold {threshold}")
# Remove duplicates based on content similarity first
unique_docs = []
seen_content = set()
for doc in filtered_docs:
content_hash = hash(getattr(doc, 'page_content', str(doc))[:200]) # Use first 200 chars as content signature
if content_hash not in seen_content:
seen_content.add(content_hash)
unique_docs.append(doc)
# Sort by relevance score and limit results
unique_docs = sorted(unique_docs, key=lambda x: getattr(x, 'metadata', {}).get('relevance_score', 0), reverse=True)[:limit]
if not unique_docs:
logger.warning(f"π No relevant documents found after filtering for query: '{query}', using fallback content")
return get_fallback_content(query)
logger.info(f"π Filtered to {len(unique_docs)} unique documents from {len(all_docs)} total (removed {len(filtered_docs) - len(unique_docs)} duplicates)")
results = []
for doc in unique_docs:
metadata = doc.metadata if hasattr(doc, 'metadata') else {}
clause_text = doc.page_content if hasattr(doc, 'page_content') else str(doc)
# Simple extractive summary: first sentence or up to 2 lines
summary = clause_text.split(". ")[0][:180] + ("..." if len(clause_text) > 180 else "")
# Enhanced role-aware checklist logic
role_checklist = generate_role_based_checklist(query, clause_text)
results.append({
"clause_text": clause_text,
"summary": summary,
"role_checklist": role_checklist,
"source_title": metadata.get('title', metadata.get('source', 'Unknown')),
"clause_id": metadata.get('clause_id', ''),
"date": metadata.get('date', ''),
"url": metadata.get('url', ''),
"score": getattr(doc, 'score', 1.0)
})
logger.info(f"π Found {len(results)} documents for query: {query}")
return results
except Exception as e:
logger.error(f"β Error in search_documents_async: {e}")
return get_fallback_content(query)
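# Usage sketch (hypothetical caller, inside an event loop):
#     results = await search_documents_async("pension calculation formula", limit=5)
#     for r in results:
#         print(r["source_title"], r["score"], r["summary"])
# Each result dict carries clause_text, summary, role_checklist, source_title, clause_id, date, url and score.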
async def search_rajasthan_documents_async(query: str, limit: int = 5) -> List[Dict[str, Any]]:
"""
Async search specifically in the Rajasthan documents table using direct LanceDB query.
"""
try:
import lancedb
db = lancedb.connect('./lancedb_data')
if 'rajasthan_documents' not in db.table_names():
logger.warning("β οΈ Rajasthan documents table not found")
return []
tbl = db.open_table('rajasthan_documents')
query_embedding = embedding_model.embed_query(query)
# LanceDB search is sync, so run in thread executor
import pandas as pd
import concurrent.futures
def run_search():
return tbl.search(query_embedding).limit(limit).to_pandas()
loop = asyncio.get_running_loop()
search_results = await loop.run_in_executor(None, run_search)
if search_results.empty:
logger.info(f"π No results found in Rajasthan documents for: {query}")
return get_fallback_content(query)
results = []
for _, row in search_results.iterrows():
clause_text = row['content']
summary = clause_text.split(". ")[0][:180] + ("..." if len(clause_text) > 180 else "")
role_checklist = []
query_lower = query.lower()
if "pension" in query_lower:
role_checklist = [
"Check eligibility (service years, misconduct)",
"Collect required documents (service book, ID, proof)",
"Obtain approvals (sanctioning authority)",
"Submit application to pension office"
]
elif "procurement" in query_lower or "bid" in query_lower:
role_checklist = [
"Review procurement thresholds and MSME relaxations",
"Prepare bid documents",
"Complete registration and approvals",
"Submit bid before deadline"
]
elif "finance" in query_lower:
role_checklist = [
"Check sanctioning steps",
"Update registers",
"Obtain necessary approvals",
"Notify stakeholders"
]
results.append({
"clause_text": clause_text,
"summary": summary,
"role_checklist": role_checklist,
"source_title": row.get('title', row.get('filename', 'Unknown')),
"clause_id": row.get('clause_id', ''),
"date": row.get('date', ''),
"url": row.get('url', ''),
"score": float(row.get('_distance', 1.0))
})
logger.info(f"π Found {len(results)} Rajasthan documents for query: {query}")
return results
except Exception as e:
logger.error(f"β Error searching Rajasthan documents: {e}")
return []
@tool
async def search_docs(query: str, config: RunnableConfig) -> str:
"""Search the knowledge base for relevant context within a specific knowledge base."""
userid = config["configurable"].get("thread_id")
knowledge_base = config["configurable"].get("knowledge_base", "government_docs")
try:
# Search in the specified knowledge base
import time
t0 = time.time()
docs = await lancedb_service.similarity_search(query, userid, knowledge_base)
t1 = time.time()
if docs:
# Advanced extractive summarization using NLTK
try:
import nltk
nltk.download('punkt', quiet=True)
from nltk.tokenize import sent_tokenize
except ImportError:
sent_tokenize = lambda x: x.split('.')
t2 = time.time()
# Embedding-based chunk selection
try:
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer('all-MiniLM-L6-v2')
except ImportError:
embedder = None
t3 = time.time()
def select_best_chunk(chunks, query):
if not embedder or not chunks:
return chunks[0] if chunks else ""
chunk_embeddings = embedder.encode(chunks)
query_embedding = embedder.encode([query])[0]
import numpy as np
scores = [np.dot(chunk_emb, query_embedding)/(np.linalg.norm(chunk_emb)*np.linalg.norm(query_embedding)) for chunk_emb in chunk_embeddings]
best_idx = int(np.argmax(scores))
return chunks[best_idx]
def extractive_summary(text, max_sentences=3):
sentences = sent_tokenize(text)
keywords = query.lower().split()
scored = [s for s in sentences if any(k in s.lower() for k in keywords)]
if scored:
return ' '.join(scored[:max_sentences])
return ' '.join(sentences[:max_sentences])
t4 = time.time()
compressed_contexts = []
for doc in docs:
if hasattr(doc, 'chunks') and doc.chunks:
best_chunk = select_best_chunk(doc.chunks, query)
summary = extractive_summary(best_chunk)
else:
paragraphs = doc.page_content.split('\n\n')
best_chunk = select_best_chunk(paragraphs, query) if paragraphs else doc.page_content
summary = extractive_summary(best_chunk)
compressed_contexts.append(summary)
t5 = time.time()
context = "\n\n".join(compressed_contexts)
t6 = time.time()
logger.info(f"[Latency] Document search: {t1-t0:.3f}s, NLTK setup: {t2-t1:.3f}s, Embedding setup: {t3-t2:.3f}s, Function setup: {t4-t3:.3f}s, Chunking/summarization: {t5-t4:.3f}s, Context join: {t6-t5:.3f}s, Total: {t6-t0:.3f}s")
return f"π Found {len(docs)} relevant documents (chunked & summarized):\n\n{context}"
else:
context = ""
t7 = time.time()
logger.info(f"[Latency] Document search: {t1-t0:.3f}s, No docs found, Total: {t7-t0:.3f}s")
return "No relevant documents found in the knowledge base."
except Exception as e:
logger.error(f"β Error searching documents: {e}")
return "Error occurred while searching documents."
@tool
async def search_government_docs(query: str, config: RunnableConfig) -> str:
"""Search government documents for relevant information and policies."""
try:
# Search specifically in government_docs knowledge base
docs = await lancedb_service.similarity_search(query, "system", "government_docs")
if not docs:
return "No relevant government documents found for your query."
context = "\n\n".join([doc.page_content for doc in docs])
sources = list(set([doc.metadata.get('source', 'Unknown') for doc in docs]))
result = f"π Found {len(docs)} relevant government documents:\n\n{context}"
if sources:
result += f"\n\nπ Sources: {', '.join(sources)}"
return result
except Exception as e:
logger.error(f"β Error searching government documents: {e}")
return "Error occurred while searching government documents."
@tool
async def analyze_scenario(scenario_query: str, config: RunnableConfig) -> str:
"""
Analyze government scenarios and create visualizations including charts, graphs, and diagrams.
Use this tool when users ask for scenario analysis, data visualization, charts, graphs, or diagrams
related to government processes, budgets, policies, organizational structures, or performance metrics.
Args:
scenario_query: Description of the scenario to analyze (e.g., "budget analysis for health department",
"policy implementation timeline", "organizational structure", "performance metrics")
"""
try:
logger.info(f"π Analyzing scenario: {scenario_query}")
# Parse the scenario query to determine type and extract data
scenario_data = await _parse_scenario_query(scenario_query)
# Perform scenario analysis
result = await scenario_service.analyze_government_scenario(scenario_data)
if result.get("success", False):
# Format response with images
response = f"π **Scenario Analysis Complete!**\n\n"
response += result.get("analysis", "")
response += f"\n\nπΌοΈ **Generated {len(result.get('images', []))} visualization(s)**"
# Add image information for frontend rendering
if result.get("images"):
response += "\n\n**SCENARIO_IMAGES_START**\n"
response += json.dumps(result["images"])
response += "\n**SCENARIO_IMAGES_END**"
return response
else:
return f"β Error in scenario analysis: {result.get('error', 'Unknown error')}"
except Exception as e:
logger.error(f"β Error in scenario analysis tool: {e}")
return f"Error occurred while analyzing scenario: {str(e)}"
async def _parse_scenario_query(query: str) -> Dict[str, Any]:
"""Parse scenario query to determine type and extract relevant data"""
query_lower = query.lower()
# Determine scenario type based on keywords
if any(word in query_lower for word in ["budget", "financial", "expenditure", "allocation", "funding"]):
scenario_type = "budget"
# Extract budget data if mentioned in query
data = _extract_budget_data(query)
elif any(word in query_lower for word in ["policy", "implementation", "timeline", "plan", "strategy"]):
scenario_type = "policy"
data = _extract_policy_data(query)
elif any(word in query_lower for word in ["organization", "hierarchy", "structure", "reporting", "org"]):
scenario_type = "organization"
data = _extract_org_data(query)
elif any(word in query_lower for word in ["performance", "metrics", "kpi", "efficiency", "evaluation"]):
scenario_type = "performance"
data = _extract_performance_data(query)
elif any(word in query_lower for word in ["workflow", "process", "flow", "procedure", "steps"]):
scenario_type = "workflow"
data = _extract_workflow_data(query)
else:
scenario_type = "general"
data = {}
return {
"type": scenario_type,
"title": f"Government {scenario_type.title()} Analysis",
"data": data
}
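# Example (illustrative): await _parse_scenario_query("budget allocation for health department")
# returns {"type": "budget", "title": "Government Budget Analysis", "data": {}}.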
def _extract_budget_data(query: str) -> Dict[str, Any]:
"""Extract budget-related data from query"""
# This could be enhanced with NLP to extract actual numbers and departments
# For now, return sample data structure
return {}
def _extract_policy_data(query: str) -> Dict[str, Any]:
"""Extract policy-related data from query"""
return {}
def _extract_org_data(query: str) -> Dict[str, Any]:
"""Extract organizational data from query"""
return {}
def _extract_performance_data(query: str) -> Dict[str, Any]:
"""Extract performance data from query"""
return {}
def _extract_workflow_data(query: str) -> Dict[str, Any]:
"""Extract workflow data from query"""
return {}
if __name__ == "__main__":
import asyncio
async def test_search():
print("π Testing search_docs RAG tool with LanceDB vector store...\n")
test_user_id = "test_user_123"
test_knowledge_base = "test_kb"
while True:
user_input = input("Enter a query (or 'exit'): ").strip()
if user_input.lower() == "exit":
break
kb_input = input(f"Knowledge base (current: {test_knowledge_base}, press Enter to keep): ").strip()
if kb_input:
test_knowledge_base = kb_input
try:
result = await search_docs.ainvoke(
{"query": user_input},
config=RunnableConfig(
configurable={
"thread_id": test_user_id,
"knowledge_base": test_knowledge_base
}
)
)
print(f"\nπ Results from '{test_knowledge_base}' knowledge base:\n")
print(result)
print("\n" + "="*50 + "\n")
except Exception as e:
print(f"β Error: {e}")
asyncio.run(test_search())