ChAbhishek28 committed
Commit bef54cd · 1 Parent(s): 5060335

Send TTS audio message for frontend playback

Files changed (1)
  1. enhanced_websocket_handler.py +8 -11
enhanced_websocket_handler.py CHANGED
@@ -1,5 +1,5 @@
 """
-Enhanced WebSocket handler with hybrid LLM and optional voice features
+Enhanced WebSocket handler with hybrid LLM and voice features
 """
 
 from fastapi import WebSocket, WebSocketDisconnect
@@ -399,22 +399,21 @@ async def handle_voice_message(websocket: WebSocket, data: dict, session_data: dict):
 
 async def get_hybrid_response(user_message: str, context: str, config: dict, knowledge_base: str):
     """Get response using hybrid LLM with document search"""
-
     # Search for relevant documents directly using LanceDB
     try:
         logger.info(f"🔍 Searching documents for: {user_message}")
-
         # Use the search_documents function instead of LangChain tool
         from rag_service import search_documents
-
         # Search for relevant documents
         docs = search_documents(user_message, limit=3)
-
         if docs:
-            if audio_response:
-                await websocket.send_bytes(audio_response)
-            else:
-                logger.warning("⚠️ Could not generate audio response")
+            context = "\n\n".join([doc["content"] for doc in docs])
+            sources = [doc["source"] for doc in docs]
+            logger.info(f"📚 Found {len(docs)} documents from sources: {sources}")
+        else:
+            logger.info("📚 No documents found, using existing context")
+    except Exception as e:
+        logger.warning(f"❌ Document search failed: {e}, using existing context")
     logger.info(f"🤖 Getting LLM response...")
     response_text = await hybrid_llm_service.get_response(
         user_message,
@@ -422,11 +421,9 @@ async def get_hybrid_response(user_message: str, context: str, config: dict, knowledge_base: str):
         system_prompt="""You are a helpful government document assistant. Provide accurate, helpful responses based on the context provided. When appropriate, suggest additional resources or redirect users to relevant departments for more assistance."""
     )
     logger.info(f"✅ LLM response received, length: {len(response_text)}")
-
     # Determine which provider was used
     provider = hybrid_llm_service.choose_llm_provider(user_message)
     provider_used = provider.value if provider else "unknown"
-
     return response_text, provider_used
 
 async def send_text_response(websocket: WebSocket, response_text: str, provider_used: str, session_data: dict):
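
For reference, the retrieval step added in this commit can be exercised on its own. A minimal sketch, assuming rag_service.search_documents returns a list of dicts with "content" and "source" keys (as the added lines imply); build_rag_context is an illustrative name, not a function in the repository:

import logging

logger = logging.getLogger(__name__)

def build_rag_context(user_message: str, fallback_context: str, limit: int = 3) -> str:
    """Mirror of the added block: fetch documents and fold them into the prompt context."""
    try:
        # search_documents comes from the project's LanceDB-backed rag_service module.
        from rag_service import search_documents

        docs = search_documents(user_message, limit=limit)
        if docs:
            sources = [doc["source"] for doc in docs]
            logger.info(f"📚 Found {len(docs)} documents from sources: {sources}")
            return "\n\n".join(doc["content"] for doc in docs)
        logger.info("📚 No documents found, using existing context")
    except Exception as e:
        logger.warning(f"❌ Document search failed: {e}, using existing context")
    return fallback_context

Any failure (including a missing rag_service import) falls through to the existing context, matching the fallback behaviour of the rewritten hunk.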
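
The commit title refers to delivering TTS audio for frontend playback, and the branch removed from get_hybrid_response shows the transport the handler uses for that: a raw binary frame via websocket.send_bytes. A hedged sketch of such a delivery path; send_voice_reply, the tts service object, and the {"type": "audio", ...} envelope are assumptions for illustration, not code from this repository:

import logging

from fastapi import WebSocket

logger = logging.getLogger(__name__)

async def send_voice_reply(websocket: WebSocket, response_text: str, tts) -> None:
    """Send the reply's TTS audio as a binary frame so the frontend can play it back."""
    # Hypothetical control message so the client knows a binary audio frame follows.
    await websocket.send_json({"type": "audio", "format": "mp3", "text": response_text})

    # `tts` stands in for whatever speech service the handler uses; assumed to expose
    # an async synthesize(text) -> bytes method.
    audio_response = await tts.synthesize(response_text)
    if audio_response:
        # Raw binary WebSocket frame, as in the removed branch above.
        await websocket.send_bytes(audio_response)
    else:
        logger.warning("⚠️ Could not generate audio response")

On the browser side the binary frame can be wrapped in a Blob and handed to an audio element for playback.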