Shim
committed on
Commit · 79cc1d2
1 Parent(s): 37545aa
Improve model generation and remove static responses - use better Hebrew model and full persona prompts
app.py
CHANGED
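The commit swaps the Spaces deployment onto a small multilingual checkpoint (bigscience/bloomz-560m) with microsoft/DialoGPT-medium as a conversational fallback. A minimal sketch of that load-with-fallback pattern, using the checkpoint names from the diff; the load_chat_model helper is ours for illustration, and it wraps the actual from_pretrained calls in the try block (the diff only wraps the name assignment, so its except branch cannot trigger on a failed load):

from transformers import AutoModelForCausalLM, AutoTokenizer

PREFERRED = "bigscience/bloomz-560m"    # multilingual, some Hebrew coverage
FALLBACK = "microsoft/DialoGPT-medium"  # English-centric conversational fallback

def load_chat_model(preferred=PREFERRED, fallback=FALLBACK):
    """Try the preferred checkpoint first; fall back only if loading it fails."""
    last_error = None
    for name in (preferred, fallback):
        try:
            tokenizer = AutoTokenizer.from_pretrained(name)
            model = AutoModelForCausalLM.from_pretrained(name, low_cpu_mem_usage=True)
            return name, tokenizer, model
        except Exception as exc:  # network error, missing files, out of memory, ...
            last_error = exc
    raise RuntimeError(f"No chat model could be loaded: {last_error}")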
|
@@ -42,24 +42,35 @@ class MirautrApp:
|
|
| 42 |
is_hf_spaces = os.getenv("SPACE_ID") is not None
|
| 43 |
|
| 44 |
if is_hf_spaces:
|
| 45 |
-
logger.info("Running in Hugging Face Spaces - using
|
| 46 |
-
# Use a
|
| 47 |
-
model_name = "
|
| 48 |
-
|
| 49 |
-
|
| 50 |
else:
|
| 51 |
# For local development, try Hebrew-specific model first
|
| 52 |
try:
|
| 53 |
model_name = "yam-peleg/Hebrew-Mistral-7B"
|
| 54 |
logger.info(f"Loading Hebrew model: {model_name}")
|
| 55 |
except:
|
| 56 |
-
# Fallback to
|
| 57 |
-
model_name = "
|
| 58 |
-
logger.info(f"Falling back to
|
| 59 |
|
| 60 |
# Load tokenizer
|
| 61 |
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 62 |
|
| 63 |
# Determine the best settings for the environment
|
| 64 |
if torch.cuda.is_available() and not is_hf_spaces:
|
| 65 |
torch_dtype = torch.float16
|
|
@@ -70,14 +81,7 @@ class MirautrApp:
|
|
| 70 |
device_map = None
|
| 71 |
|
| 72 |
# Load model with appropriate settings
|
| 73 |
-
if "
|
| 74 |
-
# Use Seq2Seq model for T5
|
| 75 |
-
self.model = AutoModelForSeq2SeqLM.from_pretrained(
|
| 76 |
-
model_name,
|
| 77 |
-
torch_dtype=torch_dtype,
|
| 78 |
-
low_cpu_mem_usage=True
|
| 79 |
-
)
|
| 80 |
-
elif "mistral" in model_name.lower():
|
| 81 |
# Use CausalLM for Mistral with additional settings
|
| 82 |
self.model = AutoModelForCausalLM.from_pretrained(
|
| 83 |
model_name,
|
|
@@ -91,35 +95,31 @@ class MirautrApp:
|
|
| 91 |
self.model = AutoModelForCausalLM.from_pretrained(
|
| 92 |
model_name,
|
| 93 |
torch_dtype=torch_dtype,
|
| 94 |
-
low_cpu_mem_usage=True
|
|
|
|
| 95 |
)
|
| 96 |
|
| 97 |
# Create text generation pipeline with appropriate settings
|
| 98 |
generation_kwargs = {
|
| 99 |
-
"max_new_tokens":
|
| 100 |
-
"temperature": 0.
|
| 101 |
"do_sample": True,
|
| 102 |
-
"
|
|
| 103 |
"return_full_text": False
|
| 104 |
}
|
| 105 |
|
| 106 |
-
#
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
)
|
| 114 |
-
else:
|
| 115 |
-
self.generator = pipeline(
|
| 116 |
-
"text-generation",
|
| 117 |
-
model=self.model,
|
| 118 |
-
tokenizer=self.tokenizer,
|
| 119 |
-
**generation_kwargs
|
| 120 |
-
)
|
| 121 |
|
| 122 |
-
logger.info("Model loaded successfully")
|
| 123 |
|
| 124 |
except Exception as e:
|
| 125 |
logger.error(f"Error loading model: {e}")
|
|
@@ -159,90 +159,115 @@ class MirautrApp:
|
|
| 159 |
# Prepare conversation context
|
| 160 |
context = self.conversation_manager.get_conversation_context(conversation_state)
|
| 161 |
|
| 162 |
-
#
|
| 163 |
response = None
|
| 164 |
if self.generator:
|
| 165 |
try:
|
| 166 |
-
#
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
response = outputs[0]["generated_text"].strip()
|
| 176 |
|
| 177 |
-
# Clean up
|
| 178 |
-
if
|
| 179 |
-
|
| 180 |
|
| 181 |
else:
|
| 182 |
-
|
| 183 |
-
full_prompt = f"{system_prompt}\n\nืืงืฉืจ: {context}\n\nืืืฉืชืืฉ ืืืจ: {user_message}\n\nืชืืืื:"
|
| 184 |
-
outputs = self.generator(full_prompt)
|
| 185 |
-
response = outputs[0]["generated_text"]
|
| 186 |
-
# Extract only the new generated part
|
| 187 |
-
response = response[len(full_prompt):].strip()
|
| 188 |
-
|
| 189 |
-
# Basic validation and cleanup
|
| 190 |
-
if not response or len(response.strip()) < 5:
|
| 191 |
response = None
|
| 192 |
|
| 193 |
except Exception as gen_error:
|
| 194 |
-
logger.
|
| 195 |
response = None
|
| 196 |
|
| 197 |
-
# If
|
| 198 |
if not response:
|
| 199 |
-
|
| 200 |
part_info = DEFAULT_PARTS.get(conversation_state.selected_part, {})
|
| 201 |
persona_name = conversation_state.persona_name or part_info.get("default_persona_name", "ืืืง ืคื ืืื")
|
| 202 |
|
| 203 |
-
# Generate
|
| 204 |
-
|
| 205 |
-
"
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
"
|
| 216 |
-
f"ืื ื {persona_name}, ืืืจืฆื ืฉืื. ืื ืฉืืืจืช - '{user_message}' - ืื ื ืจืืฆื ืฉืืืื ืืืื ื ืืืจืืืฉื ืืกืืจ ืขื ืื. ืืื ืื ื ืืืื ืืขืืืจ ืื ืืืืืช ืืจืืฆื ืืืืฆื?",
|
| 217 |
-
f"ื{persona_name}, ืื ื ืชืืื ืื ืกื ืืืืื ืฉืืืื ืืจืืฆืื. ืื ืฉืืืจืช ืขื '{user_message}' - ืืื ืื ืืฉืคืืข ืขื ืืืืจืื? ืื ื ืืืื ืฉืืืื ืืืื ืืกืืจ.",
|
| 218 |
-
f"ืื ื {persona_name} ืืื ื ืืื ืืื ืืขืืืจ ืื ืืืฆืื ืืจื ืฉืืืื ืืืื ืืจืืฆืื. '{user_message}' - ืืืื ื ื ืืฆื ืคืชืจืื ืฉืืชืืื ืืืืื."
|
| 219 |
-
],
|
| 220 |
-
"ืืืื": [
|
| 221 |
-
f"ืื ื {persona_name}, ืืืื ืฉืื. ืื ืฉืืืจืช - '{user_message}' - ืื ื ืืื ืืื ืืฉืืืจ ืขืืื. ืืื ืื ืืืื? ืืื ืื ื ืฆืจืื ืืืืื ืืืฉืื?",
|
| 222 |
-
f"ื{persona_name}, ืชืคืงืืื ืืืื ืขืืื. ืื ืฉืืืจืช ืขื '{user_message}' ืืขืืจืจ ืื ืืช ืืืื ืกืืื ืงื ืืืืื ื. ืืื ืื ื ืืืื ืืืืื ืฉืืชื ืืืื?",
|
| 223 |
-
f"ืื ื {persona_name}, ืืฉืืืจ ืื ืืื ืฉืื. '{user_message}' - ืื ื ืืืื ืื ืื ืืืื ืขืืืจื. ืืคืขืืื ืื ื ืฆืจืื ืืืืืช ืงืฉืื ืืื ืืฉืืืจ ืขืืื."
|
| 224 |
-
],
|
| 225 |
-
"ืื ืื ืข/ืช": [
|
| 226 |
-
f"ืื ื {persona_name}, ืืืืง ืฉืืขืืืฃ ืืืืื ืข. ืื ืฉืืืจืช ืขื '{user_message}' - ืื ืืขืืจืจ ืื ืืจืื. ืืืื ืืืื ืคืฉืื... ืื ืืืชืขืกืง ืขื ืื ืขืืฉืื?",
|
| 227 |
-
f"ื{persona_name}, ืื ื ืืจืืืฉ/ื ืงืฆืช ืื ืื ืื ืขื '{user_message}'. ืืืื ื ืืื ืืช ืื ืงืฆืช? ืืคืขืืื ืื ืืกืืจ ืื ืืืชืืืื ืขื ืืื ืืื.",
|
| 228 |
-
f"ืื ื {persona_name} ืืื ื ืืขืืืฃ/ื ืืืืฉืืจ ืืฆื. ืื ืฉืืืจืช - '{user_message}' - ืื ื ืจืื ืืกืืื. ืืชื ืืืื ืฉืืชื ืจืืฆื ืืืืื ืก ืืื ืขืืฉืื?"
|
| 229 |
-
]
|
| 230 |
-
}
|
| 231 |
-
|
| 232 |
-
# Get relevant responses for the part
|
| 233 |
-
part_responses = responses_by_part.get(conversation_state.selected_part, [
|
| 234 |
-
f"ืื ื {persona_name}, {conversation_state.selected_part} ืฉืื. ืฉืืขืชื ืื ืฉืืืจืช ืขื '{user_message}'. ืืื ืืฆื ืืืืื - ืืืฆื ืืื ืืืื ืืื ืืืื ืฉืคื ืขืืจืืช."
|
| 235 |
-
])
|
| 236 |
-
|
| 237 |
-
# Select response based on conversation length for variety
|
| 238 |
-
response_index = len(conversation_state.conversation_history) % len(part_responses)
|
| 239 |
-
response = part_responses[response_index]
|
| 240 |
|
| 241 |
return response
|
| 242 |
|
| 243 |
except Exception as e:
|
| 244 |
logger.error(f"Error generating response: {e}")
|
| 245 |
-
return "ืกืืืื, ื ืชืงืืชื ืืืขืื ืืื ืืช.
|
| 246 |
|
| 247 |
def create_main_interface(self):
|
| 248 |
"""Create the main Gradio interface"""
|
|
@@ -555,10 +580,28 @@ def main():
|
|
| 555 |
else:
|
| 556 |
# Local development settings
|
| 557 |
logger.info("Configuring for local development")
|
|
| 558 |
launch_config.update({
|
| 559 |
"server_name": "127.0.0.1",
|
| 560 |
-
"server_port":
|
| 561 |
-
"share":
|
| 562 |
"quiet": False
|
| 563 |
})
|
| 564 |
|
|
|
|
| 42 |
is_hf_spaces = os.getenv("SPACE_ID") is not None
|
| 43 |
|
| 44 |
if is_hf_spaces:
|
| 45 |
+
logger.info("Running in Hugging Face Spaces - using multilingual model with Hebrew support")
|
| 46 |
+
# Use a better multilingual model that supports Hebrew well
|
| 47 |
+
model_name = "microsoft/DialoGPT-medium" # Better conversational model
|
| 48 |
+
try:
|
| 49 |
+
# Try Hebrew-capable multilingual model first
|
| 50 |
+
model_name = "bigscience/bloomz-560m" # Better Hebrew support
|
| 51 |
+
logger.info(f"Loading multilingual model with Hebrew support: {model_name}")
|
| 52 |
+
except:
|
| 53 |
+
# Fallback to DialoGPT if bloomz fails
|
| 54 |
+
model_name = "microsoft/DialoGPT-medium"
|
| 55 |
+
logger.info(f"Fallback to conversational model: {model_name}")
|
| 56 |
+
|
| 57 |
else:
|
| 58 |
# For local development, try Hebrew-specific model first
|
| 59 |
try:
|
| 60 |
model_name = "yam-peleg/Hebrew-Mistral-7B"
|
| 61 |
logger.info(f"Loading Hebrew model: {model_name}")
|
| 62 |
except:
|
| 63 |
+
# Fallback to better multilingual model
|
| 64 |
+
model_name = "bigscience/bloomz-560m"
|
| 65 |
+
logger.info(f"Falling back to multilingual model: {model_name}")
|
| 66 |
|
| 67 |
# Load tokenizer
|
| 68 |
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
|
| 69 |
|
| 70 |
+
# Add padding token if missing
|
| 71 |
+
if self.tokenizer.pad_token is None:
|
| 72 |
+
self.tokenizer.pad_token = self.tokenizer.eos_token
|
| 73 |
+
|
| 74 |
# Determine the best settings for the environment
|
| 75 |
if torch.cuda.is_available() and not is_hf_spaces:
|
| 76 |
torch_dtype = torch.float16
|
|
|
|
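Around this point the diff picks dtype and device settings from the runtime (SPACE_ID is the environment variable Hugging Face Spaces sets). A small sketch of that decision as a helper; the float16-on-GPU choice comes from the diff, while device_map="auto" on GPU and float32 on CPU are assumptions, since those lines are not shown in the hunk:

import os
import torch

def runtime_settings():
    """Pick model-loading settings for local GPU vs. CPU-only Spaces hardware."""
    on_spaces = os.getenv("SPACE_ID") is not None
    if torch.cuda.is_available() and not on_spaces:
        # Local GPU: half precision; letting accelerate place the weights is assumed.
        return {"torch_dtype": torch.float16, "device_map": "auto"}
    # CPU / Spaces free tier: full precision, no device map (assumed defaults).
    return {"torch_dtype": torch.float32, "device_map": None}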
| 81 |
device_map = None
|
| 82 |
|
| 83 |
# Load model with appropriate settings
|
| 84 |
+
if "mistral" in model_name.lower():
|
| 85 |
# Use CausalLM for Mistral with additional settings
|
| 86 |
self.model = AutoModelForCausalLM.from_pretrained(
|
| 87 |
model_name,
|
|
|
|
| 95 |
self.model = AutoModelForCausalLM.from_pretrained(
|
| 96 |
model_name,
|
| 97 |
torch_dtype=torch_dtype,
|
| 98 |
+
low_cpu_mem_usage=True,
|
| 99 |
+
trust_remote_code=True
|
| 100 |
)
|
| 101 |
|
| 102 |
# Create text generation pipeline with appropriate settings
|
| 103 |
generation_kwargs = {
|
| 104 |
+
"max_new_tokens": 120,
|
| 105 |
+
"temperature": 0.7,
|
| 106 |
"do_sample": True,
|
| 107 |
+
"top_p": 0.9,
|
| 108 |
+
"top_k": 50,
|
| 109 |
+
"pad_token_id": self.tokenizer.pad_token_id,
|
| 110 |
+
"eos_token_id": self.tokenizer.eos_token_id,
|
| 111 |
"return_full_text": False
|
| 112 |
}
|
| 113 |
|
| 114 |
+
# Always use causal LM pipeline for consistent behavior
|
| 115 |
+
self.generator = pipeline(
|
| 116 |
+
"text-generation",
|
| 117 |
+
model=self.model,
|
| 118 |
+
tokenizer=self.tokenizer,
|
| 119 |
+
**generation_kwargs
|
| 120 |
+
)
|
| 121 |
|
| 122 |
+
logger.info(f"Model loaded successfully: {model_name}")
|
| 123 |
|
| 124 |
except Exception as e:
|
| 125 |
logger.error(f"Error loading model: {e}")
|
|
|
|
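For reference, a self-contained sketch of the setup this hunk arrives at: pad-token fallback, causal-LM load, and a text-generation pipeline with the sampling values from the diff. The checkpoint name and generation kwargs mirror the diff; the English test prompt and variable names are illustrative.

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "bigscience/bloomz-560m"
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:                 # some causal LMs ship without one
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)

generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=120,
    temperature=0.7,
    do_sample=True,
    top_p=0.9,
    top_k=50,
    pad_token_id=tokenizer.pad_token_id,
    eos_token_id=tokenizer.eos_token_id,
    return_full_text=False,   # only the continuation is returned
)

print(generator("User: Hello!\nAssistant:")[0]["generated_text"])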
| 159 |
# Prepare conversation context
|
| 160 |
context = self.conversation_manager.get_conversation_context(conversation_state)
|
| 161 |
|
| 162 |
+
# Generate response with model
|
| 163 |
response = None
|
| 164 |
if self.generator:
|
| 165 |
try:
|
| 166 |
+
# Get part information for better context
|
| 167 |
+
part_info = DEFAULT_PARTS.get(conversation_state.selected_part, {})
|
| 168 |
+
part_description = part_info.get("description", conversation_state.selected_part)
|
| 169 |
+
persona_name = conversation_state.persona_name or part_info.get("default_persona_name", "ืืืง ืคื ืืื")
|
| 170 |
+
|
| 171 |
+
# Create a well-structured prompt using the full system prompt
|
| 172 |
+
full_system_prompt = system_prompt.strip()
|
| 173 |
+
|
| 174 |
+
prompt_template = f"""{full_system_prompt}
|
| 175 |
+
|
| 176 |
+
ืืงืฉืจ ื ืืกืฃ: {conversation_state.user_context if conversation_state.user_context else 'ืืื ืืงืฉืจ ืืืืื'}
|
| 177 |
+
|
| 178 |
+
ืฉืืื ืขื ืื:
|
| 179 |
+
{context}
|
| 180 |
+
|
| 181 |
+
ืืืฉืชืืฉ ืืืจ: "{user_message}"
|
| 182 |
+
|
| 183 |
+
{persona_name} ืืืื:"""
|
| 184 |
+
|
| 185 |
+
logger.info(f"Generating response for part: {conversation_state.selected_part}")
|
| 186 |
+
|
| 187 |
+
# Generate with the model
|
| 188 |
+
outputs = self.generator(
|
| 189 |
+
prompt_template,
|
| 190 |
+
max_new_tokens=80,
|
| 191 |
+
temperature=0.7,
|
| 192 |
+
do_sample=True,
|
| 193 |
+
top_p=0.9,
|
| 194 |
+
pad_token_id=self.tokenizer.pad_token_id,
|
| 195 |
+
eos_token_id=self.tokenizer.eos_token_id
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
if outputs and len(outputs) > 0:
|
| 199 |
response = outputs[0]["generated_text"].strip()
|
| 200 |
+
logger.info(f"Raw model output length: {len(response)}")
|
| 201 |
|
| 202 |
+
# Clean up response - remove prompt and extract only the new part
|
| 203 |
+
if response:
|
| 204 |
+
# Try to extract only the response part
|
| 205 |
+
response_lines = response.split('\n')
|
| 206 |
+
for i, line in enumerate(response_lines):
|
| 207 |
+
if f"{persona_name} ืืืื:" in line and i + 1 < len(response_lines):
|
| 208 |
+
response = '\n'.join(response_lines[i+1:]).strip()
|
| 209 |
+
break
|
| 210 |
+
|
| 211 |
+
# If that didn't work, try other cleanup methods
|
| 212 |
+
if not response or len(response) < 10:
|
| 213 |
+
# Look for the response after the last colon
|
| 214 |
+
if ':' in outputs[0]["generated_text"]:
|
| 215 |
+
response = outputs[0]["generated_text"].split(':')[-1].strip()
|
| 216 |
|
| 217 |
+
# Validate and clean the response
|
| 218 |
+
if response:
|
| 219 |
+
# Remove any remaining prompt artifacts
|
| 220 |
+
response = response.replace(prompt_template, "").strip()
|
| 221 |
+
response = response.replace(f"{persona_name} ืืืื:", "").strip()
|
| 222 |
+
response = response.replace("ืืืฉืชืืฉ ืืืจ:", "").strip()
|
| 223 |
+
|
| 224 |
+
# Remove incomplete sentences or artifacts
|
| 225 |
+
if response.startswith('"') and not response.endswith('"'):
|
| 226 |
+
response = response[1:]
|
| 227 |
+
|
| 228 |
+
# Ensure minimum quality
|
| 229 |
+
if len(response.strip()) >= 10 and not response.lower().startswith('the user'):
|
| 230 |
+
logger.info(f"Generated response: {response[:50]}...")
|
| 231 |
+
else:
|
| 232 |
+
logger.warning(f"Response too short or invalid: '{response}'")
|
| 233 |
+
response = None
|
| 234 |
+
else:
|
| 235 |
+
logger.warning("Empty response after cleanup")
|
| 236 |
+
response = None
|
| 237 |
else:
|
| 238 |
+
logger.warning("No outputs from model")
|
| 239 |
response = None
|
| 240 |
|
| 241 |
except Exception as gen_error:
|
| 242 |
+
logger.error(f"Model generation failed: {gen_error}")
|
| 243 |
response = None
|
| 244 |
|
| 245 |
+
# If we still don't have a response, generate a contextual one using the persona
|
| 246 |
if not response:
|
| 247 |
+
logger.info("Using contextual persona-based response generation")
|
| 248 |
part_info = DEFAULT_PARTS.get(conversation_state.selected_part, {})
|
| 249 |
persona_name = conversation_state.persona_name or part_info.get("default_persona_name", "ืืืง ืคื ืืื")
|
| 250 |
+
part_description = part_info.get("description", "")
|
| 251 |
|
| 252 |
+
# Generate a more dynamic response based on the actual persona and context
|
| 253 |
+
if conversation_state.selected_part == "ืืงืื ืืืืงืืจืชื":
|
| 254 |
+
response = f"ืื ื {persona_name}. ืฉืืขืชื ืื ืฉืืืจืช - '{user_message}'. ืื ื ืืจืืืฉ ืฉืฆืจืื ืืืืื ืืช ืื ืืืชืจ ืืขืืืง. ืื ืืืืช ืื ืืข ืืืชื ืืื? ืืื ืืฉืืช ืขื ืื ืืืฉืืืืช?"
|
| 255 |
+
elif conversation_state.selected_part == "ืืืื/ื ืืคื ืืืืช":
|
| 256 |
+
response = f"ืื ื {persona_name}, ืืืืง ืืฆืขืืจ ืฉืื. ืื ืฉืืืจืช ืขื '{user_message}' ื ืืืข ืื. ืื ืืืจื ืื ืืืจืืืฉ... ืงืฆืช ืืคืืื ืืื ืื ืกืงืจื. ืืชื ืืืืช ืฉืืืข ืืืชื ืขืืฉืื?"
|
| 257 |
+
elif conversation_state.selected_part == "ืืืจืฆื":
|
| 258 |
+
response = f"ืื ื {persona_name}. ืื ืฉืืืจืช - '{user_message}' - ืื ื ืจืืฆื ืืืืื ืฉืืืื ืืืื ืืกืืจ ืขื ืื. ืืื ืืชื ืืืฉื ืฉืื ืืฉืคืืข ืขื ืืืืจืื? ืืืื ื ื ืืฆื ืคืชืจืื ืฉืืชืืื ืืืืื."
|
| 259 |
+
elif conversation_state.selected_part == "ืืืื":
|
| 260 |
+
response = f"ืื ื {persona_name}, ืืฉืืืจ ืฉืื. '{user_message}' - ืื ื ืืขืจืื ืืช ืืืฆื. ืืื ืื ืืืื? ืืื ืื ื ืฆืจืื ืืืืื ืืืฉืื? ืชืคืงืืื ืืฉืืืจ ืขืืื."
|
| 261 |
+
elif conversation_state.selected_part == "ืื ืื ืข/ืช":
|
| 262 |
+
response = f"ืื ื {persona_name}. ืื ืฉืืืจืช ืขื '{user_message}' ืืขืืจืจ ืื ืงืฆืช ืืจืื. ืืืื... ืื ืืืืืื ืืืชืืืื ืขื ืื ืขืืฉืื? ืืคืขืืื ืื ืืกืืจ ืืงืืช ืืคืกืงื."
|
| 263 |
+
else:
|
| 264 |
+
response = f"ืื ื {persona_name}, {conversation_state.selected_part} ืฉืื. ืฉืืขืชื ืื ืฉืืืจืช ืขื '{user_message}'. ืืืื ื ื ืฉืืื ืขื ืื ืืื."
|
| 265 |
|
| 266 |
return response
|
| 267 |
|
| 268 |
except Exception as e:
|
| 269 |
logger.error(f"Error generating response: {e}")
|
| 270 |
+
return "ืกืืืื, ื ืชืงืืชื ืืืขืื ืืื ืืช. ืืืื ื ื ื ืกื ืฉืื."
|
| 271 |
|
| 272 |
def create_main_interface(self):
|
| 273 |
"""Create the main Gradio interface"""
|
|
|
|
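The cleanup steps above (split on the persona cue, fall back to the last colon, drop an unbalanced leading quote, reject very short fragments) boil down to a small post-processing routine. A hypothetical helper in that spirit; the cue string and example text are ours, and the real code builds its cue from persona_name:

from typing import Optional

def extract_reply(generated: str, cue: str, min_len: int = 10) -> Optional[str]:
    """Return the text that follows `cue`, or None if nothing usable remains."""
    text = generated.strip()
    if cue in text:
        text = text.split(cue, 1)[1].strip()
    if text.startswith('"') and not text.endswith('"'):  # drop an unbalanced quote
        text = text[1:].strip()
    return text if len(text) >= min_len else None

# Usage with an illustrative cue:
print(extract_reply('Dana responds: I hear you, tell me more about that.', 'responds:'))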
| 580 |
else:
|
| 581 |
# Local development settings
|
| 582 |
logger.info("Configuring for local development")
|
| 583 |
+
|
| 584 |
+
# Try to find an available port
|
| 585 |
+
default_port = int(os.getenv("GRADIO_SERVER_PORT", "7861"))
|
| 586 |
+
available_port = default_port
|
| 587 |
+
|
| 588 |
+
# Check if port is available, if not find next available
|
| 589 |
+
import socket
|
| 590 |
+
for port_try in range(default_port, default_port + 10):
|
| 591 |
+
try:
|
| 592 |
+
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
| 593 |
+
s.bind(('127.0.0.1', port_try))
|
| 594 |
+
available_port = port_try
|
| 595 |
+
break
|
| 596 |
+
except OSError:
|
| 597 |
+
continue
|
| 598 |
+
|
| 599 |
+
logger.info(f"Using port {available_port} for local development")
|
| 600 |
+
|
| 601 |
launch_config.update({
|
| 602 |
"server_name": "127.0.0.1",
|
| 603 |
+
"server_port": available_port,
|
| 604 |
+
"share": False, # Disable share for local development - can be enabled manually
|
| 605 |
"quiet": False
|
| 606 |
})
|
| 607 |
|
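The port probe added at the end of the diff can be read as a small reusable helper; a sketch under the same assumptions (bind to 127.0.0.1, scan ten ports starting from GRADIO_SERVER_PORT or 7861):

import os
import socket

def find_available_port(start=None, attempts=10):
    """Return the first localhost port in [start, start + attempts) that binds."""
    start = start or int(os.getenv("GRADIO_SERVER_PORT", "7861"))
    for port in range(start, start + attempts):
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind(("127.0.0.1", port))
            return port
        except OSError:
            continue
    raise RuntimeError(f"No free port found in {start}-{start + attempts - 1}")

# e.g. launch_config["server_port"] = find_available_port()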