Deminiko committed
Commit · 85b20ff
Parent(s): be72749
Fix: Update Gemini model names, add dependency version floors, use proven MCP endpoint

- Fixed invalid model name: gemini-2.5-flash-lite-preview -> gemini-2.0-flash
- Fixed typo: gemini-2.5-flash-lite-lite -> gemini-2.0-flash
- Updated requirements.txt with minimum-version constraints (>=)
- Changed the default MCP endpoint to the proven nlarchive-mcp-server-finder-monitor Space
- Compatible with the latest google-genai SDK (v1.52.0)
- agent.py +1 -1
- app.py +1 -1
- inference.py +3 -3
- requirements.txt +7 -7
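The commit message claims compatibility with the current google-genai SDK, and the quickest sanity check for the renamed model is a one-off call through genai.Client. A minimal sketch, assuming the SDK is installed and a GEMINI_API_KEY environment variable holds a valid key (both assumptions, not part of this diff):

# Sanity-check the renamed model against the live API.
# Assumes google-genai>=1.0.0 and a GEMINI_API_KEY env var.
import os
from google import genai

client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
response = client.models.generate_content(
    model="gemini-2.0-flash",  # the name this commit switches to
    contents="Reply with the single word: ok",
)
print(response.text)  # any non-empty reply confirms the model name resolves

An invalid model name fails this call immediately, which is exactly the failure mode the old gemini-2.5-flash-lite-* names hit.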
agent.py
CHANGED
@@ -413,7 +413,7 @@ class AsyncPersistentMCPClient:
         except Exception as e:
             print(f"AsyncPersistentMCPClient: General error during disconnect for {self.url}: {e}")
 
-def get_mcp_client(url: str = "https://…
+def get_mcp_client(url: str = "https://nlarchive-mcp-server-finder-monitor.hf.space/gradio_api/mcp/sse") -> AsyncPersistentMCPClient:
     """Get or create an MCP client with enhanced global connection pooling."""
     # Phase 2 Optimization: Use global connection pool
     with _global_connection_lock:
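The hunk shows only the top of get_mcp_client, but the docstring and _global_connection_lock point at a URL-keyed pool of clients guarded by a module-level lock. A rough sketch of that pattern; the pool dict, the stub class body, and the constructor signature are assumptions here, only the lock name and the default URL come from the diff:

import threading

class AsyncPersistentMCPClient:
    """Stub standing in for the real client defined earlier in agent.py."""
    def __init__(self, url: str):
        self.url = url

_global_connection_lock = threading.Lock()
_global_connection_pool: dict[str, AsyncPersistentMCPClient] = {}  # assumed name

def get_mcp_client(url: str = "https://nlarchive-mcp-server-finder-monitor.hf.space/gradio_api/mcp/sse") -> AsyncPersistentMCPClient:
    """Get or create an MCP client with enhanced global connection pooling."""
    with _global_connection_lock:
        # Reuse one client per endpoint so repeated calls share a connection.
        if url not in _global_connection_pool:
            _global_connection_pool[url] = AsyncPersistentMCPClient(url)
        return _global_connection_pool[url]

Baking the endpoint into a default argument keeps every call site unchanged, which is why this part of the commit is a one-line diff.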
app.py
CHANGED
@@ -53,7 +53,7 @@ def validate_and_set_api_key(api_key, is_user_provided=True):
         initialize()
 
         # Make a simple test call to verify the API key works
-        test_response = generate_content("Hello", model_name="gemini-2.…
+        test_response = generate_content("Hello", model_name="gemini-2.0-flash")
 
         # If we get here, the API key works
         with _api_key_lock:
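The probe call here does double duty: it exercises the API key and, after this commit, the default model name at the same time. Stripped to its core, the validate-by-test-call pattern looks roughly like this (the function name and return shape are illustrative; only the generate_content probe comes from the diff):

from inference import generate_content  # the repo's own wrapper, per inference.py below

def api_key_works() -> bool:
    """Return True if a throwaway generation call succeeds."""
    try:
        test_response = generate_content("Hello", model_name="gemini-2.0-flash")
        return bool(test_response)  # any response means the key was accepted
    except Exception:
        return False  # auth failures (and anything else) count as invalid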
inference.py
CHANGED
@@ -31,7 +31,7 @@ def generate_content(prompt: str, model_name: str = None, allow_fallbacks: bool
 
     Args:
         prompt: The prompt to send to the model.
-        model_name: The name of the model to use (e.g., "gemini-2.…
+        model_name: The name of the model to use (e.g., "gemini-2.0-flash", "gemini-2.5-flash").
             If None, a default model will be used.
         allow_fallbacks: (Currently not directly used by genai.Client.models.generate_content,
             but kept for compatibility with agent.py structure)
 
@@ -49,7 +49,7 @@ def generate_content(prompt: str, model_name: str = None, allow_fallbacks: bool
         raise RuntimeError("Google Generative AI client is not initialized. Call initialize() first.")
 
     # Default model if not specified
-    effective_model_name = model_name if model_name else "gemini-2.…
+    effective_model_name = model_name if model_name else "gemini-2.0-flash"
 
     # Prepare generation configuration for the API
     config_obj = None
 
@@ -93,7 +93,7 @@ if __name__ == '__main__':
 
         sample_prompt_2 = "What is the capital of France?"
         print(f"\nSending prompt: '{sample_prompt_2}'")
-        generated_text_2 = generate_content(sample_prompt_2, model_name="gemini-2.…
+        generated_text_2 = generate_content(sample_prompt_2, model_name="gemini-2.0-flash")  # Example with a different model
         print("\nGenerated text:")
         print(generated_text_2)
     except Exception as e:
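After this commit the literal "gemini-2.0-flash" appears in app.py, in this docstring, and in two code paths here, so the next rename can drift out of sync the same way gemini-2.5-flash-lite-lite did. A small, hypothetical refactor (the constant and helper names are invented, not in inference.py):

# One module-level default so a future rename touches a single line.
DEFAULT_GEMINI_MODEL = "gemini-2.0-flash"  # invented constant name

def resolve_model_name(model_name: str | None = None) -> str:
    """Mirrors the `model_name if model_name else ...` fallback above."""
    return model_name or DEFAULT_GEMINI_MODEL

assert resolve_model_name() == "gemini-2.0-flash"
assert resolve_model_name("gemini-2.5-flash") == "gemini-2.5-flash"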
requirements.txt
CHANGED
@@ -1,7 +1,7 @@
-gradio[mcp]
-smolagents[mcp]
-google-genai
-python-dotenv
-networkx
-matplotlib
-numpy
+gradio[mcp]>=5.0.0
+smolagents[mcp]>=1.0.0
+google-genai>=1.0.0
+python-dotenv>=1.0.0
+networkx>=3.0
+matplotlib>=3.7.0
+numpy>=1.24.0
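These are minimum-version floors, not exact pins, so two installs of this file can still resolve different versions. To see what pip actually picked, the standard library is enough; a small sketch, with the package list copied from the file above:

# Print the resolved version of each dependency, stdlib only.
from importlib.metadata import PackageNotFoundError, version

for pkg in ("gradio", "smolagents", "google-genai",
            "python-dotenv", "networkx", "matplotlib", "numpy"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")

If exact reproducibility were the goal, == specifiers (or a pip freeze snapshot) would lock versions instead of only bounding them from below.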