Merge pull request #30 from The-Obstacle-Is-The-Way/chore/env-cleanup-model-upgrade
Files changed:

- .env.example +17 -9
- src/app.py +3 -1
- src/utils/config.py +6 -13
- tests/unit/tools/test_clinicaltrials.py +8 -13
.env.example (CHANGED)

```diff
@@ -1,22 +1,30 @@
-# LLM
+# ============== LLM CONFIGURATION ==============
+
+# Provider: "openai" or "anthropic"
+LLM_PROVIDER=openai
+
+# API Keys (at least one required for full LLM analysis)
 OPENAI_API_KEY=sk-your-key-here
 ANTHROPIC_API_KEY=sk-ant-your-key-here
 
-#
-
+# Model names (optional - sensible defaults)
+OPENAI_MODEL=gpt-5.1
+ANTHROPIC_MODEL=claude-sonnet-4-5-20250929
 
-#
-HF_TOKEN=hf_your-token-here
+# ============== AGENT CONFIGURATION ==============
 
-# Agent Config
 MAX_ITERATIONS=10
+SEARCH_TIMEOUT=30
 LOG_LEVEL=INFO
 
-# ==============
+# ============== EXTERNAL SERVICES ==============
+
+# PubMed (optional - higher rate limits)
+NCBI_API_KEY=your-ncbi-key-here
 
-# Modal (
+# Modal Sandbox (optional - for secure code execution)
 MODAL_TOKEN_ID=ak-your-modal-token-id-here
 MODAL_TOKEN_SECRET=your-modal-token-secret-here
 
-# Vector Database
+# Vector Database (optional - for LlamaIndex RAG)
 CHROMA_DB_PATH=./chroma_db
```
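These keys map onto the pydantic-settings `Settings` class in src/utils/config.py (diffed below). A minimal sketch of how the new values are expected to resolve; the `src.utils.config` import path, the absence of a conflicting .env file, and pydantic-settings' default case-insensitive env matching are assumptions, not things the diff shows:

```python
# Sketch only (not part of the PR): how the new .env keys surface on Settings.
import os

from src.utils.config import Settings  # assumed import path

os.environ["LLM_PROVIDER"] = "anthropic"
os.environ["ANTHROPIC_MODEL"] = "claude-sonnet-4-5-20250929"

settings = Settings()
assert settings.llm_provider == "anthropic"
assert settings.anthropic_model == "claude-sonnet-4-5-20250929"
assert settings.openai_model == "gpt-5.1"  # not set above, so the Field default applies
```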
src/app.py (CHANGED)

```diff
@@ -68,9 +68,11 @@ def configure_orchestrator(
     if api_provider == "anthropic":
         anthropic_provider = AnthropicProvider(api_key=user_api_key)
         model = AnthropicModel(settings.anthropic_model, provider=anthropic_provider)
-
+    elif api_provider == "openai":
         openai_provider = OpenAIProvider(api_key=user_api_key)
         model = OpenAIModel(settings.openai_model, provider=openai_provider)
+    else:
+        raise ValueError(f"Unsupported API provider: {api_provider}")
     judge_handler = JudgeHandler(model=model)
 
     return create_orchestrator(
```
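The new `elif`/`else` keeps each provider in its own branch and rejects anything else instead of silently falling through to the OpenAI model. A hypothetical test for the failure path; it assumes `configure_orchestrator` accepts `api_provider` and `user_api_key` as keyword arguments with no other required parameters, which this hunk does not show:

```python
# Hypothetical sketch, not part of the PR: exercising the new else branch.
import pytest

from src.app import configure_orchestrator  # assumed import path


def test_unsupported_provider_raises() -> None:
    # Any provider other than "anthropic" or "openai" should now fail fast.
    with pytest.raises(ValueError, match="Unsupported API provider"):
        configure_orchestrator(api_provider="mistral", user_api_key="sk-dummy")
```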
src/utils/config.py (CHANGED)

```diff
@@ -26,8 +26,10 @@ class Settings(BaseSettings):
     llm_provider: Literal["openai", "anthropic"] = Field(
         default="openai", description="Which LLM provider to use"
     )
-    openai_model: str = Field(default="gpt-
-    anthropic_model: str = Field(
+    openai_model: str = Field(default="gpt-5.1", description="OpenAI model name")
+    anthropic_model: str = Field(
+        default="claude-sonnet-4-5-20250929", description="Anthropic model"
+    )
 
     # Embedding Configuration
     # Note: OpenAI embeddings require OPENAI_API_KEY (Anthropic has no embeddings API)
@@ -52,23 +54,14 @@ class Settings(BaseSettings):
     # Logging
     log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR"] = "INFO"
 
-    #
+    # External Services
     modal_token_id: str | None = Field(default=None, description="Modal token ID")
     modal_token_secret: str | None = Field(default=None, description="Modal token secret")
     chroma_db_path: str = Field(default="./chroma_db", description="ChromaDB storage path")
-    enable_modal_analysis: bool = Field(
-        default=False,
-        description="Opt-in flag to enable Modal analysis. Must also have modal_available=True.",
-    )
 
     @property
     def modal_available(self) -> bool:
-        """Check if Modal credentials are configured
-
-        Note: This is a credentials check, NOT an opt-in flag.
-        Use `enable_modal_analysis` to opt-in, then check `modal_available` for credentials.
-        Typical usage: `if settings.enable_modal_analysis and settings.modal_available`
-        """
+        """Check if Modal credentials are configured."""
         return bool(self.modal_token_id and self.modal_token_secret)
 
     def get_api_key(self) -> str:
```
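With `enable_modal_analysis` removed, `modal_available` is the single gate for Modal usage and is purely a credentials check. A sketch of the resulting behavior, assuming no MODAL_TOKEN_* values are also set in the environment or a .env file:

```python
# Sketch (assumption, not from the PR): modal_available now depends only on
# whether both Modal credentials are present.
from src.utils.config import Settings  # assumed import path

with_creds = Settings(modal_token_id="ak-xxx", modal_token_secret="xxx")
assert with_creds.modal_available is True

without_creds = Settings(modal_token_id=None, modal_token_secret=None)
assert without_creds.modal_available is False
```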
tests/unit/tools/test_clinicaltrials.py (CHANGED)

```diff
@@ -123,26 +123,21 @@ class TestClinicalTrialsTool:
         await tool.search("metformin alzheimer")
 
 
-def _can_reach_clinicaltrials() -> bool:
-    """Check if ClinicalTrials.gov API is reachable."""
-    try:
-        resp = requests.get("https://clinicaltrials.gov/api/v2/studies", timeout=5)
-        return resp.status_code < 500
-    except (requests.RequestException, OSError):
-        return False
-
-
 class TestClinicalTrialsIntegration:
     """Integration tests (marked for separate run)."""
 
     @pytest.mark.integration
     @pytest.mark.asyncio
-    @pytest.mark.skipif(
-        not _can_reach_clinicaltrials(),
-        reason="ClinicalTrials.gov API not reachable (network/SSL issue)",
-    )
     async def test_real_api_call(self) -> None:
         """Test actual API call (requires network)."""
+        # Skip at runtime if API unreachable (avoids network call at collection time)
+        try:
+            resp = requests.get("https://clinicaltrials.gov/api/v2/studies", timeout=5)
+            if resp.status_code >= 500:
+                pytest.skip("ClinicalTrials.gov API not reachable (server error)")
+        except (requests.RequestException, OSError):
+            pytest.skip("ClinicalTrials.gov API not reachable (network/SSL issue)")
+
         tool = ClinicalTrialsTool()
         results = await tool.search("metformin diabetes", max_results=3)
 
```
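Moving the reachability probe from a module-level `skipif` into the test body means the HTTP request no longer fires at collection time; it only runs when the integration test is actually selected via the `integration` marker. If more ClinicalTrials.gov integration tests appear later, the same runtime skip could be factored into a fixture; a sketch under that assumption:

```python
# Optional refactor sketch, not part of this PR: the runtime skip as a
# reusable pytest fixture for ClinicalTrials.gov integration tests.
import pytest
import requests


@pytest.fixture
def clinicaltrials_reachable() -> None:
    """Skip the requesting test when ClinicalTrials.gov is unreachable."""
    try:
        resp = requests.get("https://clinicaltrials.gov/api/v2/studies", timeout=5)
        if resp.status_code >= 500:
            pytest.skip("ClinicalTrials.gov API not reachable (server error)")
    except (requests.RequestException, OSError):
        pytest.skip("ClinicalTrials.gov API not reachable (network/SSL issue)")
```

A test opts in by listing `clinicaltrials_reachable` among its parameters; the probe then runs once per test that requests it.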