Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -3,42 +3,45 @@ from dotenv import load_dotenv
 import gradio as gr
 from huggingface_hub import InferenceClient

-# Load …
+# Load environment variables (for local tests; on the Space, secrets are used)
 load_dotenv()
 api_key = os.getenv("HF_API_TOKEN")

-# …
-…
+# Check that the token exists
+if not api_key:
+    raise ValueError("Erro: HF_API_TOKEN não encontrado. Configure-o nas secrets do Space (Settings > Repository secrets).")

-# …
+# Initialize the inference client with the Mistral model
+client = InferenceClient(token=api_key, model="mistralai/Mistral-7B-Instruct-v0.3")
+
+# Chatbot function
 def chat_with_llm(message, history):
     try:
         # Build the message history
         messages = []
         for user_msg, bot_msg in history:
             messages.append({"role": "user", "content": user_msg})
             if bot_msg:
                 messages.append({"role": "assistant", "content": bot_msg})
         messages.append({"role": "user", "content": message})

         # Call the Mistral API
         response = client.chat.completions.create(
             model="mistralai/Mistral-7B-Instruct-v0.3",
             messages=messages,
             max_tokens=150,
             temperature=0.7,
         )
         return response.choices[0].message.content
     except Exception as e:
-        return f"Erro: {str(e)}. Verifique …
+        return f"Erro: {str(e)}. Verifique o token API, conexão com a internet ou limites da API gratuita."

-# Create the interface for …
+# Create the Gradio interface
 demo = gr.ChatInterface(
     fn=chat_with_llm,
     title="Chatbot com Mistral (Gratuito via HF API)",
     description="Teste o chatbot usando Mistral. Plano gratuito com limites.",
 )

-# Launch the app (on Spaces this is automatic)
 if __name__ == "__main__":
     demo.launch()
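For a quick local check before pushing this fix to the Space, the same client call can be exercised outside Gradio. The sketch below is not part of the commit: the file name smoke_test.py and the test prompt are made up, and it assumes python-dotenv and huggingface_hub are installed and that a local .env file defines HF_API_TOKEN (on the Space itself, the token comes from Repository secrets, as the new comment notes).

# smoke_test.py — hypothetical local smoke test mirroring app.py
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

load_dotenv()  # reads HF_API_TOKEN from a local .env file, if present
api_key = os.getenv("HF_API_TOKEN")
if not api_key:
    raise SystemExit("HF_API_TOKEN not set; add it to .env or export it.")

# Same client setup and chat call as app.py
client = InferenceClient(token=api_key, model="mistralai/Mistral-7B-Instruct-v0.3")
response = client.chat.completions.create(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
    max_tokens=50,
    temperature=0.7,
)
print(response.choices[0].message.content)

If this prints a reply locally, the token and model access work, and any remaining runtime error on the Space is likely configuration-related, for example the missing secret that the new ValueError check now surfaces.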