import os
from dotenv import load_dotenv
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Load environment variables
load_dotenv()
api_key = os.getenv("HF_API_TOKEN")
if not api_key:
    raise ValueError("Error: HF_API_TOKEN not found. Set it in the Space secrets.")

# 4-bit quantization configuration (bitsandbytes NF4)
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype="bfloat16"
)
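# Background on the settings above (a note, not required by the code): NF4
# ("normal float 4") is a 4-bit data type designed for normally-distributed
# weights, double quantization also compresses the per-block quantization
# constants, and matrix multiplies are computed in bfloat16.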

# Initialize the model and tokenizer. Note: a GPTQ checkpoint such as
# "Qwen/Qwen2-7B-Instruct-GPTQ-Int4" is already quantized, so combining it with a
# BitsAndBytesConfig conflicts; load the unquantized base model and let
# bitsandbytes quantize it at load time instead.
model_id = "Qwen/Qwen2-7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=api_key)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quant_config,
    device_map="auto",
    token=api_key
)
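# device_map="auto" relies on the accelerate package to place layers on the
# available GPU(s), spilling to CPU RAM if the model does not fit.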

# Chatbot function
def chat_with_llm(message, history):
    try:
        # Rebuild the conversation history in the chat-message format
        messages = []
        for user_msg, bot_msg in history:
            messages.append({"role": "user", "content": user_msg})
            if bot_msg:
                messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": message})
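        # At this point `messages` holds the whole dialog as role/content dicts, e.g.
        # [{"role": "user", ...}, {"role": "assistant", ...}, {"role": "user", ...}]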

        # Tokenize the input with the model's chat template; add_generation_prompt
        # appends the assistant turn marker so the model knows to respond
        inputs = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_tensors="pt"
        ).to(model.device)
        
        # Generate the response
        outputs = model.generate(
            inputs,
            max_new_tokens=500,
            temperature=0.5,
            top_p=0.9,
            do_sample=True
        )
        # Decode only the newly generated tokens, skipping the echoed prompt
        response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
        return response
    except Exception as e:
        return f"Erro: {str(e)}. Verifique o token API, conexão com a internet ou requisitos de hardware."

# Gradio interface
demo = gr.ChatInterface(
    fn=chat_with_llm,
    title="Chatbot with Quantized Qwen2-7B",
    description="A chatbot using Qwen2-7B quantized to 4-bit for greater efficiency.",
)

if __name__ == "__main__":
    demo.launch()
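
# A minimal sketch of the dependencies this script needs (names inferred from the
# code above; pin versions in a real requirements.txt):
#   gradio
#   transformers
#   torch
#   accelerate      # needed for device_map="auto"
#   bitsandbytes    # needed for BitsAndBytesConfig
#   python-dotenv   # provides the dotenv module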