Vivek16 committed on
Commit
46e2879
·
verified ·
1 Parent(s): 275aa80

Update app.py

Files changed (1)
  1. app.py +47 -57
app.py CHANGED
@@ -1,76 +1,66 @@
  import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
- from peft import PeftModel
- import torch
-
- # -----------------------------
- # CPU-friendly model
- # -----------------------------
- MODEL_NAME = "tiiuae/falcon-7b-instruct"  # smaller CPU-friendly model
-
- print("Loading tokenizer...")
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
-
- print("Loading model...")
- base_model = AutoModelForCausalLM.from_pretrained(
-     MODEL_NAME,
-     device_map=None,  # CPU only
-     torch_dtype=torch.float32
- )
-
- # Load LoRA if exists (optional)
- try:
-     model = PeftModel.from_pretrained(base_model, MODEL_NAME, device_map=None)
- except:
-     model = base_model
-
- model.eval()
-
- # -----------------------------
- # Response function
- # -----------------------------
- def respond(message, history, system_message, max_tokens, temperature, top_p):
-     # Limit max tokens for CPU safety
-     if max_tokens > 128:
-         max_tokens = 128
-
-     # Build prompt
-     prompt = system_message + "\n"
-     for h in history:
-         prompt += f"User: {h['content']}\n"
-     prompt += f"User: {message}\nBot:"
-
-     inputs = tokenizer(prompt, return_tensors="pt")
-
-     gen_config = GenerationConfig(
-         max_new_tokens=max_tokens,
-         temperature=temperature,
-         top_p=top_p,
-         do_sample=True
-     )
-
-     with torch.no_grad():
-         output_ids = model.generate(**inputs, **gen_config.to_dict())
-     output = tokenizer.decode(output_ids[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
-     return output
-
- # -----------------------------
- # Gradio Chat Interface
- # -----------------------------
+ from huggingface_hub import InferenceClient
+
+
+ def respond(
+     message,
+     history: list[dict[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+     hf_token: gr.OAuthToken,
+ ):
+     """
+     Simple CPU-compatible chat using Hugging Face InferenceClient.
+     Works without GPU and 4-bit models.
+     """
+     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")  # keep your model
+
+     # Build messages for chat
+     messages = [{"role": "system", "content": system_message}]
+     messages.extend(history)
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+     try:
+         # Streaming response
+         for chunk in client.chat_completion(
+             messages,
+             max_tokens=max_tokens,
+             stream=True,
+             temperature=temperature,
+             top_p=top_p,
+         ):
+             choices = getattr(chunk, "choices", None) or chunk.get("choices", [])
+             token = ""
+             if len(choices) and getattr(choices[0].delta, "content", None):
+                 token = choices[0].delta.content
+             elif len(choices) and choices[0].get("delta", {}).get("content"):
+                 token = choices[0]["delta"]["content"]
+             response += token
+             yield response
+
+     except Exception as e:
+         yield f"[Error streaming response] {str(e)}"
+
+
+ # Gradio ChatInterface
  chatbot = gr.ChatInterface(
      respond,
      type="messages",
      additional_inputs=[
          gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=128, value=64, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
          gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
      ],
  )

+ # Build Gradio UI
  with gr.Blocks() as demo:
+     with gr.Sidebar():
+         gr.LoginButton()  # OAuth login
      chatbot.render()

  if __name__ == "__main__":
-     demo.launch(server_name="0.0.0.0", server_port=7860)
-
+     # Launch on CPU, with a public link
+     demo.launch(share=True)
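
A note on the streaming loop in the new `respond`: `InferenceClient.chat_completion(..., stream=True)` yields chunks whose incremental text sits in `choices[0].delta.content`, and the handler accumulates those deltas and re-yields the growing string so Gradio updates the chat bubble in place. A minimal standalone sketch of that pattern, assuming an `HF_TOKEN` environment variable stands in for the Space's OAuth token:

```python
# Minimal sketch of the streaming pattern used by the new respond();
# HF_TOKEN and the demo prompt are assumptions for local testing.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(token=os.environ["HF_TOKEN"], model="openai/gpt-oss-20b")

messages = [
    {"role": "system", "content": "You are a friendly Chatbot."},
    {"role": "user", "content": "Say hello in one sentence."},
]

for chunk in client.chat_completion(messages, max_tokens=64, stream=True):
    delta = chunk.choices[0].delta.content  # may be None for role-only chunks
    if delta:
        print(delta, end="", flush=True)
print()
```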
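
The `hf_token: gr.OAuthToken` parameter and the `gr.LoginButton()` in the sidebar are two halves of one mechanism: on a Space with OAuth enabled, Gradio injects the signed-in user's token into any event handler that declares a `gr.OAuthToken`-typed argument. A hedged sketch of that wiring, using a hypothetical `whoami` handler that is not part of this commit:

```python
# Sketch of Gradio's OAuth token injection, assuming the app runs as a
# Hugging Face Space with OAuth enabled; whoami() is hypothetical.
import gradio as gr

def whoami(token: gr.OAuthToken | None = None) -> str:
    # Gradio passes None until the user signs in via the LoginButton.
    if token is None:
        return "Not logged in."
    return "Logged in; handlers can now call the Inference API with token.token."

with gr.Blocks() as demo:
    gr.LoginButton()
    status = gr.Textbox(label="Status")
    gr.Button("Check login").click(whoami, inputs=None, outputs=status)

demo.launch()
```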