# SadTalker / app.py
# adharshvs — Update app.py (commit 50406b2, verified)
import os
import requests
import gradio as gr
from src.gradio_demo import SadTalker
from huggingface_hub import snapshot_download
# 1. Download the Brain
def download_model(repo_id='vinthony/SadTalker-V002rc', local_dir='./checkpoints'):
    """Download the SadTalker checkpoint snapshot from the Hugging Face Hub.

    Args:
        repo_id: Hub repository to fetch (defaults to the SadTalker V002rc release).
        local_dir: Directory the checkpoint files are written to.
    """
    # NOTE(review): local_dir_use_symlinks is deprecated in recent
    # huggingface_hub releases; kept for compatibility with the pinned
    # Space environment — confirm against the installed version.
    snapshot_download(repo_id=repo_id, local_dir=local_dir,
                      local_dir_use_symlinks=True)
# Fetch the model weights once at startup, then construct the SadTalker
# pipeline (lazy_load=True defers heavy model loading until first use).
download_model()
sad_talker = SadTalker(lazy_load=True)
# 2. Internal AI agent logic: Groq text advice + SadTalker video generation
def farmer_agent(question):
    """Answer a farmer's question with a short text advice and a talking-head video.

    Args:
        question: The farmer's free-text question.

    Returns:
        (video_path, advice): path of the generated SadTalker video and the
        advice text that was spoken.

    Raises:
        requests.HTTPError: if the Groq API call fails.
    """
    # A. Get advice text from the Groq chat-completions API.
    groq_url = "https://api.groq.com/openai/v1/chat/completions"
    # SECURITY: never hard-code API keys in source (the original embedded a
    # live secret). Read it from the environment — set GROQ_API_KEY as a
    # secret in the Space settings.
    api_key = os.environ.get("GROQ_API_KEY", "")
    headers = {"Authorization": f"Bearer {api_key}"}
    payload = {
        "model": "llama-3.1-8b-instant",
        "messages": [
            {"role": "system", "content": "You are a village farmer expert. 2 sentences."},
            {"role": "user", "content": question},
        ],
    }
    # A timeout stops the UI from hanging forever on a stalled request, and
    # raise_for_status surfaces HTTP errors instead of a cryptic KeyError
    # when 'choices' is absent from an error response.
    resp = requests.post(groq_url, json=payload, headers=headers, timeout=30)
    resp.raise_for_status()
    advice = resp.json()['choices'][0]['message']['content']

    # B. Generate the talking-head video.
    source_img = "examples/source_image/full_body_1.png"
    # use_ref_video=False together with ref_info='pose' bypasses SadTalker's
    # reference-video assertion, since no reference clip is supplied.
    video_path = sad_talker.test(
        source_img,
        advice,
        'crop',         # preprocess mode
        True,           # still mode
        False,          # enhancer disabled
        1,              # batch size
        256,            # face model resolution
        0,              # pose style
        'facevid2vid',  # face renderer backend
        1.0,            # expression scale
        False,          # use_ref_video
        None,           # ref_video
        'pose',         # ref_info
        False,          # use_idle_mode
        5,              # idle length
        True            # use_blink
    )
    return video_path, advice
# 3. Single-input Gradio interface wiring the agent to the browser.
with gr.Blocks() as demo:
    gr.Markdown("# 🌾 Farmer GenAI Advisor")
    with gr.Row():
        question_box = gr.Textbox(label="Farmer's Question")
        advisor_video = gr.Video(label="Advisor Video")
        advice_box = gr.Textbox(label="Advice Text")
    ask_button = gr.Button("Ask the Expert")
    # On click: run the agent and route its (video, text) pair to the outputs.
    ask_button.click(
        fn=farmer_agent,
        inputs=question_box,
        outputs=[advisor_video, advice_box],
    )
demo.launch(server_name="0.0.0.0", server_port=7860)