import gradio as gr
import requests
import os
import re
import json
from dotenv import load_dotenv
# Load environment variables from a local .env file (no-op if absent).
load_dotenv()
# API_KEY is required at request time; an empty default lets the UI start
# and show a friendly error instead of crashing on import.
API_KEY = os.getenv("API_KEY", "")
# OpenRouter-compatible chat-completions endpoint and model, overridable via env.
API_URL = os.getenv("API_URL", "https://openrouter.ai/api/v1/chat/completions")
MODEL = os.getenv("MODEL", "moonshotai/Kimi-K2-Thinking")
# Load the prompt template
def load_prompt() -> str:
    """Read the prompt template from ``prompt.txt`` in the working directory.

    The template is expected to contain a ``{content}`` placeholder that is
    substituted per request (see the caller's ``.replace("{content}", ...)``).

    Returns:
        str: The raw template text.

    Raises:
        FileNotFoundError: If ``prompt.txt`` does not exist.
    """
    # Explicit encoding: the platform default (e.g. cp1252 on Windows) can
    # corrupt non-ASCII characters in the template.
    with open("prompt.txt", "r", encoding="utf-8") as f:
        return f.read()
# Read once at import time; each request substitutes its own {content}.
PROMPT_TEMPLATE = load_prompt()
def generate_human_text(content: str, progress=gr.Progress()):
    """Stream a human-sounding rewrite of *content* from the configured model.

    Yields successively longer snapshots of the cleaned answer (Gradio
    re-renders the output textbox on every yield).  The model's
    chain-of-thought, wrapped in ``<think>...</think>`` tags, is withheld
    from the user: streaming to the UI only starts once a closing
    ``</think>`` has been seen, and all think blocks are stripped.

    Args:
        content: The text to rewrite.
        progress: Gradio progress tracker (injected by the UI).

    Yields:
        str: Growing partial output, then the final cleaned text, or a
        user-facing warning/error message.
    """
    if not content.strip():
        yield "⚠️ Please enter some content!"
        return
    if not API_KEY:
        yield "⚠️ API key not configured. Please contact the administrator."
        return

    progress(0.1, desc="Preparing request...")

    # Compile the think-strip patterns once instead of on every chunk.
    # The second pattern handles a trailing *unclosed* <think> so a stream
    # that ends (or is sampled) mid-thought never leaks reasoning text.
    closed_think = re.compile(r'<think>.*?</think>', re.DOTALL)
    open_think = re.compile(r'<think>.*$', re.DOTALL)

    def _strip_think(text: str) -> str:
        """Remove all chain-of-thought tokens and surrounding whitespace."""
        return open_think.sub('', closed_think.sub('', text)).strip()

    prompt = PROMPT_TEMPLATE.replace("{content}", content)
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": MODEL,
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.8,
        "max_tokens": 16000,
        "stream": True
    }

    progress(0.3, desc="Connecting to AI...")
    full_content = ""
    try:
        # Context manager guarantees the HTTP connection is released even if
        # this generator is abandoned mid-stream by the UI.
        with requests.post(
            API_URL,
            headers=headers,
            json=payload,
            timeout=180,
            stream=True
        ) as response:
            response.raise_for_status()
            progress(0.5, desc="Receiving response...")

            is_streaming_answer = False
            # OpenAI-style SSE: each "data: ..." line carries a JSON delta.
            for raw_line in response.iter_lines():
                if not raw_line:
                    continue
                line = raw_line.decode('utf-8')
                if not line.startswith('data: '):
                    continue
                data_str = line[6:]
                if data_str == '[DONE]':
                    break
                try:
                    data = json.loads(data_str)
                except json.JSONDecodeError:
                    # Skip malformed/partial SSE payloads rather than aborting
                    # the whole stream (was a bare `except:` that also hid
                    # KeyboardInterrupt/SystemExit).
                    continue
                choices = data.get('choices') or []
                if not choices:
                    continue
                chunk = choices[0].get('delta', {}).get('content', '')
                if not chunk:
                    continue
                full_content += chunk

                # Hold back output until the thinking phase has finished.
                if not is_streaming_answer and '</think>' in full_content:
                    is_streaming_answer = True
                    progress(0.7, desc="Generating human-like text...")
                if is_streaming_answer:
                    clean_content = _strip_think(full_content)
                    if clean_content:
                        yield clean_content

        progress(1.0, desc="Done!")
        # Final cleanup covers models that emit no think tags at all (nothing
        # was streamed above) and any leftover unclosed <think>.
        final_content = _strip_think(full_content)
        yield final_content if final_content else "⚠️ No content generated. Please try again with different text."
    except requests.exceptions.Timeout:
        yield "⏱️ Request timed out. The AI is taking too long to respond. Try with shorter text or try again later."
    except requests.exceptions.RequestException as e:
        yield f"❌ API Error: {str(e)}"
    except Exception as e:
        yield f"❌ Unexpected error: {str(e)}"
# Create the Gradio interface
with gr.Blocks(title="Human-Like Text Generator - Kimi K2") as app:
    gr.Markdown("""
    # ✍️ Human-Like Text Generator

    Transform AI-generated or formal text into natural, human-sounding content using Open Source Model.

    ⚡ **Streaming enabled!** You'll see the text appear in real-time as the AI writes.
    """)
    with gr.Row():
        # Left column: input textbox + trigger button.
        with gr.Column():
            content = gr.Textbox(
                label="Your Content",
                placeholder="Paste the content you want rewritten in a human style...",
                lines=12,
                max_lines=20
            )
            generate_btn = gr.Button("🚀 Generate Human Version", variant="primary", size="lg")
        # Right column: streamed result.
        with gr.Column():
            output = gr.Textbox(
                label="Human-Like Output",
                lines=12,
                max_lines=20
            )
    gr.Markdown("💡 **Tip:** For best results, paste clear, complete paragraphs. The output will match the length and topic of your input.")
    # generate_human_text is a generator, so Gradio streams each yield
    # into `output`. Both the button click and pressing Enter in the
    # textbox trigger the same handler.
    generate_btn.click(
        fn=generate_human_text,
        inputs=[content],
        outputs=output
    )
    content.submit(
        fn=generate_human_text,
        inputs=[content],
        outputs=output
    )
# Launch only when executed directly (HF Spaces / imports just define `app`).
if __name__ == "__main__":
    app.launch()