import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
import torch.nn.functional as F

# 🌍 Load the multilingual sentiment model
MODEL_NAME = "cardiffnlp/twitter-xlm-roberta-base-sentiment"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)

labels = ["negative", "neutral", "positive"]


def analyze_text(text):
    # Handle empty or whitespace-only input explicitly
    if not text.strip():
        return {"label": "empty", "emoji": "💬", "scores": {}}

    # Truncate long inputs so they fit the model's maximum sequence length
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert logits to per-label probabilities
    probs = F.softmax(outputs.logits, dim=-1)
    scores = {labels[i]: round(float(probs[0][i]), 3) for i in range(len(labels))}
    top_label = max(scores, key=scores.get)

    emoji_map = {
        "positive": "😄",
        "neutral": "😐",
        "negative": "😞",
        "empty": "💬",
    }

    return {
        "label": top_label,
        "emoji": emoji_map[top_label],
        "scores": scores,
    }


# ✅ Gradio interface (3.x API)
iface = gr.Interface(
    fn=analyze_text,
    inputs=gr.Textbox(label="Enter text"),
    outputs="json",
    title="🌍 Multilingual Sentiment Analyzer",
    description="Sentiment analysis for multiple languages.",
)

# ✅ IMPORTANT: in 3.x, launch() automatically exposes a predict API
iface.launch(server_name="0.0.0.0", server_port=7860)
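
# A minimal client sketch for the predict API mentioned above (assumptions:
# the app is serving locally on port 7860 and the Gradio 3.x REST endpoint
# /api/predict is available; the example text is illustrative). It is kept
# commented out because launch() blocks; run it from a separate process:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/api/predict",
#       json={"data": ["I love this app!"]},
#       timeout=30,
#   )
#   # The JSON output component comes back as the first element of "data"
#   print(resp.json()["data"][0])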