Spaces: Runtime error
import gradio as gr
import torch
import librosa
from transformers import WhisperProcessor, WhisperForConditionalGeneration, BartForConditionalGeneration, BartTokenizer
from huggingface_hub import login
import os

# Retrieve the token from the environment variable
hf_api_token = os.getenv("HF_API_TOKEN")
if hf_api_token is None:
    raise ValueError("HF_API_TOKEN environment variable is not set")

# Authenticate with Hugging Face
login(token=hf_api_token, add_to_git_credential=True)

# Initialize the Whisper processor and model
whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
whisper_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")

# Initialize the summarization model and tokenizer
# Use BART model for summarization
summarization_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
summarization_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")

# Function to transcribe audio
def transcribe_audio(audio_file):
    # Load the audio file as a 16 kHz mono waveform (Whisper expects 16 kHz input)
    waveform, _ = librosa.load(audio_file, sr=16000)
    # Convert the waveform to log-mel input features
    input_features = whisper_processor(waveform, sampling_rate=16000, return_tensors="pt").input_features
    # Generate transcription token ids and decode them to text
    transcription_ids = whisper_model.generate(input_features)
    transcription = whisper_processor.batch_decode(transcription_ids, skip_special_tokens=True)[0]
    return transcription

# Function to summarize text
def summarize_text(text):
    inputs = summarization_tokenizer(text, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = summarization_model.generate(inputs.input_ids, max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
    summary = summarization_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary
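
The paste ends before the Gradio interface is defined. Below is a minimal sketch of how the two functions above could be wired together, assuming the audio component passes a file path; the transcribe_and_summarize helper and the component labels are not from the original and are only illustrative.

# Hypothetical wiring, not part of the original paste:
def transcribe_and_summarize(audio_file):
    # Run speech-to-text, then summarize the resulting transcript
    transcription = transcribe_audio(audio_file)
    summary = summarize_text(transcription)
    return transcription, summary

demo = gr.Interface(
    fn=transcribe_and_summarize,
    inputs=gr.Audio(type="filepath"),
    outputs=[gr.Textbox(label="Transcription"), gr.Textbox(label="Summary")],
)

demo.launch()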