Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
File size: 2,681 Bytes
bd97be7 9fe4dba b40af2a 8057378 bd97be7 8057378 525ee37 5f36451 d88ec40 525ee37 d88ec40 525ee37 8057378 525ee37 b40af2a 525ee37 b40af2a 525ee37 6befe57 525ee37 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 |
import gradio as gr
import librosa  # audio loading / chunking
import spaces  # HF Spaces ZeroGPU decorator — referenced below but was never imported (NameError)
import torch
from transformers import pipeline
def split_audio(audio_path, chunk_duration=30):
    """Load an audio file and slice it into fixed-length chunks.

    Args:
        audio_path: Path to an audio file readable by ``librosa.load``.
        chunk_duration: Target length of each chunk in seconds.

    Returns:
        A tuple ``(chunks, sr)``: a list of 1-D sample arrays (the final
        chunk may be shorter than ``chunk_duration``) and the file's
        native sampling rate (``sr=None`` preserves it).
    """
    samples, sr = librosa.load(audio_path, sr=None)
    step = int(chunk_duration * sr)
    # Slice in strides of `step` samples; the last slice keeps the remainder.
    pieces = [samples[offset:offset + step] for offset in range(0, len(samples), step)]
    return pieces, sr
def transcribe_long_audio(audio_path, transcriber, chunk_duration=30):
    """Transcribe a long audio file by splitting it into fixed-size chunks.

    Args:
        audio_path: Path to the audio file on disk.
        transcriber: A transformers ASR pipeline; called once per chunk.
        chunk_duration: Chunk length in seconds (default 30, which matches
            Whisper's native window).

    Returns:
        The per-chunk transcriptions joined with single spaces.
    """
    chunks, sr = split_audio(audio_path, chunk_duration)
    transcriptions = []
    for chunk in chunks:
        # Feed the raw samples straight to the pipeline instead of round-tripping
        # through a temp WAV: librosa.output.write_wav was removed in librosa
        # 0.8.0, so the old temp-file approach crashes on current installs
        # (and its hard-coded "temp_chunk.wav" also raced concurrent requests).
        result = transcriber({"raw": chunk, "sampling_rate": sr})
        transcriptions.append(result["text"])
    return " ".join(transcriptions)
def main():
    """Build and launch the Gradio transcription + summarization app.

    Loads Whisper (ASR) and BART (summarization) pipelines, wires them into
    a two-column Gradio Blocks UI, and launches the server with public
    sharing enabled. Re-raises whatever ``pipeline`` raises if a model
    fails to load.
    """
    # Prefer GPU when present; transformers pipelines use -1 for CPU.
    device = 0 if torch.cuda.is_available() else -1
    try:
        # Load both models on the same explicit device (the original left the
        # summarizer on CPU even when a GPU was available).
        transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
        summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=device)
    except Exception as e:
        print(f"Error loading models: {e}")
        raise

    # The ZeroGPU decorator must wrap the per-request handler, not main():
    # main() blocks in launch(), so decorating it would hold (and exhaust)
    # the GPU allocation before any request was served. 3 s was also far too
    # short for Whisper inference on real uploads; 120 s is a realistic cap.
    @spaces.GPU(duration=120)
    def process_audio(audio_file):
        """Transcribe the uploaded file, then summarize the transcript."""
        try:
            transcription = transcribe_long_audio(audio_file, transcriber, chunk_duration=30)
            # truncation=True keeps transcripts longer than BART's 1024-token
            # input limit from raising instead of summarizing.
            summary = summarizer(
                transcription, max_length=50, min_length=10, do_sample=False, truncation=True
            )[0]["summary_text"]
            return transcription, summary
        except Exception as e:
            # Surface the error in the transcription box rather than crashing the UI.
            return f"Error processing audio: {e}", ""

    # Two-column layout: input controls on the left, results on the right.
    with gr.Blocks() as interface:
        with gr.Row():
            with gr.Column():
                audio_input = gr.Audio(type="filepath", label="Upload Audio File")
                process_button = gr.Button("Process Audio")
            with gr.Column():
                transcription_output = gr.Textbox(label="Full Transcription", lines=10)
                summary_output = gr.Textbox(label="Summary", lines=5)
        process_button.click(
            process_audio,
            inputs=[audio_input],
            outputs=[transcription_output, summary_output]
        )
    # Launch the interface with optional public sharing.
    interface.launch(share=True)
|