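"""Gradio demo: transcribe an uploaded audio file with openai/whisper-base and
summarize the transcription with facebook/bart-large-cnn."""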
import gradio as gr
import torch
from transformers import pipeline

# Check if GPU is available; fallback to CPU if not
device = 0 if torch.cuda.is_available() else -1

try:
    # Load models with error handling.
    # chunk_length_s=30 lets the ASR pipeline chunk audio longer than
    # Whisper's 30-second window (assumes uploads may exceed 30 s).
    transcriber = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-base",
        chunk_length_s=30,
        device=device,
    )
    # Run the summarizer on the same device as the transcriber
    summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=device)
except Exception as e:
    print(f"Error loading models: {e}")
    raise

# Function to process audio
def process_audio(audio_file):
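    # Gradio passes None when the button is clicked with no file uploaded
    if audio_file is None:
        return "No audio file provided.", ""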
    try:
        # Transcribe the audio
        transcription = transcriber(audio_file)["text"]
        # Summarize the transcription; truncation=True keeps long
        # transcriptions within BART's input-length limit
        summary = summarizer(transcription, max_length=50, min_length=10, do_sample=False, truncation=True)[0]["summary_text"]
        return transcription, summary
    except Exception as e:
        return f"Error processing audio: {e}", ""

# Gradio Interface with Horizontal Layout
with gr.Blocks() as interface:
    with gr.Row():
        with gr.Column():
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            process_button = gr.Button("Process Audio")
        with gr.Column():
            transcription_output = gr.Textbox(label="Full Transcription", lines=10)
            summary_output = gr.Textbox(label="Summary", lines=5)

    process_button.click(
        process_audio,
        inputs=[audio_input],
        outputs=[transcription_output, summary_output]
    )

# Launch the interface with public sharing and server-side rendering disabled
interface.launch(share=True, ssr_mode=False)