Spaces: Running on CPU Upgrade
ZennyKenny committed: remove persistent storage

app.py CHANGED
@@ -4,14 +4,8 @@ from transformers import pipeline
 import librosa
 import soundfile as sf
 import os
-import uuid
 import spaces  # Ensure spaces is imported
 
-# Directory to save processed audio files
-OUTPUT_DIR = os.getenv("HF_HOME", ".")  # Use dynamic path or default to current directory
-OUTPUT_DIR = os.path.join(OUTPUT_DIR, "processed_audio_files")
-os.makedirs(OUTPUT_DIR, exist_ok=True)
-
 def split_audio(audio_data, sr, chunk_duration=30):
     """Split audio into chunks of chunk_duration seconds."""
     chunks = []
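Note: the hunk above cuts off right after chunks = [], so the rest of split_audio is not visible in this diff. A minimal sketch of how such a splitter typically works (an illustration under that assumption, not the file's actual code):

def split_audio(audio_data, sr, chunk_duration=30):
    """Split audio into chunks of chunk_duration seconds."""
    chunks = []
    samples_per_chunk = int(chunk_duration * sr)  # number of samples in each window
    for start in range(0, len(audio_data), samples_per_chunk):
        chunks.append(audio_data[start:start + samples_per_chunk])  # last chunk may be shorter
    return chunks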
@@ -38,25 +32,6 @@ def transcribe_long_audio(audio_path, transcriber, chunk_duration=30):
         print(f"Error in transcribe_long_audio: {e}")
         return f"Error processing audio: {e}"
 
-def cleanup_output_dir(max_storage_mb=500):
-    """Remove old files if total directory size exceeds max_storage_mb."""
-    try:
-        total_size = sum(
-            os.path.getsize(os.path.join(OUTPUT_DIR, f)) for f in os.listdir(OUTPUT_DIR)
-        )
-        if total_size > max_storage_mb * 1024 * 1024:
-            files = sorted(
-                (os.path.join(OUTPUT_DIR, f) for f in os.listdir(OUTPUT_DIR)),
-                key=os.path.getctime,
-            )
-            for file in files:
-                os.remove(file)
-                total_size -= os.path.getsize(file)
-                if total_size <= max_storage_mb * 1024 * 1024:
-                    break
-    except Exception as e:
-        print(f"Error during cleanup: {e}")
-
 @spaces.GPU(duration=3)
 def main():
     device = 0 if torch.cuda.is_available() else -1
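With cleanup_output_dir and the persistent OUTPUT_DIR gone, transcription no longer writes into a saved output directory. Only the error branch of transcribe_long_audio appears in this hunk; a hedged sketch of how it plausibly combines split_audio with the ASR pipeline (the temp-file handling and return format here are assumptions, not the file's actual code):

import os
import tempfile

import librosa
import soundfile as sf

def transcribe_long_audio(audio_path, transcriber, chunk_duration=30):
    """Transcribe a long file by running the ASR pipeline chunk by chunk."""
    try:
        audio_data, sr = librosa.load(audio_path, sr=None)  # keep the native sample rate
        texts = []
        for chunk in split_audio(audio_data, sr, chunk_duration):
            # Write each chunk to a throwaway temp file instead of a persistent directory.
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
                tmp_path = tmp.name
            sf.write(tmp_path, chunk, sr)
            texts.append(transcriber(tmp_path)["text"])
            os.remove(tmp_path)  # discard the chunk as soon as it has been transcribed
        return " ".join(texts)
    except Exception as e:
        print(f"Error in transcribe_long_audio: {e}")
        return f"Error processing audio: {e}"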
@@ -80,9 +55,6 @@ def main():
             transcription = transcribe_long_audio(audio_input, transcriber, chunk_duration=30)
             summary = summarizer(transcription, max_length=50, min_length=10, do_sample=False)[0]["summary_text"]
 
-            # Cleanup old files
-            cleanup_output_dir()
-
             return transcription, summary, audio_input
         except Exception as e:
             print(f"Error in process_audio: {e}")
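The hunk shows the transcriber and summarizer being used, but not how they are built. A minimal sketch of the pipeline setup consistent with this code; the checkpoint names are placeholders, not taken from this repository:

import torch
from transformers import pipeline

device = 0 if torch.cuda.is_available() else -1  # GPU index 0 if available, else CPU

# Model names below are illustrative assumptions, not the Space's actual checkpoints.
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device)
summarizer = pipeline("summarization", model="facebook/bart-large-cnn", device=device)

transcription = "example transcription text produced by the ASR step"
summary = summarizer(transcription, max_length=50, min_length=10, do_sample=False)[0]["summary_text"]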
@@ -93,9 +65,9 @@ def main():
         with gr.Column():
             # Only support file uploads
             audio_input = gr.Audio(type="filepath", label="Upload Audio File")
-            process_button = gr.Button("
+            process_button = gr.Button("Transcribe Audio")
         with gr.Column():
-            transcription_output = gr.Textbox(label="
+            transcription_output = gr.Textbox(label="Transcription", lines=10)
             summary_output = gr.Textbox(label="Summary", lines=5)
             audio_output = gr.Audio(label="Playback Processed Audio")
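The click wiring for process_button sits outside the hunks shown above. Assuming the usual Gradio Blocks pattern, a sketch of how the relabeled widgets are likely connected; the stand-in handler below is not the file's process_audio:

import gradio as gr

def fake_process_audio(audio_path):
    # Stand-in for the real process_audio: returns transcription, summary, and the audio path.
    return "(transcription)", "(summary)", audio_path

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            process_button = gr.Button("Transcribe Audio")
        with gr.Column():
            transcription_output = gr.Textbox(label="Transcription", lines=10)
            summary_output = gr.Textbox(label="Summary", lines=5)
            audio_output = gr.Audio(label="Playback Processed Audio")
    process_button.click(
        fn=fake_process_audio,
        inputs=audio_input,
        outputs=[transcription_output, summary_output, audio_output],
    )

demo.launch()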