Commit 4cb647e · Parent: 5942210
Upd build

Files changed:
- app.py (+34 -16)
- requirements.txt (+2 -1)
app.py
CHANGED
@@ -1,16 +1,19 @@
 # Access site: https://binkhoale1812-interview-ai.hf.space/
 import os
 import tempfile
-import psutil
 from pathlib import Path
 from typing import Dict
 
+# Server
 from fastapi import FastAPI, File, UploadFile, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, FileResponse
 from fastapi.staticfiles import StaticFiles
 
-
+# AI + LLM
+from transformers import WhisperProcessor, WhisperForConditionalGeneration
+import torch
+import soundfile as sf
 from google import genai
 from google.genai import types
 
@@ -45,22 +48,17 @@ app.mount("/statics", StaticFiles(directory="statics"), name="statics")
 # ── Global objects (lazy‑loaded) ──────────
 ############################################
 
-
-
+# Globals
+processor = None
+model = None
 
 
 @app.on_event("startup")
 async def load_models():
-
-
-
-
-        "automatic-speech-recognition",
-        model=ASR_MODEL_ID,
-        chunk_length_s=30,
-        torch_dtype="auto",
-        device="cpu",
-    )
+    global processor, model
+    processor = WhisperProcessor.from_pretrained(ASR_MODEL_ID)
+    model = WhisperForConditionalGeneration.from_pretrained(ASR_MODEL_ID)
+    model.to("cpu")
 
 
 ############################################
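A side note on the new globals (not something this commit adds): processor and model stay None until load_models() has run, so a request that slips in before the startup hook finishes would fail with a NoneType error. A minimal guard, sketched here with the HTTPException class app.py already imports, could look like this:

# Illustrative sketch only - not part of this commit.
# Mirrors app.py's module-level globals, which load_models() fills at startup.
from fastapi import HTTPException

processor = None
model = None

def ensure_model_loaded() -> None:
    # Reject requests that arrive before the Whisper weights are in memory.
    if processor is None or model is None:
        raise HTTPException(status_code=503, detail="ASR model is still loading")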
@@ -79,6 +77,23 @@ def build_prompt(question: str) -> str:
 def memory_usage_mb() -> float:
     return psutil.Process().memory_info().rss / 1_048_576  # bytes→MiB
 
+# Monitor Resources Before Startup
+import psutil
+def check_system_resources():
+    memory = psutil.virtual_memory()
+    cpu = psutil.cpu_percent(interval=1)
+    disk = psutil.disk_usage("/")
+    # Defines log info messages
+    logger.info(f"🔍 System Resources - RAM: {memory.percent}%, CPU: {cpu}%, Disk: {disk.percent}%")
+    if memory.percent > 85:
+        logger.warning("⚠️ High RAM usage detected!")
+    if cpu > 90:
+        logger.warning("⚠️ High CPU usage detected!")
+    if disk.percent > 90:
+        logger.warning("⚠️ High Disk usage detected!")
+check_system_resources()
+
+
 ############################################
 # ── Routes ────────────────────────────────
 ############################################
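Two things this hunk leans on but does not show: logger, which check_system_resources() calls and which app.py presumably configures earlier in the file, and import psutil, which was dropped from the top of the file in the first hunk and now executes here at module import time (still early enough for memory_usage_mb() to work). If no logger exists yet, a minimal stdlib setup along these lines would be needed (an assumption, not shown in the diff):

# Assumed logger configuration - not visible in this commit.
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)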
@@ -100,8 +115,11 @@ async def voice_transcribe(file: UploadFile = File(...)):  # noqa: B008
     tmp_path = tmp.name
     try:
         # ── 1. Transcribe
-
-
+        speech, sample_rate = sf.read(tmp_path)
+        inputs = processor(speech, sampling_rate=sample_rate, return_tensors="pt")
+        input_ids = inputs.input_features.to("cpu")  # adjust if using GPU
+        generated_ids = model.generate(input_ids)
+        question = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
         if not question:
             raise ValueError("Empty transcription")
         # ── 2. LLM answer
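Taken together, the removed keyword arguments suggest the previous code built a chunked transformers ASR pipeline; this commit swaps that for an explicit WhisperProcessor / WhisperForConditionalGeneration pair and uses soundfile to decode the uploaded file. A self-contained sketch of the same flow, useful for checking the new dependencies outside the Space (the checkpoint name and file path are placeholders, not values taken from app.py):

# Standalone sketch of the transcription flow this commit introduces.
# ASR_MODEL_ID and "sample.wav" are placeholders; app.py defines its own values.
import soundfile as sf
from transformers import WhisperProcessor, WhisperForConditionalGeneration

ASR_MODEL_ID = "openai/whisper-tiny.en"  # any small Whisper checkpoint is fine for a CPU test

processor = WhisperProcessor.from_pretrained(ASR_MODEL_ID)
model = WhisperForConditionalGeneration.from_pretrained(ASR_MODEL_ID).to("cpu")

speech, sample_rate = sf.read("sample.wav")  # mono audio; Whisper's feature extractor expects 16 kHz
inputs = processor(speech, sampling_rate=sample_rate, return_tensors="pt")
generated_ids = model.generate(inputs.input_features.to("cpu"))
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())

One caveat the new route code shares with this sketch: the processor only accepts 16 kHz audio, so browser recordings at 44.1 or 48 kHz would need resampling before the processor call.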
requirements.txt
CHANGED
@@ -5,7 +5,8 @@ aiofiles # Static file serving
 python-multipart # File uploads
 
 # Voice‑to‑text (Whisper via 🤗 Transformers)
-
+soundfile
+transformers==4.38.2  # ensure recent enough
 torch
 huggingface_hub
 
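A deployment note on the new soundfile entry: it wraps the native libsndfile library. Recent Linux wheels bundle that library, but if the Space build fails on import, adding libsndfile1 to a packages.txt file is the usual fix on Hugging Face Spaces; whether this Space needs that is not visible from this commit.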