# Access site: https://binkhoale1812-interview-ai.hf.space/
import os
import tempfile
import psutil
from pathlib import Path
from typing import Dict
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, FileResponse
from fastapi.staticfiles import StaticFiles
from google import genai  # google-genai SDK (provides genai.Client, used below)
from transformers import pipeline
############################################
# ── Configuration ────────────────────────
############################################
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise RuntimeError("GEMINI_API_KEY environment variable must be set!")
# Tiny Whisper model is light enough for CPU Spaces; change if GPU is available
ASR_MODEL_ID = "openai/whisper-tiny" # ~39 MB
ASR_LANGUAGE = "en" # Force to English for interview setting
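# On a GPU Space one might swap in a larger checkpoint, e.g. (hypothetical values):
#   ASR_MODEL_ID = "openai/whisper-small"  # ~244 M params, better accuracy
#   and pass device=0 to the pipeline in load_models() instead of device="cpu"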
############################################
# ── FastAPI App ───────────────────────────
############################################
app = FastAPI(title="Interview Q&A Assistant", docs_url="/docs")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Serve frontend assets
app.mount("/statics", StaticFiles(directory="statics"), name="statics")
############################################
# ── Global objects (lazy‑loaded) ──────────
############################################
asr_pipeline = None  # Speech-to-text pipeline, loaded once at startup
# The Gemini client is lightweight, so it is created per request in the route below.
@app.on_event("startup")
async def load_models():
    """Load the Whisper ASR pipeline once at startup."""
    global asr_pipeline
    # Whisper tiny – seq2seq pipeline, light enough for CPU inference
    asr_pipeline = pipeline(
        "automatic-speech-recognition",
        model=ASR_MODEL_ID,
        chunk_length_s=30,
        torch_dtype="auto",
        device="cpu",
    )
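# Loading at startup keeps /voice-transcribe latency down to inference time only;
# whisper-tiny should load in roughly a few seconds on a CPU Space.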
############################################
# ── Helpers ───────────────────────────────
############################################
def build_prompt(question: str) -> str:
"""Craft a prompt that elicits concise, structured answers."""
return (
"You are a helpful career‑coach AI. Answer the following interview "
"question clearly and concisely, offering practical insights when "
"appropriate.\n\n"
f"Interview question: \"{question}\""
)
def memory_usage_mb() -> float:
    """Return this process's resident memory in MiB."""
    return psutil.Process().memory_info().rss / 1_048_576  # bytes → MiB
############################################
# ── Routes ────────────────────────────────
############################################
@app.get("/")
async def root() -> FileResponse:
"""Serve the single‑page app."""
return FileResponse(Path("statics/index.html"))
@app.post("/voice-transcribe")
async def voice_transcribe(file: UploadFile = File(...)):  # noqa: B008
    """Receive audio, transcribe it, send the question to Gemini, and return the answer."""
    if file.content_type not in {"audio/wav", "audio/x-wav", "audio/mpeg"}:
        raise HTTPException(status_code=415, detail="Unsupported audio type")
    # Save to a temp file (the Whisper pipeline expects a filename or raw bytes)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
        tmp.write(await file.read())
        tmp_path = tmp.name
    try:
        # ── 1. Transcribe
        transcript: Dict = asr_pipeline(tmp_path, generate_kwargs={"language": ASR_LANGUAGE})
        question = transcript["text"].strip()
        if not question:
            raise ValueError("Empty transcription")
        # ── 2. LLM answer
        prompt = build_prompt(question)
        # Gemini 2.5 Flash – tuned for low latency
        client = genai.Client(api_key=GEMINI_API_KEY)
        response = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=prompt,
        )
        answer = response.text.strip()
        return JSONResponse(
            {
                "question": question,
                "answer": answer,
                "memory_mb": round(memory_usage_mb(), 1),
            }
        )
    finally:
        os.remove(tmp_path)  # Remove the temp audio file when done
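# Example request (assuming a local run via `uvicorn app:app`; the Space URL at
# the top of this file works the same way). "question.wav" is a placeholder:
#   curl -X POST http://localhost:8000/voice-transcribe \
#        -F "file=@question.wav;type=audio/wav"
# -> {"question": "...", "answer": "...", "memory_mb": ...}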