# Access site: https://binkhoale1812-interview-ai.hf.space/
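# Pipeline: the browser uploads recorded audio → Whisper (local, CPU) transcribes
# the interview question → Gemini drafts a short markdown answer → the
# question/answer pair (plus server memory usage) is returned as JSON.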
import os, tempfile
from pathlib import Path
from typing import Dict
# Server
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, FileResponse
from fastapi.staticfiles import StaticFiles
# AI + LLM
import torch # For transformer
from google import genai
from google.genai import types
# Audio Transcribe
from pydub import AudioSegment
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import numpy as np
############################################
# ── Configuration ────────────────────────
############################################
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise RuntimeError("GEMINI_API_KEY environment variable must be set!")
# whisper-small.en is light enough for CPU Spaces; swap in a larger checkpoint if a GPU is available
ASR_MODEL_ID = "openai/whisper-small.en"
ASR_LANGUAGE = "en" # Force to English for interview setting
SAMPLE_RATE = 16000
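# Whisper checkpoints are trained on 16 kHz mono audio, so every upload is
# resampled to SAMPLE_RATE and downmixed to one channel before inference.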
############################################
# ── FastAPI App ───────────────────────────
############################################
app = FastAPI(title="Interview Q&A Assistant", docs_url="/docs")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
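# CORS is left wide open so the static frontend (or any other origin) can call the API.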
# Serve frontend assets
app.mount("/statics", StaticFiles(directory="statics"), name="statics")
############################################
# ── Global objects (lazy‑loaded) ──────────
############################################
# Globals
processor = None
model = None
# Enable Logging for Debugging
import psutil
import logging
# Set up app-specific logger
logger = logging.getLogger("interview-ai")
logger.setLevel(logging.INFO)  # Set to DEBUG only when needed
# Set log format
formatter = logging.Formatter("[%(levelname)s] %(asctime)s - %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
# Suppress noisy libraries like pymongo, urllib3, etc.
for noisy in ["pymongo", "urllib3", "httpx", "uvicorn", "uvicorn.error", "uvicorn.access"]:
logging.getLogger(noisy).setLevel(logging.WARNING)
# Monitor Resources Before Startup
def check_system_resources():
    memory = psutil.virtual_memory()
    cpu = psutil.cpu_percent(interval=1)
    disk = psutil.disk_usage("/")
    # Log a snapshot of current resource usage
    logger.info(f"🔍 System Resources - RAM: {memory.percent}%, CPU: {cpu}%, Disk: {disk.percent}%")
    if memory.percent > 85:
        logger.warning("⚠️ High RAM usage detected!")
    if cpu > 90:
        logger.warning("⚠️ High CPU usage detected!")
    if disk.percent > 90:
        logger.warning("⚠️ High Disk usage detected!")
check_system_resources()
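# The check above runs once at import time, before the FastAPI startup event fires.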
# Startup
@app.on_event("startup")
async def load_models():
    global processor, model
    cache = Path("model_cache")
    cache.mkdir(exist_ok=True)
    # Load the Whisper processor + model once at startup (CPU inference)
    processor = WhisperProcessor.from_pretrained(ASR_MODEL_ID, cache_dir=cache)
    model = WhisperForConditionalGeneration.from_pretrained(ASR_MODEL_ID, cache_dir=cache)
    # Force English transcription – never translate
    forced = processor.get_decoder_prompt_ids(language="english", task="transcribe")
    model.config.forced_decoder_ids = forced
    model.to("cpu")
    model.eval()
    logger.info("[STARTUP] Whisper loaded ✔")
############################################
# ── Helpers ───────────────────────────────
############################################
def build_prompt(question: str) -> str:
    """Craft a prompt that elicits concise, structured answers."""
    return (
        "You are a helpful career‑coach AI. Answer the following interview "
        "question clearly and concisely, offering practical insights when appropriate.\n"
        "Use markdown for **bold**, *italic*, and bullet‑lists when helpful.\n"
        "Ensure your answer is less than 200 words.\n\n"
        f"Interview question: \"{question}\""
    )

def memory_usage_mb() -> float:
    return psutil.Process().memory_info().rss / 1_048_576  # bytes → MiB
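# memory_mb (the RSS of this process) is included in API responses so per-request
# resource usage on the Space stays visible to the client.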
############################################
# ── Routes ────────────────────────────────
############################################
@app.get("/")
async def root() -> FileResponse:
    """Serve the single‑page app."""
    logger.info("[STATIC] Serving frontend")
    return FileResponse(Path("statics/index.html"))
@app.post("/voice-transcribe")
async def voice_transcribe(file: UploadFile = File(...)):  # noqa: B008
    """Receive audio, transcribe it, send the question to Gemini, and return the answer."""
    if file.content_type not in {"audio/wav", "audio/x-wav", "audio/mpeg"}:
        raise HTTPException(status_code=415, detail="Unsupported audio type")
    # Save to a temp file (pydub/Whisper expect a filename or raw bytes)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
        tmp.write(await file.read())
        tmp_path = tmp.name
    try:
        # ── 1. Transcribe
        seg = AudioSegment.from_file(tmp_path).set_frame_rate(SAMPLE_RATE).set_channels(1)
        # Convert 16-bit PCM samples to float32 in [-1.0, 1.0]
        audio = np.array(seg.get_array_of_samples()).astype(np.float32) / (2**15)
        inputs = processor(audio, sampling_rate=SAMPLE_RATE, return_tensors="pt")
        ids = model.generate(inputs.input_features.to(model.device))
        question = processor.decode(ids[0], skip_special_tokens=True).strip()
        if not question:
            raise HTTPException(status_code=400, detail="Could not detect speech")
        logger.info(f"[VOICE] Transcribed question: {question}")
        # ── 2. LLM answer
        prompt = build_prompt(question)
        # Gemini 2.5 Flash – tuned for low latency
        client = genai.Client(api_key=GEMINI_API_KEY)
        response = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=prompt,
        )
        answer = response.text.strip()
        logger.info(f"[LLM] Generated answer: {answer}")
        return JSONResponse(
            {
                "question": question,
                "answer": answer,
                "memory_mb": round(memory_usage_mb(), 1),
            }
        )
    finally:
        os.remove(tmp_path)  # Remove the temp audio file when done
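# Example client call (a sketch – adjust the host and file name to your deployment):
#   curl -X POST https://binkhoale1812-interview-ai.hf.space/voice-transcribe \
#        -F "file=@question.wav;type=audio/wav"
# → JSON: {"question": "...", "answer": "...", "memory_mb": 123.4}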