# Interview Q&A – FastAPI backend
import base64, json, logging, os, tempfile
from pathlib import Path
from typing import Dict
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, FileResponse
from fastapi.staticfiles import StaticFiles
# AI / LLM
from google import genai
# ASR
import numpy as np
from pydub import AudioSegment
from transformers import WhisperProcessor, WhisperForConditionalGeneration
##############################################################################
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise RuntimeError("❌ GEMINI_API_KEY must be set as env var")
ASR_MODEL_ID = "openai/whisper-small.en"
ASR_LANGUAGE = "en"
SAMPLE_RATE = 16_000
##############################################################################
app = FastAPI(title="Interview Q&A Assistant", docs_url="/docs")
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"], allow_methods=["*"], allow_headers=["*"],
)
app.mount("/statics", StaticFiles(directory="statics"), name="statics")
# Enable Logging for Debugging
import psutil
# Set up app-specific logger
logger = logging.getLogger("interview-qa")
logger.setLevel(logging.INFO)  # Set to DEBUG only when needed
# Set log format
formatter = logging.Formatter("[%(levelname)s] %(asctime)s - %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
# Suppress noisy libraries like pymongo, urllib3, etc.
for noisy in ["pymongo", "urllib3", "httpx", "uvicorn", "uvicorn.error", "uvicorn.access"]:
    logging.getLogger(noisy).setLevel(logging.WARNING)
# Monitor Resources Before Startup
def check_system_resources():
    memory = psutil.virtual_memory()
    cpu = psutil.cpu_percent(interval=1)
    disk = psutil.disk_usage("/")
    # Log current usage and warn when any resource crosses its threshold
    logger.info(f"🔍 System Resources - RAM: {memory.percent}%, CPU: {cpu}%, Disk: {disk.percent}%")
    if memory.percent > 85:
        logger.warning("⚠️ High RAM usage detected!")
    if cpu > 90:
        logger.warning("⚠️ High CPU usage detected!")
    if disk.percent > 90:
        logger.warning("⚠️ High Disk usage detected!")
check_system_resources()
##############################################################################
# Global ASR handles (populated by the startup hook below)
processor = model = None
def build_prompt(question: str) -> str:
    return (
        "You are a helpful career-coach AI. Answer the following interview "
        "question clearly and concisely (≤200 words). Use markdown when helpful.\n\n"
        f"Interview question: \"{question.strip()}\""
    )
def memory_mb() -> float:
    # Resident set size of the current process, in MiB
    return round(psutil.Process().memory_info().rss / 1_048_576, 1)
@app.on_event("startup")
async def load_models():
    global processor, model
    cache = Path("model_cache")
    cache.mkdir(exist_ok=True)
    processor = WhisperProcessor.from_pretrained(ASR_MODEL_ID, cache_dir=cache)
    model = WhisperForConditionalGeneration.from_pretrained(ASR_MODEL_ID, cache_dir=cache)
    # Pin the decoder to English transcription (skips language detection)
    forced = processor.get_decoder_prompt_ids(task="transcribe", language="english")
    model.config.forced_decoder_ids = forced
    model.to("cpu").eval()
    logger.info("[MODEL] 🔊 Whisper loaded ✔")
@app.get("/")
async def root() -> FileResponse:  # serve SPA
    logger.info("[STATIC] Serving frontend")
    return FileResponse(Path("statics/index.html"))
##############################################################################
# ── MAIN ENDPOINTS ──────────────────────────────────────────────────────────
def call_gemini(prompt: str, vision_parts=None) -> str:
    client = genai.Client(api_key=GEMINI_API_KEY)
    kwargs: Dict = {}
    if vision_parts:  # multimodal call: image part(s) first, then the text prompt
        kwargs["contents"] = vision_parts + [{"text": prompt}]
    else:
        kwargs["contents"] = prompt
    resp = client.models.generate_content(
        model="gemini-2.5-flash-preview-04-17", **kwargs
    )
    response = resp.text.strip()
    logger.info(f"[LLM] Response: {response}")
    return response
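# Illustrative usage (hypothetical prompt, not part of the app):
#   call_gemini(build_prompt("Why do you want this role?"))  # -> markdown answer string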
@app.post("/voice-transcribe")
async def voice_transcribe(file: UploadFile = File(...)):
if file.content_type not in {"audio/wav", "audio/x-wav", "audio/mpeg"}:
raise HTTPException(415, "Unsupported audio type")
# Write temporary audio file
with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
tmp.write(await file.read()); tmp_path = tmp.name
# Audio processing and transcription
try:
seg = AudioSegment.from_file(tmp_path).set_frame_rate(SAMPLE_RATE).set_channels(1)
audio = np.array(seg.get_array_of_samples()).astype(np.float32) / (2 ** 15)
inputs = processor(audio, sampling_rate=SAMPLE_RATE, return_tensors="pt")
ids = model.generate(inputs.input_features.to(model.device))
question = processor.decode(ids[0], skip_special_tokens=True).strip()
if not question:
raise ValueError("No speech detected")
logger.info(f"[MODEL] Transcribed text: {question}")
answer = call_gemini(build_prompt(question))
return JSONResponse({"question": question, "answer": answer, "memory_mb": memory_mb()})
finally:
os.remove(tmp_path)
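# Example request, assuming a local run on HF Spaces' default port 7860
# (host, port, and filename are illustrative, not part of the app):
#   curl -X POST http://localhost:7860/voice-transcribe \
#        -F "file=@question.wav;type=audio/wav"
# -> {"question": "...", "answer": "...", "memory_mb": ...}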
@app.post("/image-question")
async def image_question(file: UploadFile = File(...)):
if file.content_type not in {"image/png", "image/jpeg"}:
raise HTTPException(415, "Unsupported image type")
# Read file and decode
raw = await file.read()
b64 = base64.b64encode(raw).decode()
# Send image data
vision_part = [{
"inline_data": {
"mime_type": file.content_type,
"data": b64
}
}]
# Ask Gemini to return JSON so we can split Q & A
prompt = (
"From the screenshot extract the interview question (English). "
"Then answer concisely (≤200 words). "
"Return pure JSON: {\"question\":\"...\",\"answer\":\"...\"}"
)
# Send prompt and image
text = call_gemini(prompt, vision_part)
try:
parsed = json.loads(text)
question, answer = parsed["question"], parsed["answer"]
except (json.JSONDecodeError, KeyError):
# Fallback: treat whole reply as answer
question, answer = "[Extracted from screenshot]", text
return JSONResponse({"question": question, "answer": answer, "memory_mb": memory_mb()})
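# Example request (same illustrative localhost:7860 assumption):
#   curl -X POST http://localhost:7860/image-question \
#        -F "file=@screenshot.png;type=image/png"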
@app.post("/text-question")
async def text_question(payload: Dict):
question = (payload.get("question") or "").strip()
if not question:
raise HTTPException(400, "question is required")
answer = call_gemini(build_prompt(question))
return JSONResponse({"question": question, "answer": answer, "memory_mb": memory_mb()})
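# Example request (same illustrative localhost:7860 assumption):
#   curl -X POST http://localhost:7860/text-question \
#        -H "Content-Type: application/json" \
#        -d '{"question": "Tell me about yourself"}'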