OmarHusseinZaki committed on
Commit
6716568
·
1 Parent(s): 81f4e18

add cors, inference, models and fastapi

Browse files
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. main.py +65 -0
Dockerfile CHANGED
@@ -1,7 +1,7 @@
1
  # Dockerfile
2
 
3
  # 1. Choose a base Python image
4
- FROM python:3.9-slim # Using Python 3.9 slim variant for smaller size
5
 
6
  # 2. Set the working directory inside the container
7
  WORKDIR /code
 
1
  # Dockerfile
2
 
3
  # 1. Choose a base Python image
4
+ FROM python:3.9-slim
5
 
6
  # 2. Set the working directory inside the container
7
  WORKDIR /code
main.py CHANGED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# main.py

import os
import io  # In-memory byte buffers
import yt_dlp  # Download audio from YouTube
import requests  # HTTP requests (to audio URLs)
from fastapi import FastAPI, HTTPException, Request  # Web framework
from fastapi.middleware.cors import CORSMiddleware  # Cross-origin support for the frontend
from pydantic import BaseModel  # Request/response validation
from huggingface_hub import InferenceClient  # Hugging Face Inference API client
from dotenv import load_dotenv  # Read a local .env file during development

# --- Initial Setup ---

# Pull environment variables from a .env file when running locally.
# On Hugging Face Spaces, secrets come from the Space settings instead of .env.
load_dotenv()

HF_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

# Without this key every Inference API call will be rejected.
if not HF_API_KEY:
    print("ERROR: HUGGINGFACE_API_KEY environment variable not found.")
    # Deliberately not exiting: the app still starts, but downstream
    # API calls will fail until the key is provided.

# Hugging Face model identifiers. These are swappable — smaller Whisper
# variants (base/small/medium) transcribe faster, and different LLMs
# have different strengths.
ASR_MODEL = "openai/whisper-large-v3"
LLM_MODEL = "mistralai/Mistral-7B-Instruct-v0.2"

# Build the Inference client; it authenticates with the API key token.
try:
    hf_inference = InferenceClient(token=HF_API_KEY)
    print("Hugging Face Inference Client initialized.")
except Exception as e:
    print(f"ERROR: Failed to initialize Hugging Face Inference Client: {e}")
    hf_inference = None  # Downstream code can check for None before use

# The FastAPI application instance.
app = FastAPI(
    title="Video Note Taker API",
    description="Transcribes videos and generates notes using Hugging Face models.",
    version="0.1.0",
)

# --- CORS Configuration ---
# The browser-based frontend (Vercel) is served from a different origin,
# so CORS headers are required for it to reach this backend API.
origins = [
    "http://localhost:3000",  # Local frontend dev server
    # TODO: add the deployed Vercel frontend URL once it exists, e.g.
    # "https://videos-notes-app.vercel.app",
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,       # Origins permitted to call this API
    allow_credentials=True,      # Permit cookies/credentials (not needed yet)
    allow_methods=["*"],         # Any HTTP method (GET, POST, ...)
    allow_headers=["*"],         # Any request headers
)