# NOTE(review): the original paste began with Hugging Face Spaces UI residue
# ("Spaces: / Running / Running") — not source code. Converted to this comment
# so the file parses.
import io
import os

from dotenv import load_dotenv
from fastapi import FastAPI, Request, Form, UploadFile, File
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
import google.generativeai as genai
import markdown
from PIL import Image
# Load environment variables from a local .env file, if present.
load_dotenv()

# SECURITY FIX: the original code shipped a hardcoded Google API key as a
# fallback. A key committed to source is leaked and must be revoked; the key
# is now read exclusively from the environment and startup fails loudly if
# it is missing.
API_KEY = os.getenv("GOOGLE_API_KEY")
if not API_KEY:
    raise RuntimeError("GOOGLE_API_KEY environment variable is not set")
genai.configure(api_key=API_KEY)

app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")

model = genai.GenerativeModel('gemini-2.0-flash')

# Global, single-session chat state shared by all request handlers.
# NOTE(review): module-level state means every visitor shares one
# conversation — fine for a single-user demo, not for multi-user deployment.
chat = None        # active Gemini chat session; created lazily by handle_input()
chat_history = []  # list of {"role": "user"|"model", "content": str} for the UI
async def root(request: Request):
    """Render the chat page with the conversation accumulated so far.

    NOTE(review): no route decorator is visible in this chunk — presumably
    this is registered as GET "/" (the paste may have dropped the decorator);
    confirm against the full file.
    """
    context = {
        "request": request,
        "chat_history": chat_history,
    }
    return templates.TemplateResponse("index.html", context)
async def handle_input(
    request: Request,
    user_input: str = Form(...),
    image: UploadFile = File(None)
):
    """Handle a chat form submission: text plus an optional image.

    Appends the user's message — and the model's reply, or an error notice —
    to the global ``chat_history``, then redirects back to "/" so a browser
    refresh does not resubmit the form (Post-Redirect-Get).

    NOTE(review): no route decorator is visible in this chunk — presumably
    registered as a POST endpoint elsewhere; confirm against the full file.
    """
    global chat, chat_history

    # Lazily create the Gemini chat session on first use.
    if chat is None:
        chat = model.start_chat(history=[])

    # BUG FIX: user_message was previously assigned only inside
    # `if user_input:`, raising UnboundLocalError when the text field was
    # submitted empty alongside an image upload.
    parts = []
    user_message = user_input or ""
    if user_input:
        parts.append(user_input)

    # BUG FIX: content_type can be None for malformed uploads — guard before
    # calling .startswith on it.
    if image and (image.content_type or "").startswith("image/"):
        data = await image.read()
        try:
            img = Image.open(io.BytesIO(data))
            parts.append(img)
            user_message += " [Image uploaded]"  # Indicate image in chat history
        except Exception as e:
            chat_history.append({
                "role": "model",
                "content": markdown.markdown(f"**Error loading image:** {e}")
            })
            return RedirectResponse("/", status_code=303)

    # Robustness: nothing usable to send (empty text, no valid image) —
    # don't call the model with an empty parts list; just re-render.
    if not parts:
        return RedirectResponse("/", status_code=303)

    # Store the user's message for display.
    chat_history.append({"role": "user", "content": user_message})

    try:
        # Send the multimodal message parts to the Gemini model.
        resp = chat.send_message(parts)
        # NOTE(review): model replies are stored raw while error messages are
        # run through markdown.markdown — presumably the template renders
        # model content as-is; confirm for consistency.
        raw = resp.text
        chat_history.append({"role": "model", "content": raw})
    except Exception as e:
        err = f"**Error:** {e}"
        chat_history.append({
            "role": "model",
            "content": markdown.markdown(err)
        })

    # Post-Redirect-Get
    return RedirectResponse("/", status_code=303)
# Reset the conversation: drop the Gemini session and wipe the UI history.
async def new_chat():
    """Discard the current chat session and history, then redirect to "/"."""
    global chat, chat_history
    chat_history.clear()
    chat = None
    return RedirectResponse("/", status_code=303)