# Mariam-cards / app.py
from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
from google.genai import types
import os
from PIL import Image
import io
import base64
import json
import logging

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

app = Flask(__name__)

# Read the Gemini API key from the environment variables
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GOOGLE_API_KEY:
    logger.error("The Gemini API key is not set in the environment variables")

# Initialize the Gemini client
try:
    client = genai.Client(api_key=GOOGLE_API_KEY)
except Exception as e:
    logger.error(f"Error while initializing the Gemini client: {e}")
    client = None  # keep the name defined so later requests fail with a clear error


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/free')
def maintenance():
    return render_template('maj.html')


def process_image(image_data):
    """Process the uploaded image and return its base64-encoded PNG representation."""
    try:
        img = Image.open(io.BytesIO(image_data))
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()
        return img_str
    except Exception as e:
        logger.error(f"Error while processing the image: {e}")
        raise


def stream_gemini_response(model_name, image_str, thinking_budget=None):
    """Generate the Gemini model's response and stream it as Server-Sent Events."""
    mode = 'starting'
    config_kwargs = {}
    if thinking_budget is not None:
        config_kwargs["thinking_config"] = types.ThinkingConfig(thinking_budget=thinking_budget)
    try:
        response = client.models.generate_content_stream(
            model=model_name,
            contents=[
                {'inline_data': {'mime_type': 'image/png', 'data': image_str}},
                # Prompt kept verbatim: asks the model to solve in French with LaTeX rendering
                "Résous ça en français with rendering latex"
            ],
            config=types.GenerateContentConfig(**config_kwargs)
        )
        for chunk in response:
            if not hasattr(chunk, 'candidates') or not chunk.candidates:
                continue
            content = chunk.candidates[0].content
            if content is None or not content.parts:
                continue
            for part in content.parts:
                if hasattr(part, 'thought') and part.thought:
                    if mode != "thinking":
                        yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                        mode = "thinking"
                else:
                    if mode != "answering":
                        yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                        mode = "answering"
                if hasattr(part, 'text') and part.text:
                    yield f'data: {json.dumps({"content": part.text})}\n\n'
    except Exception as e:
        logger.error(f"Error during generation with model {model_name}: {e}")
        yield f'data: {json.dumps({"error": str(e)})}\n\n'


@app.route('/solve', methods=['POST'])
def solve():
    """Endpoint using the Pro model with extended thinking capability."""
    if 'image' not in request.files:
        return jsonify({'error': 'No image provided'}), 400
    try:
        image_data = request.files['image'].read()
        img_str = process_image(image_data)
        return Response(
            stream_with_context(stream_gemini_response(
                model_name="gemini-2.5-pro-exp-03-25",
                image_str=img_str,
                thinking_budget=8000
            )),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )
    except Exception as e:
        logger.error(f"Error in /solve: {e}")
        return jsonify({'error': str(e)}), 500
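
# Example request (an assumption, not part of the original app: the server is
# assumed to run locally on port 5000 and `problem.png` is a placeholder file
# name). `-N` disables curl's output buffering so the stream prints as it arrives:
#   curl -N -F "image=@problem.png" http://localhost:5000/solve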


@app.route('/solved', methods=['POST'])
def solved():
    """Endpoint using the Flash model (faster)."""
    if 'image' not in request.files:
        return jsonify({'error': 'No image provided'}), 400
    try:
        image_data = request.files['image'].read()
        img_str = process_image(image_data)
        return Response(
            stream_with_context(stream_gemini_response(
                model_name="gemini-2.5-flash-preview-04-17",
                image_str=img_str
            )),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )
    except Exception as e:
        logger.error(f"Error in /solved: {e}")
        return jsonify({'error': str(e)}), 500
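

# A minimal client-side sketch, not part of the original app, showing how the SSE
# events yielded above ({"mode": ...}, {"content": ...}, {"error": ...}) could be
# consumed. It assumes the third-party `requests` package is available; the URL
# and file name are placeholders.
def example_consume_sse(url='http://localhost:5000/solve', image_path='problem.png'):
    """Hypothetical consumer: posts an image and prints the streamed events."""
    import requests  # local import: needed only for this example, not by the server
    with open(image_path, 'rb') as f:
        resp = requests.post(url, files={'image': f}, stream=True)
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith('data: '):
            continue  # skip the blank separator lines between SSE events
        event = json.loads(line[len('data: '):])
        if 'mode' in event:
            print(f"\n--- {event['mode']} ---")  # mode switch: thinking/answering
        elif 'content' in event:
            print(event['content'], end='', flush=True)  # incremental answer text
        elif 'error' in event:
            print(f"error: {event['error']}")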


if __name__ == '__main__':
    # In production, change these settings
    app.run(host='0.0.0.0', port=5000, debug=False)