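"""Flask application that streams Gemini-generated solutions for uploaded images.

Each endpoint accepts an image upload and streams the model output back to the
client as server-sent events.
"""
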
from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
from google.genai import types
import os
from PIL import Image
import io
import base64
import json
import logging

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

app = Flask(__name__)

# Read the Gemini API key from the environment variables
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
if not GOOGLE_API_KEY:
    logger.error("The Gemini API key is not set in the environment variables")

# Initialize the Gemini client
try:
    client = genai.Client(api_key=GOOGLE_API_KEY)
except Exception as e:
    logger.error(f"Error while initializing the Gemini client: {e}")


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/free')
def maintenance():
    return render_template('maj.html')


def process_image(image_data):
    """Convert the uploaded image to PNG and return its base64 representation."""
    try:
        img = Image.open(io.BytesIO(image_data))
        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()
        return img_str
    except Exception as e:
        logger.error(f"Error while processing the image: {e}")
        raise


def stream_gemini_response(model_name, image_str, thinking_budget=None):
    """Generate the Gemini response and stream it as server-sent events.

    Each event carries a JSON payload: {"mode": ...} when the model switches
    between thinking and answering, {"content": ...} for text chunks, and
    {"error": ...} if generation fails.
    """
    mode = 'starting'
    config_kwargs = {}
    if thinking_budget:
        config_kwargs["thinking_config"] = types.ThinkingConfig(thinking_budget=thinking_budget)
    try:
        response = client.models.generate_content_stream(
            model=model_name,
            contents=[
                {'inline_data': {'mime_type': 'image/png', 'data': image_str}},
                "Résous ça en français with rendering latex"
            ],
            config=types.GenerateContentConfig(**config_kwargs)
        )
        for chunk in response:
            if not hasattr(chunk, 'candidates') or not chunk.candidates:
                continue
            for part in chunk.candidates[0].content.parts:
                # Signal mode changes so the client can render thoughts separately.
                if hasattr(part, 'thought') and part.thought:
                    if mode != "thinking":
                        yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                        mode = "thinking"
                else:
                    if mode != "answering":
                        yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                        mode = "answering"
                if hasattr(part, 'text') and part.text:
                    yield f'data: {json.dumps({"content": part.text})}\n\n'
    except Exception as e:
        logger.error(f"Error during generation with model {model_name}: {e}")
        yield f'data: {json.dumps({"error": str(e)})}\n\n'


@app.route('/solve', methods=['POST'])
def solve():
    """Endpoint using the Pro model with an extended thinking budget."""
    if 'image' not in request.files:
        return jsonify({'error': 'No image provided'}), 400
    try:
        image_data = request.files['image'].read()
        img_str = process_image(image_data)
        return Response(
            stream_with_context(stream_gemini_response(
                model_name="gemini-2.5-pro-exp-03-25",
                image_str=img_str,
                thinking_budget=8000
            )),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )
    except Exception as e:
        logger.error(f"Error in /solve: {e}")
        return jsonify({'error': str(e)}), 500


@app.route('/solved', methods=['POST'])
def solved():
    """Endpoint using the Flash model (faster, no extended thinking)."""
    if 'image' not in request.files:
        return jsonify({'error': 'No image provided'}), 400
    try:
        image_data = request.files['image'].read()
        img_str = process_image(image_data)
        return Response(
            stream_with_context(stream_gemini_response(
                model_name="gemini-2.5-flash-preview-04-17",
                image_str=img_str
            )),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )
    except Exception as e:
        logger.error(f"Error in /solved: {e}")
        return jsonify({'error': str(e)}), 500


if __name__ == '__main__':
    # In production, adjust these settings
    app.run(host='0.0.0.0', port=5000, debug=False)
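
# Example request (illustrative only; "problem.png" is a placeholder file name):
#   curl -N -F "image=@problem.png" http://localhost:5000/solved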