Docfile committed on
Commit
a0afec0
·
verified ·
1 Parent(s): e7761b5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -59
app.py CHANGED
@@ -1,30 +1,31 @@
1
- # --- START OF CORRECTED app.py ---
2
 
3
  from flask import Flask, render_template, request, jsonify, Response, stream_with_context
4
- # Revert to the original google.genai import and usage
5
  from google import genai
6
- # Make sure types is imported from google.genai if needed for specific model config
7
  from google.genai import types
 
 
8
  import os
9
  from PIL import Image
10
  import io
11
  import base64
12
  import json
13
- import re # Import regex if needed for advanced text processing (though less likely without streaming logic parsing)
14
 
15
  app = Flask(__name__)
16
 
17
  GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
18
 
19
- # Use the original client initialization
20
  client = genai.Client(
21
  api_key=GOOGLE_API_KEY,
22
  )
23
 
24
  # Ensure API key is available (good practice)
25
  if not GOOGLE_API_KEY:
26
- print("WARNING: GEMINI_API_KEY environment variable not set.")
27
- # Handle this case appropriately, e.g., exit or show an error on the page
28
 
29
  # --- Routes for index and potentially the Pro version (kept for context) ---
30
  @app.route('/')
@@ -37,8 +38,7 @@ def indexx():
37
  # This route serves the free version HTML
38
  return render_template('maj.html')
39
 
40
- # --- Original /solve route (Pro version, streaming) - Kept for reference ---
41
- # If you want the Pro version (/solve) to also be non-streaming, apply similar changes as below
42
  @app.route('/solve', methods=['POST'])
43
  def solve():
44
  try:
@@ -56,16 +56,14 @@ def solve():
56
 
57
  buffered = io.BytesIO()
58
  img.save(buffered, format="PNG")
59
- img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64 for this route as in original
60
 
61
  def generate():
62
  mode = 'starting'
63
  try:
64
  response = client.models.generate_content_stream(
65
- # Use the model name for the Pro version as in your original code
66
  model="gemini-2.5-pro-exp-03-25", # Your original model name
67
  contents=[
68
- # Pass image as inline_data with base64 as in your original code
69
  {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
70
  """Résous cet exercice en français avec du LaTeX.
71
  Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
@@ -81,11 +79,10 @@ def solve():
81
  )
82
  )
83
 
84
- # Process the streaming response as you had it
85
  for chunk in response:
 
86
  if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
87
  for part in chunk.candidates[0].content.parts:
88
- # Keep your original logic for emitting different modes in the stream
89
  if hasattr(part, 'thought') and part.thought:
90
  if mode != "thinking":
91
  yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
@@ -110,7 +107,7 @@ def solve():
110
  mode = "answering"
111
  if hasattr(part, 'text') and part.text:
112
  yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
113
- # Handle cases where a chunk might not have candidates/parts immediately, or handle errors
114
  elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
115
  error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
116
  print(error_msg)
@@ -139,12 +136,14 @@ def solve():
139
  )
140
 
141
  except Exception as e:
142
- print(f"Error in /solve endpoint: {e}")
 
 
143
  # Return JSON error for fetch API if streaming setup fails
144
  return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
145
 
146
 
147
- # --- MODIFIED /solved route (Free version, non-streaming) using original SDK syntax ---
148
  @app.route('/solved', methods=['POST'])
149
  def solved():
150
  try:
@@ -160,17 +159,15 @@ def solved():
160
  except Exception as img_err:
161
  return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
162
 
163
- buffered = io.BytesBytesIO()
164
  img.save(buffered, format="PNG")
165
- img_str = base64.b64encode(buffered.getvalue()).decode()
166
 
167
  # Use the non-streaming generate_content method
168
- # Use the model name for the Free version as in your original code
169
  model_name = "gemini-2.5-flash-preview-04-17" # Your original free model name
170
 
171
- # Prepare the content using inline_data with base64 string as in your original code
172
  contents = [
173
- {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
174
  """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
175
  Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
176
  Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
@@ -181,7 +178,6 @@ def solved():
181
  model=model_name,
182
  contents=contents,
183
  config=types.GenerateContentConfig(
184
- # Removed thinking_config as it's not relevant for non-streaming output
185
  tools=[types.Tool(
186
  code_execution=types.ToolCodeExecution()
187
  )]
@@ -194,61 +190,85 @@ def solved():
194
  # Check if the response has candidates and parts
195
  if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
196
  for part in response.candidates[0].content.parts:
197
- # Process parts based on attribute existence
198
  if hasattr(part, 'text') and part.text:
199
  full_solution += part.text
200
  elif hasattr(part, 'executable_code') and part.executable_code:
201
- # Format code block using Markdown, as the frontend expects this
202
  full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
203
- # Check for the result attribute name based on your SDK version's structure
204
- # It might be `code_execution_result` as in your original code, or nested
205
  elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
206
- # Format execution result block using Markdown
207
  output_str = part.code_execution_result.output
208
  full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
209
- # Add other potential part types if necessary (e.g., function_call, etc.)
210
- # Note: 'thought' parts are ignored as requested
211
 
212
- # Ensure we have some content, otherwise return a message
213
  if not full_solution.strip():
214
- # Check for finish reasons or safety ratings
215
- finish_reason = response.candidates[0].finish_reason.name if response.candidates and response.candidates[0].finish_reason else "UNKNOWN"
216
- safety_ratings = response.candidates[0].safety_ratings if response.candidates else []
217
- print(f"Generation finished with reason: {finish_reason}, Safety: {safety_ratings}") # Log details
218
- if finish_reason == 'SAFETY':
219
- full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
220
- elif finish_reason == 'RECITATION':
221
- full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
222
- # Also check prompt feedback for blocking reasons
223
- elif response.prompt_feedback and response.prompt_feedback.block_reason:
224
  block_reason = response.prompt_feedback.block_reason.name
225
  full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
 
 
 
 
 
 
 
 
 
 
 
 
 
226
  else:
227
- full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."
 
 
228
 
229
 
230
  # Return the complete solution as JSON
231
- # Use strip() to remove leading/trailing whitespace from the full solution
232
  return jsonify({'solution': full_solution.strip()})
233
 
234
- # Catch specific API errors from your original SDK
235
- except genai.core.exceptions.GoogleAPIError as api_error:
236
- print(f"GenAI API Error: {api_error}")
237
- # Check if the error response has details, like safety block
238
- error_detail = str(api_error)
239
- if "safety" in error_detail.lower():
240
- return jsonify({'error': 'Le contenu a été bloqué pour des raisons de sécurité par l\'API.'}), 400
241
- elif "blocked" in error_detail.lower():
242
- return jsonify({'error': 'La requête a été bloquée par l\'API.'}), 400
243
- else:
244
- return jsonify({'error': f'Erreur de l\'API GenAI: {error_detail}'}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
245
  except Exception as e:
246
- # Log the full error for debugging
247
- import traceback
248
- print(f"Error in /solved endpoint: {e}")
249
  print(traceback.format_exc())
250
  # Provide a generic error message to the user
251
- return jsonify({'error': f'Une erreur interne est survenue lors du traitement: {str(e)}'}), 500
252
 
253
 
254
  if __name__ == '__main__':
@@ -256,4 +276,4 @@ if __name__ == '__main__':
256
  # Remove debug=True in production
257
  app.run(debug=True, host='0.0.0.0', port=5000) # Example port
258
 
259
- # --- END OF CORRECTED app.py ---
 
1
+ # --- START OF CORRECTED_AGAIN app.py ---
2
 
3
  from flask import Flask, render_template, request, jsonify, Response, stream_with_context
4
+ # Utilisation de l'import et de l'initialisation de votre code original
5
  from google import genai
 
6
  from google.genai import types
7
+ # Import des exceptions potentielles si elles sont dans google.api_core
8
+ # from google.api_core import exceptions as api_exceptions
9
  import os
10
  from PIL import Image
11
  import io
12
  import base64
13
  import json
14
+ import traceback # Import traceback pour un meilleur log des erreurs
15
 
16
  app = Flask(__name__)
17
 
18
  GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
19
 
20
+ # Utilisation de l'initialisation de votre code original
21
  client = genai.Client(
22
  api_key=GOOGLE_API_KEY,
23
  )
24
 
25
  # Ensure API key is available (good practice)
26
  if not GOOGLE_API_KEY:
27
+ print("WARNING: GEMINI_API_KEY environment variable not set. API calls will likely fail.")
28
+ # Consider adding a check before allowing API calls if the key is missing
29
 
30
  # --- Routes for index and potentially the Pro version (kept for context) ---
31
  @app.route('/')
 
38
  # This route serves the free version HTML
39
  return render_template('maj.html')
40
 
41
+ # --- Original /solve route (Pro version, streaming) - Kept as is ---
 
42
  @app.route('/solve', methods=['POST'])
43
  def solve():
44
  try:
 
56
 
57
  buffered = io.BytesIO()
58
  img.save(buffered, format="PNG")
59
+ img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64 for this route
60
 
61
  def generate():
62
  mode = 'starting'
63
  try:
64
  response = client.models.generate_content_stream(
 
65
  model="gemini-2.5-pro-exp-03-25", # Your original model name
66
  contents=[
 
67
  {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
68
  """Résous cet exercice en français avec du LaTeX.
69
  Si nécessaire, utilise du code Python pour effectuer les calculs complexes.
 
79
  )
80
  )
81
 
 
82
  for chunk in response:
83
+ # Process chunks as in your original streaming logic
84
  if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
85
  for part in chunk.candidates[0].content.parts:
 
86
  if hasattr(part, 'thought') and part.thought:
87
  if mode != "thinking":
88
  yield 'data: ' + json.dumps({"mode": "thinking"}) + '\n\n'
 
107
  mode = "answering"
108
  if hasattr(part, 'text') and part.text:
109
  yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
110
+ # Handle prompt feedback or finish reasons in streaming
111
  elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
112
  error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
113
  print(error_msg)
 
136
  )
137
 
138
  except Exception as e:
139
+ # Log the full error for debugging
140
+ print(f"Error in /solve endpoint (setup or initial request): {e}")
141
+ print(traceback.format_exc())
142
  # Return JSON error for fetch API if streaming setup fails
143
  return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
144
 
145
 
146
+ # --- MODIFIED /solved route (Free version, non-streaming) - Corrected Exception Handling ---
147
  @app.route('/solved', methods=['POST'])
148
  def solved():
149
  try:
 
159
  except Exception as img_err:
160
  return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
161
 
162
+ buffered = io.BytesIO() # Keep BytesIO
163
  img.save(buffered, format="PNG")
164
+ img_str = base64.b64encode(buffered.getvalue()).decode() # Keep base64
165
 
166
  # Use the non-streaming generate_content method
 
167
  model_name = "gemini-2.5-flash-preview-04-17" # Your original free model name
168
 
 
169
  contents = [
170
+ {'inline_data': {'mime_type': 'image/png', 'data': img_str}}, # Use inline_data with base64
171
  """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
172
  Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
173
  Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
 
178
  model=model_name,
179
  contents=contents,
180
  config=types.GenerateContentConfig(
 
181
  tools=[types.Tool(
182
  code_execution=types.ToolCodeExecution()
183
  )]
 
190
  # Check if the response has candidates and parts
191
  if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
192
  for part in response.candidates[0].content.parts:
 
193
  if hasattr(part, 'text') and part.text:
194
  full_solution += part.text
195
  elif hasattr(part, 'executable_code') and part.executable_code:
 
196
  full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
197
+ # Check for the result attribute name - reverting to your original structure if possible
198
+ # Based on your original code, code_execution_result seemed to be the attribute
199
  elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
 
200
  output_str = part.code_execution_result.output
201
  full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
202
+ # Note: 'thought' parts are ignored
 
203
 
204
+ # Handle cases where the response is empty or blocked
205
  if not full_solution.strip():
206
+ # Check for prompt feedback blocking or finish reasons
207
+ if response.prompt_feedback and response.prompt_feedback.block_reason:
 
 
 
 
 
 
 
 
208
  block_reason = response.prompt_feedback.block_reason.name
209
  full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
210
+ print(f"Generation blocked by prompt feedback: {block_reason}") # Log it
211
+
212
+ elif response.candidates and response.candidates[0].finish_reason:
213
+ finish_reason = response.candidates[0].finish_reason.name
214
+ # Provide specific messages for known non-STOP finish reasons
215
+ if finish_reason == 'SAFETY':
216
+ full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
217
+ elif finish_reason == 'RECITATION':
218
+ full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
219
+ else:
220
+ # Generic message for other finish reasons (e.g., MAX_TOKENS)
221
+ full_solution = f"La génération s'est terminée prématurément ({finish_reason}). Le problème est peut-être trop complexe ou nécessite plus de tokens."
222
+ print(f"Generation finished early: {finish_reason}") # Log it
223
  else:
224
+ # Fallback if no specific reason is found but the response is empty
225
+ full_solution = "Désolé, je n'ai pas pu générer de solution pour cette image."
226
+ print("Generation resulted in empty content without specific block/finish reason.")
227
 
228
 
229
  # Return the complete solution as JSON
 
230
  return jsonify({'solution': full_solution.strip()})
231
 
232
+ # --- Corrected Exception Handling ---
233
+ # Catching a more general Google API error if available, otherwise just Exception
234
+ # The specific exception name might depend on the exact SDK version.
235
+ # We'll try a common one first. If this still gives AttributeError,
236
+ # we'll rely on the generic Exception catch below.
237
+ try:
238
+ # Attempt to import the specific exception type dynamically
239
+ # This is safer than assuming its location
240
+ from google.api_core.exceptions import GoogleAPIError
241
+ # If the import succeeds, catch that specific error
242
+ except GoogleAPIError as api_error:
243
+ print(f"Google API Error caught: {api_error}")
244
+ # Provide error details to the client, avoiding revealing full traceback
245
+ error_message = "Une erreur est survenue lors de la communication avec l'API GenAI."
246
+ # Attempt to extract a more specific message if possible from the error object
247
+ if hasattr(api_error, 'message'):
248
+ error_message = f"Erreur API: {api_error.message}"
249
+ elif hasattr(api_error, 'details'):
250
+ error_message = f"Erreur API: {api_error.details}"
251
+ else:
252
+ error_message = f"Erreur API: {str(api_error)}" # Fallback to string representation
253
+
254
+ # Check for common error phrases to provide user-friendly messages
255
+ if "blocked" in str(api_error).lower() or "safety" in str(api_error).lower():
256
+ error_message = 'Le contenu a été bloqué par l\'API pour des raisons de sécurité.'
257
+ return jsonify({'error': error_message}), 400 # Use 400 for client-side issue (the prompt)
258
+
259
+ return jsonify({'error': error_message}), 500 # Use 500 for server-side API issues
260
+ except ImportError:
261
+ # If GoogleAPIError is not found in api_core, we'll fall through to the generic Exception catch
262
+ print("Could not import google.api_core.exceptions.GoogleAPIError. Using generic exception handling.")
263
+ pass # Continue to the next except block
264
+
265
  except Exception as e:
266
+ # Catch any other unexpected errors during processing or API call
267
+ print(f"An unexpected error occurred in /solved endpoint: {e}")
268
+ # Log the full traceback for server-side debugging
269
  print(traceback.format_exc())
270
  # Provide a generic error message to the user
271
+ return jsonify({'error': f'Une erreur interne est survenue: {str(e)}'}), 500
272
 
273
 
274
  if __name__ == '__main__':
 
276
  # Remove debug=True in production
277
  app.run(debug=True, host='0.0.0.0', port=5000) # Example port
278
 
279
+ # --- END OF CORRECTED_AGAIN app.py ---