Docfile committed
Commit 949f8bc · verified · 1 Parent(s): a0afec0

Update app.py

Files changed (1):
  app.py  +64 -86
app.py CHANGED
@@ -1,31 +1,33 @@
-# --- START OF CORRECTED_AGAIN app.py ---
 
 from flask import Flask, render_template, request, jsonify, Response, stream_with_context
-# Use your original code's import and initialization
 from google import genai
 from google.genai import types
-# Import the potential exceptions if they are in google.api_core
-# from google.api_core import exceptions as api_exceptions
 import os
 from PIL import Image
 import io
 import base64
 import json
-import traceback  # Import traceback for better error logging
 
 app = Flask(__name__)
 
 GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
 
-# Use your original code's client initialization
 client = genai.Client(
     api_key=GOOGLE_API_KEY,
 )
 
 # Ensure API key is available (good practice)
 if not GOOGLE_API_KEY:
-    print("WARNING: GEMINI_API_KEY environment variable not set. API calls will likely fail.")
-    # Consider adding a check before allowing API calls if the key is missing
 
 # --- Routes for index and potentially the Pro version (kept for context) ---
 @app.route('/')
@@ -56,7 +58,7 @@ def solve():
 
         buffered = io.BytesIO()
         img.save(buffered, format="PNG")
-        img_str = base64.b64encode(buffered.getvalue()).decode()  # Keep base64 for this route
 
         def generate():
             mode = 'starting'
@@ -80,7 +82,6 @@ def solve():
                 )
 
                 for chunk in response:
-                    # Process chunks as in your original streaming logic
                     if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
                         for part in chunk.candidates[0].content.parts:
                             if hasattr(part, 'thought') and part.thought:
@@ -107,7 +108,7 @@ def solve():
                                     mode = "answering"
                             if hasattr(part, 'text') and part.text:
                                 yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
-                    # Handle prompt feedback or finish reasons in streaming
                     elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
                         error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
                         print(error_msg)
@@ -121,7 +122,6 @@ def solve():
                         yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                         break  # Stop processing on finish reason
 
-
             except Exception as e:
                 print(f"Error during streaming generation: {e}")
                 yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'
@@ -136,14 +136,11 @@ def solve():
         )
 
     except Exception as e:
-        # Log the full error for debugging
-        print(f"Error in /solve endpoint (setup or initial request): {e}")
-        print(traceback.format_exc())
-        # Return JSON error for fetch API if streaming setup fails
         return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
 
 
-# --- MODIFIED /solved route (Free version, non-streaming) - Corrected Exception Handling ---
 @app.route('/solved', methods=['POST'])
 def solved():
     try:
@@ -159,21 +156,19 @@ def solved():
         except Exception as img_err:
             return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
 
-        buffered = io.BytesIO()  # Keep BytesIO
         img.save(buffered, format="PNG")
-        img_str = base64.b64encode(buffered.getvalue()).decode()  # Keep base64
 
-        # Use the non-streaming generate_content method
         model_name = "gemini-2.5-flash-preview-04-17"  # Your original free model name
 
         contents = [
-            {'inline_data': {'mime_type': 'image/png', 'data': img_str}},  # Use inline_data with base64
             """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
             Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
             Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
         ]
 
-        # Call the non-streaming generation method using the original client object
         response = client.models.generate_content(
             model=model_name,
             contents=contents,
@@ -182,10 +177,8 @@ def solved():
                     code_execution=types.ToolCodeExecution()
                 )]
             )
-            # Note: No stream=True here for non-streaming
         )
 
-        # Aggregate the response parts into a single string
         full_solution = ""
         # Check if the response has candidates and parts
         if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
@@ -194,81 +187,66 @@ def solved():
                     full_solution += part.text
                 elif hasattr(part, 'executable_code') and part.executable_code:
                     full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
-                # Check for the result attribute name - reverting to your original structure if possible
-                # Based on your original code, code_execution_result seemed to be the attribute
                 elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
                     output_str = part.code_execution_result.output
                     full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
-                # Note: 'thought' parts are ignored
 
-        # Handle cases where the response is empty or blocked
         if not full_solution.strip():
-            # Check for prompt feedback blocking or finish reasons
-            if response.prompt_feedback and response.prompt_feedback.block_reason:
-                block_reason = response.prompt_feedback.block_reason.name
-                full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
-                print(f"Generation blocked by prompt feedback: {block_reason}")  # Log it
-
-            elif response.candidates and response.candidates[0].finish_reason:
-                finish_reason = response.candidates[0].finish_reason.name
-                # Provide specific messages for known non-STOP finish reasons
-                if finish_reason == 'SAFETY':
-                    full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
-                elif finish_reason == 'RECITATION':
-                    full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
-                else:
-                    # Generic message for other finish reasons (e.g., MAX_TOKENS)
-                    full_solution = f"La génération s'est terminée prématurément ({finish_reason}). Le problème est peut-être trop complexe ou nécessite plus de tokens."
-                print(f"Generation finished early: {finish_reason}")  # Log it
-            else:
-                # Fallback if no specific reason is found but the response is empty
-                full_solution = "Désolé, je n'ai pas pu générer de solution pour cette image."
-                print("Generation resulted in empty content without specific block/finish reason.")
 
         # Return the complete solution as JSON
         return jsonify({'solution': full_solution.strip()})
 
-    # --- Corrected Exception Handling ---
-    # Catching a more general Google API error if available, otherwise just Exception
-    # The specific exception name might depend on the exact SDK version.
-    # We'll try a common one first. If this still gives AttributeError,
-    # we'll rely on the generic Exception catch below.
-    try:
-        # Attempt to import the specific exception type dynamically
-        # This is safer than assuming its location
-        from google.api_core.exceptions import GoogleAPIError
-        # If the import succeeds, catch that specific error
-    except GoogleAPIError as api_error:
-        print(f"Google API Error caught: {api_error}")
-        # Provide error details to the client, avoiding revealing full traceback
-        error_message = "Une erreur est survenue lors de la communication avec l'API GenAI."
-        # Attempt to extract a more specific message if possible from the error object
-        if hasattr(api_error, 'message'):
-            error_message = f"Erreur API: {api_error.message}"
-        elif hasattr(api_error, 'details'):
-            error_message = f"Erreur API: {api_error.details}"
-        else:
-            error_message = f"Erreur API: {str(api_error)}"  # Fallback to string representation
-
-        # Check for common error phrases to provide user-friendly messages
-        if "blocked" in str(api_error).lower() or "safety" in str(api_error).lower():
-            error_message = 'Le contenu a été bloqué par l\'API pour des raisons de sécurité.'
-            return jsonify({'error': error_message}), 400  # Use 400 for client-side issue (the prompt)
-
-        return jsonify({'error': error_message}), 500  # Use 500 for server-side API issues
-    except ImportError:
-        # If GoogleAPIError is not found in api_core, we'll fall through to the generic Exception catch
-        print("Could not import google.api_core.exceptions.GoogleAPIError. Using generic exception handling.")
-        pass  # Continue to the next except block
 
     except Exception as e:
-        # Catch any other unexpected errors during processing or API call
-        print(f"An unexpected error occurred in /solved endpoint: {e}")
-        # Log the full traceback for server-side debugging
         print(traceback.format_exc())
         # Provide a generic error message to the user
-        return jsonify({'error': f'Une erreur interne est survenue: {str(e)}'}), 500
 
 
 if __name__ == '__main__':
@@ -276,4 +254,4 @@ if __name__ == '__main__':
     # Remove debug=True in production
     app.run(debug=True, host='0.0.0.0', port=5000)  # Example port
 
-# --- END OF CORRECTED_AGAIN app.py ---
 
+# --- START OF CORRECTED app.py (v3 - Fixes AttributeError) ---
 
 from flask import Flask, render_template, request, jsonify, Response, stream_with_context
+# Revert to the original google.genai import and usage
 from google import genai
+# Make sure types is imported from google.genai if needed for specific model config
 from google.genai import types
+# Correct import for GoogleAPIError with the original genai client
+from google.api_core.exceptions import GoogleAPIError  # <-- corrected import
 import os
 from PIL import Image
 import io
 import base64
 import json
+import re
 
 app = Flask(__name__)
 
 GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
 
+# Use the original client initialization
 client = genai.Client(
     api_key=GOOGLE_API_KEY,
 )
 
 # Ensure API key is available (good practice)
 if not GOOGLE_API_KEY:
+    print("WARNING: GEMINI_API_KEY environment variable not set.")
+    # Handle this case appropriately, e.g., exit or show an error on the page
+    # In a real application, you might want to raise an error or redirect
 
 # --- Routes for index and potentially the Pro version (kept for context) ---
 @app.route('/')
 
         buffered = io.BytesIO()
         img.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
 
         def generate():
             mode = 'starting'
 
                 )
 
                 for chunk in response:
                     if chunk.candidates and chunk.candidates[0].content and chunk.candidates[0].content.parts:
                         for part in chunk.candidates[0].content.parts:
                             if hasattr(part, 'thought') and part.thought:
 
                                     mode = "answering"
                             if hasattr(part, 'text') and part.text:
                                 yield 'data: ' + json.dumps({"content": part.text}) + '\n\n'
+                    # Handle cases where a chunk might not have candidates/parts, or handle errors
                     elif chunk.prompt_feedback and chunk.prompt_feedback.block_reason:
                         error_msg = f"Prompt blocked: {chunk.prompt_feedback.block_reason.name}"
                         print(error_msg)
 
                         yield 'data: ' + json.dumps({"error": error_msg}) + '\n\n'
                         break  # Stop processing on finish reason
 
             except Exception as e:
                 print(f"Error during streaming generation: {e}")
                 yield 'data: ' + json.dumps({"error": str(e)}) + '\n\n'
 
         )
 
     except Exception as e:
+        print(f"Error in /solve endpoint: {e}")
         return jsonify({'error': f'Failed to process request: {str(e)}'}), 500
 
 
+# --- MODIFIED /solved route (Free version, non-streaming) using original SDK syntax ---
 @app.route('/solved', methods=['POST'])
 def solved():
     try:
 
         except Exception as img_err:
             return jsonify({'error': f'Invalid image file: {str(img_err)}'}), 400
 
+        buffered = io.BytesIO()  # Corrected spelling: BytesBytesIO -> BytesIO
         img.save(buffered, format="PNG")
+        img_str = base64.b64encode(buffered.getvalue()).decode()
 
         model_name = "gemini-2.5-flash-preview-04-17"  # Your original free model name
 
         contents = [
+            {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
             """Résous cet exercice en français en utilisant le format LaTeX pour les mathématiques si nécessaire.
             Si tu dois effectuer des calculs complexes, utilise l'outil d'exécution de code Python fourni.
             Présente ta solution de manière claire et bien structurée. Formate le code Python dans des blocs délimités par ```python ... ``` et les résultats d'exécution dans des blocs ``` ... ```."""
         ]
 
         response = client.models.generate_content(
             model=model_name,
             contents=contents,
 
                     code_execution=types.ToolCodeExecution()
                 )]
             )
         )
 
         full_solution = ""
         # Check if the response has candidates and parts
         if response.candidates and response.candidates[0].content and response.candidates[0].content.parts:
 
                     full_solution += part.text
                 elif hasattr(part, 'executable_code') and part.executable_code:
                     full_solution += f"\n\n```python\n{part.executable_code.code}\n```\n\n"
+                # Check for the result attribute name based on your SDK version's structure
                 elif hasattr(part, 'code_execution_result') and hasattr(part.code_execution_result, 'output'):
                     output_str = part.code_execution_result.output
                     full_solution += f"\n\n**Résultat d'exécution:**\n```\n{output_str}\n```\n\n"
+        # Check for prompt_feedback on the response object for non-streaming
+        if response.prompt_feedback and response.prompt_feedback.block_reason:
+            block_reason = response.prompt_feedback.block_reason.name
+            # Add block reason to the solution or handle as error
+            if not full_solution.strip():  # If no other content generated
+                full_solution = f"Le contenu a été bloqué pour des raisons de sécurité: {block_reason}."
+            else:  # If some content was generated before blocking
+                full_solution += f"\n\n**Attention:** La réponse a pu être incomplète car le contenu a été bloqué: {block_reason}."
 
+        # Ensure we have some content, otherwise return a message or specific error
         if not full_solution.strip():
+            # Check for finish reasons on candidates
+            finish_reason = response.candidates[0].finish_reason.name if response.candidates and response.candidates[0].finish_reason else "UNKNOWN"
+            # safety_ratings = response.candidates[0].safety_ratings if response.candidates else []  # You could log or use these
+            print(f"Generation finished with reason (no content): {finish_reason}")
+            if finish_reason == 'SAFETY':
+                full_solution = "Désolé, je ne peux pas fournir de réponse en raison de restrictions de sécurité."
+            elif finish_reason == 'RECITATION':
+                full_solution = "Désolé, la réponse ne peut être fournie en raison de la politique sur les récitations."
+            elif finish_reason == 'OTHER' or finish_reason == 'UNKNOWN':  # Catch general failures
+                full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."
+            # If finish_reason is 'STOP' but no content, the generic message below applies
+
+            if not full_solution.strip():  # Fallback if reason didn't give a specific message
+                full_solution = "Désolé, je n'ai pas pu générer de solution complète pour cette image."
 
         # Return the complete solution as JSON
         return jsonify({'solution': full_solution.strip()})
 
+    # Catch specific API errors from google.api_core.exceptions
+    except GoogleAPIError as api_error:  # <-- corrected usage
+        print(f"GenAI API Error: {api_error}")
+        # Provide more user-friendly error messages based on potential API errors
+        error_message = str(api_error)
+        if "RESOURCE_EXHAUSTED" in error_message:
+            user_error = "Vous avez atteint votre quota d'utilisation de l'API. Veuillez réessayer plus tard ou vérifier votre console Google Cloud."
+        elif "400 Bad Request" in error_message or "INVALID_ARGUMENT" in error_message:
+            user_error = f"La requête à l'API est invalide : {error_message}. L'image n'a peut-être pas été comprise."
+        elif "403 Forbidden" in error_message or "PERMISSION_DENIED" in error_message:
+            user_error = "Erreur d'authentification ou de permissions avec l'API. Vérifiez votre clé API."
+        elif "50" in error_message:  # Catch 5xx errors
+            user_error = f"Erreur serveur de l'API : {error_message}. Veuillez réessayer plus tard."
+        else:
+            user_error = f'Erreur de l\'API GenAI: {error_message}'
+
+        return jsonify({'error': user_error}), api_error.code if hasattr(api_error, 'code') else 500  # Return appropriate status code if available
 
     except Exception as e:
+        # Log the full error for debugging
+        import traceback
+        print(f"Error in /solved endpoint: {e}")
         print(traceback.format_exc())
         # Provide a generic error message to the user
+        return jsonify({'error': f'Une erreur interne est survenue lors du traitement: {str(e)}'}), 500
 
 
 if __name__ == '__main__':
 
     # Remove debug=True in production
     app.run(debug=True, host='0.0.0.0', port=5000)  # Example port
 
+# --- END OF CORRECTED app.py (v3 - Fixes AttributeError) ---
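The `/solve` route streams its output as server-sent events: one `data: {...}` JSON frame per chunk, with `{"content": ...}` carrying text and `{"error": ...}` signalling failure. Below is a minimal client sketch for local testing, assuming the server runs on `localhost:5000` (as in `app.run` above) and that the upload field is named `image`; the `request.files` lookup falls outside the hunks shown, so the field name is a guess.

```python
# Hypothetical test client for the streaming /solve route.
# Assumptions: server on localhost:5000 and an 'image' multipart field
# (neither is confirmed by the diff above).
import json
import requests

with open("exercise.png", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/solve",
        files={"image": ("exercise.png", f, "image/png")},
        stream=True,  # keep the connection open to read SSE frames incrementally
    )

for line in resp.iter_lines(decode_unicode=True):
    # Each SSE frame is a single "data: <json>" line followed by a blank line
    if not line or not line.startswith("data: "):
        continue
    payload = json.loads(line[len("data: "):])
    if "error" in payload:
        print("stream error:", payload["error"])
        break
    print(payload.get("content", ""), end="", flush=True)
```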
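The non-streaming `/solved` route instead returns a single JSON object: `{"solution": ...}` on success, or `{"error": ...}` with the status code taken from `GoogleAPIError` (falling back to 500). A sketch under the same assumptions:

```python
# Hypothetical test client for the non-streaming /solved route
# (same localhost:5000 and 'image' field assumptions as above).
import requests

with open("exercise.png", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/solved",
        files={"image": ("exercise.png", f, "image/png")},
    )

body = resp.json()
if resp.ok:
    print(body["solution"])  # aggregated text, code blocks, and execution results
else:
    print(f"HTTP {resp.status_code}: {body['error']}")
```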