cuneytkaya committed (verified)
Commit cc93d7c · 1 Parent(s): 6494950

Update app.py

Files changed (1):
  1. app.py +277 -241

app.py CHANGED
@@ -1,7 +1,7 @@
  import os
  import json
  import gradio as gr
- import gradio.themes as gr_themes # Import themes for UI
  import google.generativeai as genai
  from PIL import Image
  import numpy as np
@@ -10,13 +10,13 @@ from dotenv import load_dotenv
  import traceback
  import pytesseract
  import cv2
- import time # Keep time import for potential use later, though not in original process_image

- # --- Load Environment Variables (As per Original Script) ---
  load_dotenv()
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") or HfFolder.get_token("GEMINI_API_KEY")
  if not GEMINI_API_KEY:
-     # Try to get it from Gradio secrets if running on Spaces (Added for robustness)
      try:
          import secrets
          GEMINI_API_KEY = secrets.GEMINI_API_KEY
@@ -28,25 +28,24 @@ if not GEMINI_API_KEY:

  genai.configure(api_key=GEMINI_API_KEY)

- # --- Define Model Names (As per Original Script) ---
  CLASSIFICATION_MODEL = "gemini-1.5-flash"
  SOLUTION_MODEL = "gemini-1.5-pro-latest"
  EXPLANATION_MODEL = "gemini-1.5-pro-latest"
  SIMILAR_MODEL = "gemini-1.5-pro-latest"

- print(f"Using models: Classification: {CLASSIFICATION_MODEL}, Solution: {SOLUTION_MODEL}, Explanation: {EXPLANATION_MODEL}, Similar: {SIMILAR_MODEL}")

- # --- Set up Gemini for image analysis (As per Original Script) ---
- MODEL_IMAGE = "gemini-1.5-pro-latest" # Use Gemini for OCR as well
-
- # --- Set Tesseract Path (As per Original Script, with robustness check) ---
  # Make sure this path is correct for your deployment environment
  try:
      # Check common paths
-     if os.path.exists('/opt/homebrew/bin/tesseract'): # Original path check
-         pytesseract.pytesseract.tesseract_cmd = '/opt/homebrew/bin/tesseract'
-     elif os.path.exists('/usr/bin/tesseract'): # Added common Linux path
          pytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'
      else:
          # Attempt to find Tesseract in PATH (might work in some environments)
          from shutil import which
@@ -55,315 +54,359 @@ try:
          pytesseract.pytesseract.tesseract_cmd = tesseract_path
      else:
          print("Warning: Tesseract command not found at specified paths or in PATH. Fallback OCR might fail.")
  except Exception as e:
      print(f"Warning: Error setting Tesseract path: {e}. Fallback OCR might fail.")


- # --- Backend Functions (Copied *EXACTLY* from Original User Script) ---

- # Extract text using Gemini directly (with Tesseract as fallback)
  def extract_text_with_gemini(image):
-     """Extract text from image using Gemini Pro Vision directly"""
      try:
          if isinstance(image, np.ndarray):
              image = Image.fromarray(image)

          model = genai.GenerativeModel(MODEL_IMAGE)
-         prompt = """
-         Extract ALL text, numbers, and mathematical equations from this image precisely.
-         Include ALL symbols, numbers, letters, and mathematical notation exactly as they appear.
-         Format any equations properly and maintain their layout.
-         Don't explain the content, just extract the text verbatim.
-         """

-         response = model.generate_content([prompt, image])
          extracted_text = response.text.strip()

-         # If Gemini returns a very short result, try Tesseract as fallback
-         if len(extracted_text) < 10:
-             print("Gemini returned limited text, trying Tesseract as fallback")
-             if isinstance(image, Image.Image):
-                 image_array = np.array(image)
-             else:
-                 image_array = image

-             if len(image_array.shape) == 3:
-                 gray = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)
-             else:
-                 gray = image_array
-
-             custom_config = r'--oem 1 --psm 6'
-             tesseract_text = pytesseract.image_to_string(gray, config=custom_config)
-
-             if len(tesseract_text) > len(extracted_text):
-                 extracted_text = tesseract_text
-
-         print(f"Extracted text: {extracted_text[:100]}...")
          return extracted_text

      except Exception as e:
-         print(f"Extraction Error: {e}")
          print(traceback.format_exc())
          try:
              if isinstance(image, Image.Image):
-                 image_array = np.array(image)
              else:
-                 image_array = image
-
-             if len(image_array.shape) == 3:
-                 gray = cv2.cvtColor(image_array, cv2.COLOR_RGB2GRAY)
              else:
-                 gray = image_array

-             return pytesseract.image_to_string(gray, config=r'--oem 1 --psm 6')
          except Exception as e2:
-             print(f"Fallback OCR Error: {e2}")
-             return f"Error extracting text: {str(e)}"

- # Classify the math problem using Gemini 1.5 Flash
  def classify_with_gemini_flash(math_problem):
      """Classify the math problem using Gemini model"""
      try:
          model = genai.GenerativeModel(
              model_name=CLASSIFICATION_MODEL,
              generation_config={
-                 "temperature": 0.1,
-                 "top_p": 0.95,
-                 "max_output_tokens": 150, # Original value
-                 "response_mime_type": "application/json",
              }
          )
-
          prompt = f"""
-         Task: Classify the following math problem.

-         PROBLEM: {math_problem}

-         Classify this math problem according to:
-         1. Primary category (e.g., Algebra, Calculus, Geometry, Trigonometry, Statistics, Number Theory)
-         2. Specific subtopic (e.g., Linear Equations, Derivatives, Integrals, Probability)
-         3. Difficulty level (Basic, Intermediate, Advanced)
-         4. Key concepts involved

-         Format the response as a JSON object with the fields: "category", "subtopic", "difficulty", "key_concepts".
          """
-
-         response = model.generate_content(prompt)
          try:
-             # Clean potential markdown code fences before parsing (Added robustness)
              cleaned_text = response.text.strip().replace("```json", "").replace("```", "").strip()
              classification = json.loads(cleaned_text)
              return classification
-         except (json.JSONDecodeError, AttributeError): # Added AttributeError check
-             print(f"JSON Decode/Attribute Error: Unable to parse response: {response.text}")
-             # Return default structure on failure, as per original implicit behavior
-             return {
-                 "category": "Unknown", "subtopic": "Unknown",
-                 "difficulty": "Unknown", "key_concepts": ["Unknown"]
-             }
      except Exception as e:
          print(f"Classification Error: {e}")
          print(traceback.format_exc())
-         return {
-             "category": "Error", "subtopic": "Error",
-             "difficulty": "Error", "key_concepts": [f"Error: {str(e)}"]
-         }

- # Solve the math problem using Gemini model
  def solve_with_gemini_pro(math_problem, classification):
      """Solve the math problem using Gemini model"""
      try:
          model = genai.GenerativeModel(
              model_name=SOLUTION_MODEL,
              generation_config={
-                 "temperature": 0.2,
-                 "top_p": 0.9,
-                 "max_output_tokens": 1000, # Original value
              }
          )

-         # Ensure classification has the required fields with fallbacks (As per Original Script)
          if not isinstance(classification, dict):
-             classification = {
-                 "category": "Unknown", "subtopic": "Unknown",
-                 "difficulty": "Unknown", "key_concepts": ["Unknown"]
-             }
-
-         for field in ["category", "subtopic", "difficulty"]:
-             if field not in classification or not classification[field]:
-                 classification[field] = "Unknown"
-
-         if "key_concepts" not in classification or not classification["key_concepts"]:
-             classification["key_concepts"] = ["Unknown"]
-
-         # Format key concepts as a string (As per Original Script)
-         if isinstance(classification["key_concepts"], list):
-             key_concepts = ", ".join(classification["key_concepts"])
          else:
-             key_concepts = str(classification["key_concepts"])

          prompt = f"""
-         Task: Solve the following math problem with clear step-by-step explanations.
-
-         PROBLEM: {math_problem}
-
-         CLASSIFICATION:
-         - Category: {classification["category"]}
-         - Subtopic: {classification["subtopic"]}
-         - Difficulty: {classification["difficulty"]}
-         - Key Concepts: {key_concepts}
-
-         Provide a complete solution following these guidelines:
-         1. Start with an overview of the approach
-         2. Break down the problem into clear, logical steps
-         3. Explain each step thoroughly, mentioning the mathematical principles applied
-         4. Show all work and calculations
-         5. Verify the answer if possible
-         6. Summarize the key takeaway from this problem
-
-         Format the solution to be readable on a mobile device, with appropriate spacing between steps.
          """
-
-         response = model.generate_content(prompt)
          return response.text
      except Exception as e:
          print(f"Solution Error: {e}")
          print(traceback.format_exc())
-         return f"Error generating solution: {str(e)}"

- # Explain the solution in more detail
  def explain_solution(math_problem, solution):
      """Provide a more detailed explanation of the solution"""
      try:
-         print(f"Generating detailed explanation...")
-
          model = genai.GenerativeModel(
              model_name=EXPLANATION_MODEL,
              generation_config={
-                 "temperature": 0.3,
-                 "top_p": 0.95,
-                 "max_output_tokens": 1500, # Original value
              }
          )
-
          prompt = f"""
-         Task: Provide a more detailed explanation of the solution to this math problem.
-
-         PROBLEM: {math_problem}
-         SOLUTION: {solution}
-
-         Provide a more comprehensive explanation that:
-         1. Breaks down complex steps into simpler components
-         2. Explains the underlying mathematical principles in depth
-         3. Connects this problem to fundamental concepts
-         4. Offers visual or intuitive ways to understand the concepts
-         5. Highlights common mistakes students make with this type of problem
-         6. Suggests alternative solution approaches if applicable
-
-         Make the explanation accessible to a student who is struggling with this topic.
          """
-
-         response = model.generate_content(prompt)
          return response.text
      except Exception as e:
          print(f"Explanation Error: {e}")
          print(traceback.format_exc())
-         return f"Error generating explanation: {str(e)}"

- # Generate similar practice problems
  def generate_similar_problems(math_problem, classification):
      """Generate similar practice math problems"""
      try:
-         print(f"Generating similar problems...")
-
          model = genai.GenerativeModel(
              model_name=SIMILAR_MODEL,
              generation_config={
-                 "temperature": 0.7,
-                 "top_p": 0.95,
-                 "max_output_tokens": 1000, # Original value
              }
          )

-         # Prepare classification string (As per Original Script)
-         classification_str = json.dumps(classification, indent=2)

          prompt = f"""
-         Task: Generate similar practice math problems based on the following problem.

-         ORIGINAL PROBLEM: {math_problem}
-         CLASSIFICATION: {classification_str}

-         Generate 3 similar practice problems that:
-         1. Cover the same mathematical concepts and principles
-         2. Vary in difficulty (one easier, one similar, one harder)
-         3. Use different numerical values or variables
-         4. Test the same underlying skills

-         For each problem:
-         - Provide the complete problem statement
-         - Include a brief hint for solving it
-         - Provide the correct answer (but not the full solution)

-         Format as three separate problems with clear numbering.
-         """

-         response = model.generate_content(prompt)
          return response.text
      except Exception as e:
          print(f"Similar Problems Error: {e}")
          print(traceback.format_exc())
-         return f"Error generating similar problems: {str(e)}"
-

- # Main function for processing images (As per Original Script)
- # Note: The original function didn't use gr.Progress. We will call the backend functions directly.
- def process_image(image):
-     """Main processing pipeline for the NerdAI app (Original Logic)"""
      try:
          if image is None:
-             # Return values matching the expected outputs for the UI structure
-             return None, "No image uploaded", "{}", "No image uploaded", "", "{}" # Added empty state values

-         print("Starting processing...") # Simple print instead of progress

          # Step 1: Extract text
-         print("Extracting text...")
          extracted_text = extract_text_with_gemini(image)
-
-         if not extracted_text or extracted_text.strip() == "" or extracted_text.startswith("Error"):
-             err_msg = extracted_text if extracted_text.startswith("Error") else "No text was extracted from the image. Please try a clearer image."
              img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
-             return img_display, err_msg, "{}", err_msg, "", "{}" # Return error state

-         # Step 2: Classify
-         print("Classifying problem...")
          classification = classify_with_gemini_flash(extracted_text)
-         classification_json = json.dumps(classification, indent=2) # Ensure it's JSON string

-         # Step 3: Solve
-         print("Solving problem...")
          solution = solve_with_gemini_pro(extracted_text, classification)

-         print("Processing complete")

-         # Return values needed by the UI components and state variables
-         # We need 6 values for: processed_image, extracted_text_output, classification_output, solution_output, extracted_text_state, classification_state
          img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
-         return img_display, extracted_text, classification_json, solution, extracted_text, classification_json

      except Exception as e:
          print(f"Process Image Error: {e}")
          print(traceback.format_exc())
-         error_message = f"Error processing image: {str(e)}"
          img_display = None
          if image is not None:
-             img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
-         # Return error message and empty states
-         return img_display, error_message, "{}", error_message, "", "{}"


- # --- Gradio Interface (Modern UI from Previous Refactoring) ---

  # Custom CSS for styling
  css = """
@@ -399,7 +442,6 @@ body { font-family: 'Inter', sans-serif; } /* Modern font */
      height: 100%; object-fit: contain; /* Control image scaling */
  }

-
  /* Main button */
  #process_button { margin-top: 15px; }

@@ -441,20 +483,23 @@ footer { visibility: hidden } /* Hide default Gradio footer */
  """

  # Define a theme
  theme = gr_themes.Default(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky).set(
      button_primary_background_fill="#4A90E2",
      button_primary_background_fill_hover="#357ABD",
      button_secondary_background_fill="#E1E8ED",
      button_secondary_background_fill_hover="#CED9E0",
-     block_radius="8px",
  )


  with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:

      # --- State Variables ---
      extracted_text_state = gr.State("")
-     classification_state = gr.State("{}") # Store classification as JSON string

      # --- UI Layout ---
      gr.Markdown("# 🧠 NerdAI Math Problem Solver", elem_id="title_markdown")
@@ -462,18 +507,17 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:

      with gr.Row():
          with gr.Column(scale=1, elem_id="input_col"):
-             input_image = gr.Image(label="Upload Math Problem", type="pil", elem_id="input_image", height=350)
              process_btn = gr.Button("✨ Process Image and Solve", variant="primary", elem_id="process_button")
          with gr.Column(scale=1, elem_id="output_col"):
-             processed_image = gr.Image(label="Processed Image", interactive=False, elem_id="processed_image", height=350)

      # --- Results Area ---
      with gr.Group(elem_id="results_group"):
          gr.Markdown("### Results")
-         with gr.Box():
              extracted_text_output = gr.Textbox(label="📝 Extracted Text", lines=3, interactive=False, placeholder="Text from the image will appear here...", elem_id="extracted_text_output")
          with gr.Box():
-             # Display classification using the original model name constant
              classification_output = gr.Textbox(label=f"📊 Problem Classification ({CLASSIFICATION_MODEL})", lines=5, interactive=False, placeholder="Problem type analysis will appear here...", elem_id="classification_output")

          solution_output = gr.Markdown(label="✅ Solution Steps", value="*Solution steps will appear here after processing...*", elem_id="solution_output")
@@ -490,37 +534,36 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
      with gr.Accordion("Similar Practice Problems", open=False):
          similar_problems_output = gr.Markdown(value="*Click 'Similar Questions' above to generate practice problems.*")

-     # --- Event Handlers (Connecting UI to *Original* Backend Functions) ---

      # Main process button click
      process_btn.click(
-         fn=process_image, # Uses the original process_image function
          inputs=[input_image],
          outputs=[
              processed_image,
              extracted_text_output,
-             classification_output, # Populated by classification_json from return
              solution_output,
-             extracted_text_state, # Populated by extracted_text from return
-             classification_state # Populated by classification_json from return
          ],
-         # No progress tracking here as original function didn't support it
      )

-     # Explain button click handler (Calls original explain_solution)
      def explain_button_handler(current_problem_text, current_solution_md):
-         """Handler for Explain It button using state and original backend"""
-         print("Explain button clicked (using original backend).")
-         # Basic input validation
-         if not current_problem_text or current_problem_text.startswith("Error:") or current_problem_text == "No image uploaded":
              return "Please successfully process an image first to get text and a solution."
          if not current_solution_md or current_solution_md.startswith("Error") or "will appear here" in current_solution_md:
              return "Cannot explain: A valid solution needs to be generated first."

-         # Add simple feedback, yield not applicable without queue/progress
-         explanation_output.value = "*Generating detailed explanation... please wait.*" # Direct update attempt
-         explanation_result = explain_solution(current_problem_text, current_solution_md) # Call original function
-         return explanation_result # Return result to update the Markdown output

      explain_btn.click(
          fn=explain_button_handler,
@@ -528,32 +571,27 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
          outputs=explanation_output # Target the Markdown inside the Accordion
      )

-     # Similar problems button click handler (Calls original generate_similar_problems)
      def similar_button_handler(current_problem_text, current_classification_json):
-         """Handler for Similar Questions button using state and original backend"""
-         print("Similar button clicked (using original backend).")
-         # Basic input validation
-         if not current_problem_text or current_problem_text.startswith("Error:") or current_problem_text == "No image uploaded":
              return "Please successfully process an image first to get the problem text and classification."

-         # Add simple feedback
-         similar_problems_output.value = "*Generating similar problems... please wait.*" # Direct update attempt

          try:
-             # Parse classification JSON from state
              classification_dict = json.loads(current_classification_json)
-             # Minimal validation (check if it's a dictionary)
-             if not isinstance(classification_dict, dict):
-                 raise ValueError("Invalid classification data format.")
-         except (json.JSONDecodeError, ValueError, TypeError) as e: # Added TypeError
              print(f"Error parsing classification state for similar problems: {e}")
-             print(f"Classification JSON received: {current_classification_json}")
-             # Use the original classification function's error structure for consistency
-             return f"Error: Could not use problem classification data ({e}). Please ensure the problem was classified correctly (should be JSON)."

-         # Call original function
          similar_result = generate_similar_problems(current_problem_text, classification_dict)
-         return similar_result # Return result to update the Markdown output

      similar_btn.click(
          fn=similar_button_handler,
@@ -561,7 +599,7 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
          outputs=similar_problems_output # Target the Markdown inside the Accordion
      )

-     # Add an example image (optional, as in refactored UI)
      gr.Examples(
          examples=[
              # Add paths to example images accessible by the script
@@ -576,7 +614,6 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
          label="Example Math Problems"
      )

-
  # --- Launch the App ---
  if __name__ == "__main__":
      # Create dummy example files if they don't exist for local testing
@@ -597,5 +634,4 @@ if __name__ == "__main__":
              print(f"Could not create placeholder image {fpath}: {e}")

      # Recommended: Enable queue for better handling of multiple users/long tasks
-     # Queue helps manage multiple clicks even if progress isn't used in the main function
-     demo.queue().launch(debug=True)
  import os
  import json
  import gradio as gr
+ import gradio.themes as gr_themes # Import themes
  import google.generativeai as genai
  from PIL import Image
  import numpy as np

  import traceback
  import pytesseract
  import cv2
+ import time

+ # --- Load Environment Variables (Keep as is) ---
  load_dotenv()
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") or HfFolder.get_token("GEMINI_API_KEY")
  if not GEMINI_API_KEY:
+     # Try to get it from Gradio secrets if running on Spaces
      try:
          import secrets
          GEMINI_API_KEY = secrets.GEMINI_API_KEY

  genai.configure(api_key=GEMINI_API_KEY)

+ # --- Define Model Names (Keep as is) ---
  CLASSIFICATION_MODEL = "gemini-1.5-flash"
  SOLUTION_MODEL = "gemini-1.5-pro-latest"
  EXPLANATION_MODEL = "gemini-1.5-pro-latest"
  SIMILAR_MODEL = "gemini-1.5-pro-latest"
+ MODEL_IMAGE = "gemini-1.5-pro-latest" # Using Pro for OCR

+ print(f"Using models: Classification: {CLASSIFICATION_MODEL}, Solution: {SOLUTION_MODEL}, Explanation: {EXPLANATION_MODEL}, Similar: {SIMILAR_MODEL}, Image Analysis: {MODEL_IMAGE}")

+ # --- Set Tesseract Path (Keep as is, but ensure it's correct for your env) ---
  # Make sure this path is correct for your deployment environment
  try:
      # Check common paths
+     if os.path.exists('/usr/bin/tesseract'):
          pytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'
+     elif os.path.exists('/opt/homebrew/bin/tesseract'): # macOS Homebrew
+         pytesseract.pytesseract.tesseract_cmd = '/opt/homebrew/bin/tesseract'
+     # Add more checks if needed (e.g., Windows)
      else:
          # Attempt to find Tesseract in PATH (might work in some environments)
          from shutil import which

          pytesseract.pytesseract.tesseract_cmd = tesseract_path
      else:
          print("Warning: Tesseract command not found at specified paths or in PATH. Fallback OCR might fail.")
+         # No exception here, let Gemini try first
  except Exception as e:
      print(f"Warning: Error setting Tesseract path: {e}. Fallback OCR might fail.")


+ # --- Backend Functions (Keep core logic, add minor logging/error handling improvements) ---

  def extract_text_with_gemini(image):
+     """Extract text from image using Gemini Pro Vision, with Tesseract fallback"""
+     extracted_text = ""
      try:
          if isinstance(image, np.ndarray):
              image = Image.fromarray(image)

+         print("Attempting text extraction with Gemini Pro Vision...")
          model = genai.GenerativeModel(MODEL_IMAGE)
+         prompt = """Extract ALL text, numbers, and mathematical equations from this image precisely.
+         Include ALL symbols, numbers, letters, and mathematical notation exactly as they appear.
+         Format any equations properly and maintain their layout as much as possible.
+         Do not add any commentary or explanation, just output the extracted text verbatim."""

+         response = model.generate_content([prompt, image], request_options={'timeout': 120}) # Add timeout
          extracted_text = response.text.strip()
+         print(f"Gemini extracted text (first 100 chars): {extracted_text[:100]}...")

+         # Fallback condition: if Gemini returns very little text or indicates failure
+         if len(extracted_text) < 15 or "unable to extract" in extracted_text.lower():
+             print("Gemini returned limited or no text, trying Tesseract as fallback...")
+             raise ValueError("Gemini extraction insufficient, attempting fallback.") # Trigger fallback

          return extracted_text

      except Exception as e:
+         print(f"Gemini Extraction Error: {e}. Attempting Tesseract fallback.")
          print(traceback.format_exc())
          try:
+             if 'pytesseract' not in globals() or not hasattr(pytesseract.pytesseract, 'tesseract_cmd') or not pytesseract.pytesseract.tesseract_cmd:
+                 print("Tesseract is not configured. Skipping fallback.")
+                 return extracted_text if extracted_text else f"Error: Gemini failed and Tesseract is not available. Details: {str(e)}"
+
              if isinstance(image, Image.Image):
+                 image_array = np.array(image.convert('L')) # Convert to grayscale PIL image first
+             elif isinstance(image, np.ndarray):
+                 if len(image.shape) == 3:
+                     image_array = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+                 else:
+                     image_array = image # Assume already grayscale if 2D
              else:
+                 return f"Error: Unsupported image type for Tesseract fallback. Gemini Error: {str(e)}"
+
+             # Preprocessing for Tesseract (optional but can help)
+             # image_array = cv2.threshold(image_array, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
+
+             custom_config = r'--oem 1 --psm 6' # Assume a block of text
+             tesseract_text = pytesseract.image_to_string(image_array, config=custom_config, lang='eng+equ') # Add 'equ' for equations if lang pack installed
+             tesseract_text = tesseract_text.strip()
+             print(f"Tesseract extracted text (first 100 chars): {tesseract_text[:100]}...")
+
+             # Use Tesseract result only if it's significantly better than a poor Gemini result
+             if len(tesseract_text) > max(len(extracted_text), 20):
+                 print("Using Tesseract result as fallback.")
+                 return tesseract_text
+             elif extracted_text:
+                 print("Keeping Gemini result despite fallback attempt.")
+                 return extracted_text # Keep original Gemini if Tesseract wasn't better
              else:
+                 return f"Error: Both Gemini and Tesseract failed to extract sufficient text. Gemini Error: {str(e)}"

          except Exception as e2:
+             print(f"Tesseract Fallback OCR Error: {e2}")
+             print(traceback.format_exc())
+             return extracted_text if extracted_text else f"Error: Gemini and Tesseract failed. Gemini: {str(e)}, Tesseract: {str(e2)}"
+

  def classify_with_gemini_flash(math_problem):
      """Classify the math problem using Gemini model"""
+     default_classification = {
+         "category": "Unknown", "subtopic": "Unknown",
+         "difficulty": "Unknown", "key_concepts": ["Unknown"]
+     }
+     if not math_problem or len(math_problem) < 5: # Basic check
+         print("Skipping classification due to insufficient text.")
+         return default_classification
      try:
+         print(f"Classifying problem with {CLASSIFICATION_MODEL}...")
          model = genai.GenerativeModel(
              model_name=CLASSIFICATION_MODEL,
              generation_config={
+                 "temperature": 0.1, "top_p": 0.95,
+                 "max_output_tokens": 200, "response_mime_type": "application/json",
              }
          )
          prompt = f"""
+         Task: Classify the following math problem precisely.

+         PROBLEM:
+         ```
+         {math_problem}
+         ```

+         Instructions:
+         1. Identify the Primary Math Category (e.g., Algebra, Calculus, Geometry, Trigonometry, Statistics, Number Theory, Linear Algebra, Differential Equations).
+         2. Determine the Specific Subtopic (e.g., Solving Linear Equations, Limits, Euclidean Geometry, Sine Rule, Normal Distribution, Prime Numbers).
+         3. Assess the Difficulty Level (e.g., High School - Basic, High School - Advanced, College - Introductory, College - Advanced).
+         4. List the Key Mathematical Concepts involved (be specific, e.g., quadratic formula, integration by parts, Pythagorean theorem, standard deviation).

+         Format the response STRICTLY as a JSON object with keys: "category", "subtopic", "difficulty", "key_concepts" (where key_concepts is a list of strings).
+         Example: {{ "category": "Algebra", "subtopic": "Quadratic Equations", "difficulty": "High School - Advanced", "key_concepts": ["quadratic formula", "discriminant", "factoring"] }}
          """
+         response = model.generate_content(prompt, request_options={'timeout': 60})
          try:
+             # Clean potential markdown code fences
              cleaned_text = response.text.strip().replace("```json", "").replace("```", "").strip()
              classification = json.loads(cleaned_text)
+             # Validate structure
+             if not all(k in classification for k in default_classification.keys()):
+                 print(f"Warning: Classification missing keys. Response: {cleaned_text}")
+                 # Fill missing keys
+                 for k, v in default_classification.items():
+                     classification.setdefault(k, v)
+             if not isinstance(classification.get("key_concepts"), list):
+                 classification["key_concepts"] = [str(classification.get("key_concepts", "Unknown"))]
+
+             print(f"Classification successful: {classification}")
              return classification
+         except (json.JSONDecodeError, AttributeError) as json_e:
+             print(f"JSON Decode/Attribute Error: Unable to parse classification response: {response.text}. Error: {json_e}")
+             return default_classification
      except Exception as e:
          print(f"Classification Error: {e}")
          print(traceback.format_exc())
+         error_classification = default_classification.copy()
+         error_classification["key_concepts"] = [f"Error: {str(e)}"]
+         return error_classification

  def solve_with_gemini_pro(math_problem, classification):
      """Solve the math problem using Gemini model"""
+     if not math_problem or len(math_problem) < 5:
+         return "Cannot solve: Invalid math problem text provided."
      try:
+         print(f"Solving problem with {SOLUTION_MODEL}...")
          model = genai.GenerativeModel(
              model_name=SOLUTION_MODEL,
              generation_config={
+                 "temperature": 0.2, "top_p": 0.9,
+                 "max_output_tokens": 2000, # Increased token limit for complex solutions
              }
          )

+         # Ensure classification is a dict and format concepts
          if not isinstance(classification, dict):
+             classification = {"category": "Unknown", "subtopic": "Unknown", "difficulty": "Unknown", "key_concepts": ["Unknown"]}
+         key_concepts = classification.get("key_concepts", ["Unknown"])
+         if isinstance(key_concepts, list):
+             key_concepts_str = ", ".join(key_concepts) if key_concepts else "Unknown"
          else:
+             key_concepts_str = str(key_concepts)

          prompt = f"""
+         Task: Solve the following mathematical problem step-by-step. Assume you are a helpful math tutor.
+
+         PROBLEM:
+         ```
+         {math_problem}
+         ```
+
+         PROBLEM CONTEXT (from classification):
+         - Category: {classification.get("category", "Unknown")}
+         - Subtopic: {classification.get("subtopic", "Unknown")}
+         - Difficulty: {classification.get("difficulty", "Unknown")}
+         - Key Concepts: {key_concepts_str}
+
+         Instructions:
+         1. **Understand the Goal:** Briefly state what the problem is asking for.
+         2. **Identify Strategy/Concepts:** Mention the main mathematical concepts or methods needed (referencing the classification if helpful).
+         3. **Step-by-Step Solution:** Provide a clear, numbered sequence of steps to reach the solution.
+            * Explain the reasoning behind each step.
+            * Show all necessary calculations clearly. Use LaTeX for mathematical notation where appropriate (e.g., $\\frac{{a}}{{b}}$, $x^2$, $\\int f(x) dx$). Wrap inline math in single $ and display math in double $$.
+            * Define any variables used.
+         4. **Final Answer:** Clearly state the final answer(s).
+         5. **Verification (Optional but Recommended):** If possible, briefly describe how the answer could be checked or verified.
+         6. **Conclusion/Key Takeaway:** Briefly summarize the core concept demonstrated or a key takeaway.
+
+         Format the output using Markdown for readability. Use headings, bullet points, and numbered lists effectively. Ensure LaTeX math expressions are correctly formatted.
          """
+         response = model.generate_content(prompt, request_options={'timeout': 180}) # Increased timeout for complex solves
+         print("Solution generation complete.")
+         # Basic check for failed generation
+         if not response.text or len(response.text) < 20:
+             print(f"Warning: Solution generation produced very short output: {response.text}")
+             # Add a fallback message if the response seems incomplete/failed
+             if "cannot solve" in response.text.lower() or "don't understand" in response.text.lower():
+                 return response.text # Return Gemini's explicit failure message
+             else:
+                 return f"Error: Solution generation failed or produced incomplete results.\n\nRaw Response:\n{response.text}"
          return response.text
      except Exception as e:
          print(f"Solution Error: {e}")
          print(traceback.format_exc())
+         return f"## Error Generating Solution\n\nAn error occurred while trying to solve the problem: `{str(e)}`\n\nPlease check the extracted text and try again. If the problem persists, the model might be unable to process this specific query."

  def explain_solution(math_problem, solution):
      """Provide a more detailed explanation of the solution"""
+     if not solution or "error generating solution" in solution.lower() or "cannot solve" in solution.lower() :
+         return "Cannot explain: No valid solution provided."
      try:
+         print(f"Generating detailed explanation with {EXPLANATION_MODEL}...")
          model = genai.GenerativeModel(
              model_name=EXPLANATION_MODEL,
              generation_config={
+                 "temperature": 0.3, "top_p": 0.95,
+                 "max_output_tokens": 2500, # Allow more tokens for detailed explanation
              }
          )
          prompt = f"""
+         Task: Provide a detailed, pedagogical explanation of the provided solution to a math problem. Assume the reader found the original solution steps difficult to follow.
+
+         ORIGINAL PROBLEM:
+         ```
+         {math_problem}
+         ```
+
+         PROVIDED SOLUTION:
+         ```
+         {solution}
+         ```
+
+         Instructions:
+         Elaborate on the provided solution with the goal of enhancing understanding. Focus on the 'why' behind each step.
+         1. **Reiterate Goal:** Briefly restate the problem's objective.
+         2. **Core Concepts Deep Dive:** Explain the fundamental mathematical principles mentioned or implied in the solution in more detail. Use analogies or simpler examples if helpful. Define key terms.
+         3. **Step-by-Step Elaboration:** Go through the solution steps again, but expand on the reasoning.
+            * Why was this specific step taken? What rule or theorem justifies it?
+            * Are there intermediate calculations or assumptions that were skipped? Spell them out.
+            * Address potential points of confusion.
+         4. **Connections:** How does this problem relate to broader mathematical ideas or prerequisite knowledge?
+         5. **Common Pitfalls:** Mention common mistakes students make when tackling similar problems.
+         6. **Alternative Perspectives (Optional):** Briefly mention if there are other valid ways to approach the problem.
+
+         Format the output using Markdown for clarity (headings, lists, bold text). Use LaTeX for math notation (inline $, display $$). Make it easy to read and digest.
          """
+         response = model.generate_content(prompt, request_options={'timeout': 180})
+         print("Detailed explanation generation complete.")
          return response.text
      except Exception as e:
          print(f"Explanation Error: {e}")
          print(traceback.format_exc())
+         return f"## Error Generating Explanation\n\nAn error occurred: `{str(e)}`"

  def generate_similar_problems(math_problem, classification):
      """Generate similar practice math problems"""
+     if not math_problem or len(math_problem) < 5:
+         return "Cannot generate similar problems: Invalid original problem text."
      try:
+         print(f"Generating similar problems with {SIMILAR_MODEL}...")
          model = genai.GenerativeModel(
              model_name=SIMILAR_MODEL,
              generation_config={
+                 "temperature": 0.7, "top_p": 0.95, # Higher temp for variety
+                 "max_output_tokens": 1500,
              }
          )

+         # Ensure classification is a dict and format concepts
+         if not isinstance(classification, dict):
+             classification = {"category": "Unknown", "subtopic": "Unknown", "difficulty": "Unknown", "key_concepts": ["Unknown"]}
+
+         classification_str = f"""
+         - Category: {classification.get("category", "Unknown")}
+         - Subtopic: {classification.get("subtopic", "Unknown")}
+         - Difficulty: {classification.get("difficulty", "Unknown")}
+         - Key Concepts: {', '.join(classification.get("key_concepts", ["Unknown"]))}
+         """

          prompt = f"""
+         Task: Generate 3 distinct practice math problems that are similar in concept to the original problem provided, but vary slightly in presentation or difficulty.

+         ORIGINAL PROBLEM:
+         ```
+         {math_problem}
+         ```

+         CLASSIFICATION OF ORIGINAL PROBLEM:
+         {classification_str}

+         Instructions:
+         Create three new problems based on the original's concepts and difficulty level.
+         1. **Problem 1 (Similar Difficulty):** Create a problem that closely mirrors the original in terms of concepts and required steps, but uses different numbers, variables, or context.
+         2. **Problem 2 (Slightly Easier/Different Focus):** Create a problem that uses the same core concepts but might be slightly simpler, focus on a specific sub-step, or change the type of answer required (e.g., find an intermediate value instead of the final result).
+         3. **Problem 3 (Slightly Harder/Extension):** Create a problem that builds upon the original concepts, perhaps adding an extra step, combining it with another related concept, or requiring more complex manipulation.

+         For EACH of the 3 problems:
+         * Clearly state the problem question. Use LaTeX for math notation.
+         * Provide a one-sentence HINT on how to approach it.
+         * Provide the final ANSWER (just the answer, not the steps).

+         Format the output using Markdown. Use clear headings for each problem (e.g., "### Practice Problem 1 (Similar Difficulty)").
+         """
+         response = model.generate_content(prompt, request_options={'timeout': 180})
+         print("Similar problems generation complete.")
          return response.text
      except Exception as e:
          print(f"Similar Problems Error: {e}")
          print(traceback.format_exc())
+         return f"## Error Generating Similar Problems\n\nAn error occurred: `{str(e)}`"

+ # --- Main Processing Function (Modified for better progress updates and error handling) ---
+ def process_image(image, progress=gr.Progress(track_tqdm=True)):
+ """Main processing pipeline for the NerdAI app"""
366
+ start_time = time.time()
367
  try:
368
  if image is None:
369
+ return None, "Please upload an image first.", "{}", "No image provided.", "", "No image provided." # Added state output
 
370
 
371
+ progress(0, desc="πŸš€ Starting...")
372
+ time.sleep(0.5) # Give UI time to update
373
 
374
  # Step 1: Extract text
375
+ progress(0.1, desc="πŸ” Extracting text from image...")
376
  extracted_text = extract_text_with_gemini(image)
377
+ if not extracted_text or extracted_text.startswith("Error:") or len(extracted_text) < 10 :
378
+ err_msg = extracted_text if extracted_text.startswith("Error:") else "Error: Could not extract sufficient text from the image. Please try a clearer image or check Tesseract configuration if using fallback."
379
+ print(f"Text extraction failed or insufficient: {err_msg}")
380
+ # Show the uploaded image back to the user
381
  img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
382
+ return img_display, err_msg, "{}", err_msg, "", err_msg # Return error message in multiple fields
383
 
384
+ progress(0.4, desc=f"πŸ“Š Classifying problem ({CLASSIFICATION_MODEL})...")
 
385
  classification = classify_with_gemini_flash(extracted_text)
386
+ classification_json = json.dumps(classification, indent=2)
387
 
388
+ progress(0.6, desc=f"πŸ’‘ Solving problem ({SOLUTION_MODEL})...")
 
389
  solution = solve_with_gemini_pro(extracted_text, classification)
390
 
391
+ end_time = time.time()
392
+ progress(1.0, desc=f"βœ… Done in {end_time - start_time:.2f}s!")
393
 
394
+ # Return processed image (or original), text, classification, solution, and update state
 
395
  img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
396
+ return img_display, extracted_text, classification_json, solution, extracted_text, classification_json # Pass classification JSON to state too
397
 
398
  except Exception as e:
399
  print(f"Process Image Error: {e}")
400
  print(traceback.format_exc())
401
+ error_message = f"An unexpected error occurred: {str(e)}"
402
+ # Try to return the original image if possible
403
  img_display = None
404
  if image is not None:
405
+ img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
406
+ return img_display, error_message, "{}", error_message, "", error_message # Populate errors
 
407
 
408
 
409
+ # --- Gradio Interface (Major Changes Here) ---

  # Custom CSS for styling
  css = """

      height: 100%; object-fit: contain; /* Control image scaling */
  }

  /* Main button */
  #process_button { margin-top: 15px; }

  """

  # Define a theme
+ # theme = gr_themes.Soft(primary_hue="blue", secondary_hue="sky")
  theme = gr_themes.Default(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky).set(
+     # Further theme customizations if needed
      button_primary_background_fill="#4A90E2",
      button_primary_background_fill_hover="#357ABD",
      button_secondary_background_fill="#E1E8ED",
      button_secondary_background_fill_hover="#CED9E0",
+     block_radius="8px", # Consistent border radius
  )


  with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:

      # --- State Variables ---
+     # Store extracted text and classification needed for follow-up actions
      extracted_text_state = gr.State("")
+     classification_state = gr.State("{}") # Store as JSON string

      # --- UI Layout ---
      gr.Markdown("# 🧠 NerdAI Math Problem Solver", elem_id="title_markdown")

      with gr.Row():
          with gr.Column(scale=1, elem_id="input_col"):
+             input_image = gr.Image(label="Upload Math Problem", type="pil", elem_id="input_image", height=350) # Set fixed height
              process_btn = gr.Button("✨ Process Image and Solve", variant="primary", elem_id="process_button")
          with gr.Column(scale=1, elem_id="output_col"):
+             processed_image = gr.Image(label="Processed Image", interactive=False, elem_id="processed_image", height=350) # Set fixed height

      # --- Results Area ---
      with gr.Group(elem_id="results_group"):
          gr.Markdown("### Results")
+         with gr.Box(): # Box for slight visual separation
              extracted_text_output = gr.Textbox(label="📝 Extracted Text", lines=3, interactive=False, placeholder="Text from the image will appear here...", elem_id="extracted_text_output")
          with gr.Box():
              classification_output = gr.Textbox(label=f"📊 Problem Classification ({CLASSIFICATION_MODEL})", lines=5, interactive=False, placeholder="Problem type analysis will appear here...", elem_id="classification_output")

          solution_output = gr.Markdown(label="✅ Solution Steps", value="*Solution steps will appear here after processing...*", elem_id="solution_output")

      with gr.Accordion("Similar Practice Problems", open=False):
          similar_problems_output = gr.Markdown(value="*Click 'Similar Questions' above to generate practice problems.*")

+     # --- Event Handlers ---

      # Main process button click
      process_btn.click(
+         fn=process_image,
          inputs=[input_image],
          outputs=[
              processed_image,
              extracted_text_output,
+             classification_output,
              solution_output,
+             extracted_text_state, # Update state
+             classification_state # Update state
          ],
+         # api_name="process_math_image" # Optional: for API usage
      )

+     # Explain button click
      def explain_button_handler(current_problem_text, current_solution_md):
+         """Handler for Explain It button using state"""
+         print("Explain button clicked.")
+         if not current_problem_text or current_problem_text.startswith("Error:") or current_problem_text == "No image provided." or current_problem_text == "Please upload an image first.":
              return "Please successfully process an image first to get text and a solution."
          if not current_solution_md or current_solution_md.startswith("Error") or "will appear here" in current_solution_md:
              return "Cannot explain: A valid solution needs to be generated first."

+         # Add a loading indicator (optional, but good UX)
+         yield "*Generating detailed explanation... please wait.*"
+         explanation_result = explain_solution(current_problem_text, current_solution_md)
+         yield explanation_result

      explain_btn.click(
          fn=explain_button_handler,

          outputs=explanation_output # Target the Markdown inside the Accordion
      )

+     # Similar problems button click
      def similar_button_handler(current_problem_text, current_classification_json):
+         """Handler for Similar Questions button using state"""
+         print("Similar button clicked.")
+         if not current_problem_text or current_problem_text.startswith("Error:") or current_problem_text == "No image provided." or current_problem_text == "Please upload an image first.":
              return "Please successfully process an image first to get the problem text and classification."

+         # Add a loading indicator
+         yield "*Generating similar problems... please wait.*"

          try:
              classification_dict = json.loads(current_classification_json)
+             # Minimal validation
+             if not isinstance(classification_dict, dict) or not classification_dict:
+                 raise ValueError("Invalid classification data.")
+         except (json.JSONDecodeError, ValueError) as e:
              print(f"Error parsing classification state for similar problems: {e}")
+             return f"Error: Could not use problem classification data ({e}). Please ensure the problem was classified correctly."

          similar_result = generate_similar_problems(current_problem_text, classification_dict)
+         yield similar_result

      similar_btn.click(
          fn=similar_button_handler,

          outputs=similar_problems_output # Target the Markdown inside the Accordion
      )

+     # Add an example image (optional)
      gr.Examples(
          examples=[
              # Add paths to example images accessible by the script

          label="Example Math Problems"
      )

  # --- Launch the App ---
  if __name__ == "__main__":
      # Create dummy example files if they don't exist for local testing

              print(f"Could not create placeholder image {fpath}: {e}")

      # Recommended: Enable queue for better handling of multiple users/long tasks
+     demo.queue().launch(debug=True) # debug=True for more logs, remove for production