cuneytkaya committed
Commit 0bfe3b6 · verified · 1 Parent(s): 58c11a0

Update app.py

Files changed (1)
  1. app.py +52 -70
app.py CHANGED
@@ -1,16 +1,16 @@
import os
import json
import gradio as gr
- import gradio.themes as gr_themes # Import themes for UI
+ import gradio.themes as gr_themes
import google.generativeai as genai
from PIL import Image
import numpy as np
from huggingface_hub import HfFolder
from dotenv import load_dotenv
import traceback
- import time # Keep time import for process_image duration calculation
+ import time
+

- # --- Load Environment Variables ---
load_dotenv()
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY") or HfFolder.get_token("GEMINI_API_KEY")
if not GEMINI_API_KEY:
@@ -25,22 +25,20 @@ if not GEMINI_API_KEY:

genai.configure(api_key=GEMINI_API_KEY)

- # --- Define Model Names (As per Original Script) ---
+
CLASSIFICATION_MODEL = "gemini-1.5-flash"
SOLUTION_MODEL = "gemini-1.5-pro-latest"
EXPLANATION_MODEL = "gemini-1.5-pro-latest"
SIMILAR_MODEL = "gemini-1.5-pro-latest"
- MODEL_IMAGE = "gemini-1.5-pro-latest" # For OCR
+ MODEL_IMAGE = "gemini-1.5-pro-latest"

print(f"Using models: Classification: {CLASSIFICATION_MODEL}, Solution: {SOLUTION_MODEL}, Explanation: {EXPLANATION_MODEL}, Similar: {SIMILAR_MODEL}, Image Analysis: {MODEL_IMAGE}")

- # --- Tesseract Related Code Removed ---
- print("Tesseract OCR dependency and fallback have been removed.")


- # --- Backend Functions (Reverted to Original Parameters & Prompts, Tesseract Removed) ---

- # Extract text using Gemini Pro Vision ONLY (Reverted Prompt)
+
+
def extract_text_with_gemini(image):
"""Extract text from image using Gemini Pro Vision ONLY"""
try:
@@ -51,7 +49,7 @@ def extract_text_with_gemini(image):

print("Attempting text extraction with Gemini Pro Vision...")
model = genai.GenerativeModel(MODEL_IMAGE)
- # Original simple prompt
+
prompt = """
Extract ALL text, numbers, and mathematical equations from this image precisely.
Include ALL symbols, numbers, letters, and mathematical notation exactly as they appear.
@@ -73,7 +71,7 @@ def extract_text_with_gemini(image):
print(traceback.format_exc())
return f"Error during text extraction with Gemini: {str(e)}"

- # Classify the math problem using Gemini 1.5 Flash (Original Parameters & Prompt)
+
def classify_with_gemini_flash(math_problem):
"""Classify the math problem using Gemini model (Original settings)"""
if not math_problem or math_problem.startswith("Error:"):
@@ -89,7 +87,7 @@ def classify_with_gemini_flash(math_problem):
"response_mime_type": "application/json",
}
)
- # Original prompt
+
prompt = f"""
Task: Classify the following math problem.

@@ -107,7 +105,7 @@ def classify_with_gemini_flash(math_problem):
try:
cleaned_text = response.text.strip().replace("```json", "").replace("```", "").strip()
classification = json.loads(cleaned_text)
- # Basic validation (optional but good)
+
keys_needed = ["category", "subtopic", "difficulty", "key_concepts"]
for key in keys_needed:
classification.setdefault(key, "Unknown")
@@ -122,7 +120,7 @@ def classify_with_gemini_flash(math_problem):
print(traceback.format_exc())
return { "category": "Error", "subtopic": "API Error", "difficulty": "Error", "key_concepts": [f"Classification failed: {str(e)}"] }

- # Solve the math problem using Gemini model (Original Parameters & Prompt)
+
def solve_with_gemini_pro(math_problem, classification):
"""Solve the math problem using Gemini model (Original settings)"""
if not math_problem or math_problem.startswith("Error:"):
@@ -136,7 +134,7 @@ def solve_with_gemini_pro(math_problem, classification):
"max_output_tokens": 1000, # Original value
}
)
- # Original classification handling
+
if not isinstance(classification, dict):
classification = { "category": "Unknown", "subtopic": "Unknown", "difficulty": "Unknown", "key_concepts": ["Unknown"] }
for field in ["category", "subtopic", "difficulty"]:
@@ -149,7 +147,7 @@ def solve_with_gemini_pro(math_problem, classification):
else:
key_concepts_str = str(classification["key_concepts"])

- # Original prompt
+
prompt = f"""
Task: Solve the following math problem with clear step-by-step explanations.

@@ -180,7 +178,7 @@ def solve_with_gemini_pro(math_problem, classification):
print(traceback.format_exc())
return f"Error generating solution: {str(e)}"

- # Explain the solution in more detail (Original Parameters & Prompt)
+
def explain_solution(math_problem, solution):
"""Provide a more detailed explanation of the solution (Original settings)"""
if not math_problem or math_problem.startswith("Error:"): return "Error: Cannot explain problem due to invalid input text."
@@ -195,7 +193,7 @@ def explain_solution(math_problem, solution):
"max_output_tokens": 1500, # Original value
}
)
- # Original prompt
+
prompt = f"""
Task: Provide a more detailed explanation of the solution to this math problem.

@@ -220,7 +218,7 @@ def explain_solution(math_problem, solution):
print(traceback.format_exc())
return f"Error generating explanation: {str(e)}"

- # Generate similar practice problems (Original Parameters & Prompt)
+
def generate_similar_problems(math_problem, classification):
"""Generate similar practice math problems (Original settings)"""
if not math_problem or math_problem.startswith("Error:"): return "Error: Cannot generate problems due to invalid input text."
@@ -235,9 +233,9 @@ def generate_similar_problems(math_problem, classification):
"max_output_tokens": 1000, # Original value
}
)
- # Original classification string preparation
+
classification_str = json.dumps(classification, indent=2)
- # Original prompt
+
prompt = f"""
Task: Generate similar practice math problems based on the following problem.

@@ -266,7 +264,7 @@ def generate_similar_problems(math_problem, classification):
return f"Error generating similar problems: {str(e)}"


- # --- Main Processing Function (No Tesseract, No gr.Progress calls) ---
+
def process_image(image):
"""Main processing pipeline for the NerdAI app (No Tesseract, No Progress)"""
start_time = time.time() # Keep start time
@@ -274,37 +272,37 @@ def process_image(image):
if image is None:
return None, "Error: No image uploaded.", "{}", "Error: No image uploaded.", "", "{}"

- # Use print instead of progress
+
print("🚀 Starting processing...")
# time.sleep(0.5) # Removed sleep associated with progress update

- # Step 1: Extract text
- print("🔍 Extracting text with Gemini...") # Use print
- extracted_text = extract_text_with_gemini(image) # Calls updated function
+
+ print("🔍 Extracting text with Gemini...")
+ extracted_text = extract_text_with_gemini(image)

if extracted_text.startswith("Error:"):
print(f"Text extraction failed: {extracted_text}")
- # Ensure image is PIL before returning if possible
+
img_display = None
if image is not None:
try:
img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
- except Exception: pass # Ignore conversion error on error path
+ except Exception: pass
return img_display, extracted_text, "{}", extracted_text, "", "{}"

- # Step 2: Classify
- print(f"📊 Classifying problem ({CLASSIFICATION_MODEL})...") # Use print
- classification = classify_with_gemini_flash(extracted_text) # Uses original settings
+
+ print(f"📊 Classifying problem ({CLASSIFICATION_MODEL})...")
+ classification = classify_with_gemini_flash(extracted_text)
classification_json = json.dumps(classification, indent=2)

- # Step 3: Solve
- print(f"💡 Solving problem ({SOLUTION_MODEL})...") # Use print
- solution = solve_with_gemini_pro(extracted_text, classification) # Uses original settings
+
+ print(f"💡 Solving problem ({SOLUTION_MODEL})...")
+ solution = solve_with_gemini_pro(extracted_text, classification)

end_time = time.time() # Keep end time
- print(f"✅ Done in {end_time - start_time:.2f}s!") # Use print for final status
+ print(f"✅ Done in {end_time - start_time:.2f}s!")

- # Return processed image (or original), text, classification, solution, and update state
+
img_display = image if isinstance(image, Image.Image) else Image.fromarray(image)
return img_display, extracted_text, classification_json, solution, extracted_text, classification_json

@@ -320,9 +318,7 @@ def process_image(image):
return img_display, error_message, "{}", error_message, "", "{}"


- # --- Gradio Interface (Modern UI, No gr.Box) ---

- # Custom CSS (Kept as is)
css = """
body { font-family: 'Inter', sans-serif; }
.gradio-container { background-color: #f8f9fa; }
@@ -343,7 +339,7 @@ body { font-family: 'Inter', sans-serif; }
footer { visibility: hidden }
"""

- # Define a theme (Kept as is)
+
theme = gr_themes.Default(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky).set(
button_primary_background_fill="#4A90E2", button_primary_background_fill_hover="#357ABD",
button_secondary_background_fill="#E1E8ED", button_secondary_background_fill_hover="#CED9E0",
@@ -353,11 +349,11 @@ theme = gr_themes.Default(primary_hue=gr.themes.colors.blue, secondary_hue=gr.themes.colors.sky).set(

with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:

- # --- State Variables ---
+
extracted_text_state = gr.State("")
classification_state = gr.State("{}")

- # --- UI Layout ---
+
gr.Markdown("# 🧠 NerdAI Math Problem Solver", elem_id="title_markdown")
gr.Markdown("Upload a clear image of a math problem. NerdAI will extract the text, classify it, solve it step-by-step, and offer further help!", elem_id="subtitle_markdown")

@@ -368,27 +364,25 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
with gr.Column(scale=1, elem_id="output_col"):
processed_image = gr.Image(label="Processed Image", interactive=False, elem_id="processed_image", height=350)

- # --- Results Area (No gr.Box) ---
+
with gr.Group(elem_id="results_group"):
gr.Markdown("### Results")
extracted_text_output = gr.Textbox(label="📝 Extracted Text", lines=3, interactive=False, placeholder="Text from the image will appear here...", elem_id="extracted_text_output")
classification_output = gr.Textbox(label=f"📊 Problem Classification ({CLASSIFICATION_MODEL})", lines=5, interactive=False, placeholder="Problem type analysis will appear here...", elem_id="classification_output")
solution_output = gr.Markdown(label="✅ Solution Steps", value="*Solution steps will appear here after processing...*", elem_id="solution_output")

- # --- Action Buttons ---
+
with gr.Row(elem_id="action_buttons"):
explain_btn = gr.Button("🤔 Explain Further", variant="secondary")
similar_btn = gr.Button("📚 Similar Questions", variant="secondary")

- # --- Accordion for Detailed Outputs ---
+
with gr.Accordion("Detailed Explanation", open=False):
explanation_output = gr.Markdown(value="*Click 'Explain Further' above to get a detailed breakdown.*")
with gr.Accordion("Similar Practice Problems", open=False):
similar_problems_output = gr.Markdown(value="*Click 'Similar Questions' above to generate practice problems.*")

- # --- Event Handlers (Connecting UI to Original Backend Logic) ---
-
- # Main process button click (No progress tracking passed)
+
process_btn.click(
fn=process_image,
inputs=[input_image],
@@ -398,11 +392,10 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
],
)

- # Explain button click handler (Uses original explain_solution)
- # Using yield for feedback requires queue enabled
+
def explain_button_handler(current_problem_text, current_solution_md):
print("Explain button clicked.")
- # Input validation remains important
+
if not current_problem_text or current_problem_text.startswith("Error:") : yield "Please process an image successfully first." ; return
if not current_solution_md or current_solution_md.startswith("Error:") or "will appear here" in current_solution_md: yield "A valid solution needs to be generated first." ; return

@@ -416,14 +409,13 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
outputs=explanation_output
)

- # Similar problems button click handler (Uses original generate_similar_problems)
- # Using yield for feedback requires queue enabled
+
def similar_button_handler(current_problem_text, current_classification_json):
print("Similar button clicked.")
- # Input validation
+
if not current_problem_text or current_problem_text.startswith("Error:") : yield "Please process an image successfully first." ; return

- yield "*Generating similar problems... please wait.*" # Provide feedback
+ yield "*Generating similar problems... please wait.*"
classification_dict = {}
try:
if isinstance(current_classification_json, str) and current_classification_json.strip():
@@ -437,7 +429,7 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
except (json.JSONDecodeError, ValueError, TypeError) as e:
print(f"Error parsing/validating classification state: {e}")
yield f"Error: Could not use classification data ({e}). Cannot generate similar problems."
- return # Stop execution
+ return

similar_result = generate_similar_problems(current_problem_text, classification_dict)
yield similar_result
@@ -448,21 +440,11 @@ with gr.Blocks(theme=theme, css=css, title="NerdAI Math Solver") as demo:
outputs=similar_problems_output
)

- # Example Images (Kept as is)
- gr.Examples(
- examples=[
- ["examples/algebra_problem.png"],
- ["examples/calculus_problem.jpg"],
- ["examples/geometry_problem.png"],
- ],
- inputs=input_image,
- cache_examples=False,
- label="Example Math Problems"
- )
+
+


- # --- Launch the App ---
if __name__ == "__main__":
- # Create dummy example files (Kept as is)
+
if not os.path.exists("examples"): os.makedirs("examples")
for fname in ["algebra_problem.png", "calculus_problem.jpg", "geometry_problem.png"]:
fpath = os.path.join("examples", fname)
@@ -474,5 +456,5 @@ if __name__ == "__main__":
print(f"Created placeholder example: {fpath}")
except Exception as e: print(f"Could not create placeholder image {fpath}: {e}")

- # Enable queue for better handling of multiple users and yield feedback
- demo.queue().launch(debug=True) # Set debug=False for production
+
+ demo.queue().launch(debug=True)