Update app.py
app.py CHANGED
@@ -9,25 +9,28 @@ import json
 # Load the model
 try:
     model = load_model('wound_classifier_model_googlenet.h5')
+    print("✅ Model loaded successfully")
 except Exception as e:
-    raise RuntimeError(f"
+    raise RuntimeError(f"❌ Model loading failed: {e}")
 
 # OpenRouter.ai Configuration
 OPENROUTER_API_KEY = "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"
 OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
-MODEL_NAME = "mistralai/mistral-
+MODEL_NAME = "mistralai/mistral-7b-instruct"  # Updated model name
 
 input_shape = (224, 224, 3)
 
 def preprocess_image(image, target_size):
     """Preprocess the input image for the model."""
-
-
-
-
-
-
-
+    try:
+        if image is None:
+            raise ValueError("No image provided")
+        image = image.convert("RGB")
+        image = image.resize(target_size)
+        return np.array(image) / 255.0
+    except Exception as e:
+        print(f"⚠️ Image preprocessing error: {e}")
+        raise
 
 def get_medical_guidelines(wound_type):
     """Fetch medical guidelines using OpenRouter.ai's API."""
@@ -39,20 +42,37 @@ def get_medical_guidelines(wound_type):
     }
 
     prompt = f"""As a medical professional, provide detailed guidelines for treating a {wound_type} wound.
-    Include
+    Include:
+    1. First aid steps
+    2. Precautions
+    3. When to seek professional help
+    Output in markdown with clear sections."""
 
     data = {
         "model": MODEL_NAME,
         "messages": [{"role": "user", "content": prompt}],
-        "temperature": 0.
+        "temperature": 0.5
     }
 
     try:
-
+        print(f"🚀 Sending request to OpenRouter API for {wound_type}...")
+        response = requests.post(OPENROUTER_API_URL, headers=headers, json=data, timeout=10)
         response.raise_for_status()
-
+
+        response_json = response.json()
+        print("🔧 Raw API response:", json.dumps(response_json, indent=2))
+
+        if "choices" not in response_json:
+            return "⚠️ API response format unexpected. Please check logs."
+
+        return response_json["choices"][0]["message"]["content"]
+
+    except requests.exceptions.HTTPError as e:
+        print(f"❌ HTTP Error: {e.response.status_code} - {e.response.text}")
+        return f"API Error: {e.response.status_code} - Check console for details"
     except Exception as e:
-
+        print(f"⚠️ General API error: {str(e)}")
+        return f"Error: {str(e)}"
 
 def predict(image):
     """Main prediction function."""
@@ -60,38 +80,51 @@ def predict(image):
         # Preprocess image
        input_data = preprocess_image(image, (input_shape[0], input_shape[1]))
         input_data = np.expand_dims(input_data, axis=0)
+        print("🖼️ Image preprocessed successfully")
 
         # Load class labels
-
-
+        try:
+            with open('classes.txt', 'r') as file:
+                class_labels = file.read().splitlines()
+            print("📋 Class labels loaded:", class_labels)
+        except Exception as e:
+            raise RuntimeError(f"Class labels loading failed: {e}")
 
+        # Verify model compatibility
         if len(class_labels) != model.output_shape[-1]:
-            raise ValueError("
+            raise ValueError(f"Model expects {model.output_shape[-1]} classes, found {len(class_labels)}")
 
         # Make prediction
         predictions = model.predict(input_data)
-
+        print("📊 Raw predictions:", predictions)
+
+        results = {class_labels[i]: float(predictions[0][i])
+                   for i in range(len(class_labels))}
         predicted_class = max(results, key=results.get)
+        print(f"🏆 Predicted class: {predicted_class}")
 
         # Get medical guidelines
         guidelines = get_medical_guidelines(predicted_class)
+        print("📜 Guidelines generated successfully")
 
-        return results, guidelines
+        return results, guidelines
 
     except Exception as e:
-
+        print(f"🔥 Critical error in prediction: {str(e)}")
+        return {"Error": str(e)}, ""
 
 # Gradio Interface
 iface = gr.Interface(
     fn=predict,
-    inputs=gr.Image(type="pil"),
+    inputs=gr.Image(type="pil", label="Upload Wound Image"),
     outputs=[
         gr.Label(num_top_classes=3, label="Classification Results"),
-        gr.
+        gr.Markdown(label="Medical Guidelines")
     ],
-    live=
+    live=False,
     title="Wound Classification & Treatment Advisor",
-    description="Upload a wound image for classification and
+    description="Upload a wound image for AI-powered classification and treatment guidelines.",
+    allow_flagging="never"
 )
 
 iface.launch(server_name="0.0.0.0", server_port=7860)
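
As a quick sanity check of the classification path touched by this commit, the sketch below mirrors the preprocessing and prediction steps from app.py outside Gradio. It is a sketch, not part of the commit: it assumes load_model comes from tensorflow.keras (the import block is not shown in this diff), that wound_classifier_model_googlenet.h5 and classes.txt sit in the working directory, and that 'sample_wound.jpg' is a hypothetical test image.

# Local smoke test (sketch) mirroring app.py's preprocessing and prediction steps.
# Assumes load_model is tensorflow.keras's; 'sample_wound.jpg' is a hypothetical image.
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model

model = load_model('wound_classifier_model_googlenet.h5')

with open('classes.txt', 'r') as f:
    class_labels = f.read().splitlines()

# Same preprocessing as preprocess_image(): RGB, 224x224, scaled to [0, 1]
image = Image.open('sample_wound.jpg').convert('RGB').resize((224, 224))
input_data = np.expand_dims(np.array(image) / 255.0, axis=0)

predictions = model.predict(input_data)
results = {class_labels[i]: float(predictions[0][i]) for i in range(len(class_labels))}
print(max(results, key=results.get))  # predicted wound class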