Commit 548fbd0 · Yohan Runhaar committed
1 Parent(s): 0340c57

Initial commit for Coral AI Demo

Files changed:
- .idea/workspace.xml (+1 -1)
- README.md (+5 -4)
- app.py (+63 -53)
.idea/workspace.xml
CHANGED
@@ -42,7 +42,7 @@
       <option name="number" value="Default" />
       <option name="presentableId" value="Default" />
       <updated>1733405525071</updated>
-      <workItem from="1733405526612" duration="
+      <workItem from="1733405526612" duration="1615000" />
     </task>
     <servers />
   </component>
README.md
CHANGED
@@ -1,12 +1,13 @@
 ---
-title: Coral
+title: Coral AI Demo
 emoji: π
 colorFrom: blue
 colorTo: green
-sdk:
-
+sdk: gradio
+app_file: app.py
+pinned: true
 license: mit
-short_description:
+short_description: Interact with Coral AI models for image analysis and segmentation.
 ---
 
 # Coral AI Demo
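The front matter above is the configuration Hugging Face Spaces reads when building the Space: `sdk: gradio` selects the Gradio runtime and `app_file: app.py` names the entry point it executes. A minimal sketch of pulling those keys back out of the README, assuming PyYAML is installed (the helper name is illustrative):

```python
# Minimal sketch (assumes PyYAML): parse the Spaces front matter out of README.md.
import yaml

def read_front_matter(path: str = "README.md") -> dict:
    text = open(path, encoding="utf-8").read()
    # The front matter sits between the first two "---" fences.
    _, block, _ = text.split("---", 2)
    return yaml.safe_load(block)

config = read_front_matter()
print(config["sdk"], config["app_file"])  # -> gradio app.py
```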
app.py
CHANGED
@@ -1,73 +1,83 @@

Old version (lines not captured by the page extraction are shown as …):

import gradio as gr
import torch
import requests
import os

# Hugging Face …
…
# Available models
MODEL_VERSIONS = [
    "yolov8_xlarge_latest.pt",
    "yolov8_xlarge_v1.pt",
    "yolov8_xlarge_v2.pt"
]

# Download …
…
    if not os.path.exists(model_path):
        …
        response = requests.get(url, stream=True)
        with open(model_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
        print(f"Downloaded {version} to {model_path}")
    return model_path

# Load the model
def load_model(version):
    model_path = download_model(version)
    model = torch.load(model_path, map_location=torch.device("cpu"))
    model.eval()
    return model

# Perform inference
…
    try:
        model = load_model(version)
        # Placeholder logic: replace with actual model inference
        return f"Model '{version}' processed the uploaded image successfully."
    except Exception as e:
        return f"Error during inference: {str(e)}"

# Set up Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Coral AI Demo")
    gr.Markdown("Select a model version and upload an image for inference.")
    …
    )
…
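The rewrite replaces the hand-rolled `requests` download and `torch.load` loading path with the ultralytics API. `torch.load` on a YOLOv8 checkpoint returns a raw checkpoint dictionary rather than a callable model, so the old `model.eval()` call could not have worked; `ultralytics.YOLO` unpacks the checkpoint and handles preprocessing and prediction directly.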
New version:

import gradio as gr
import torch
import os
from ultralytics import YOLO

# Coral AI model files hosted in the Hugging Face model repository
model_names = [
    "yolov8_xlarge_latest.pt",
    "yolov8_xlarge_v1.pt",
    "yolov8_xlarge_v2.pt",
]

# Set the initial model
current_model_name = "yolov8_xlarge_latest.pt"
model_dir = "models"
os.makedirs(model_dir, exist_ok=True)

# Download models if not already present locally
HF_MODELS_REPO = "https://huggingface.co/reefsupport/coral-ai/resolve/main/models"
for model_name in model_names:
    model_path = os.path.join(model_dir, model_name)
    if not os.path.exists(model_path):
        print(f"Downloading {model_name}...")
        model_url = f"{HF_MODELS_REPO}/{model_name}"
        torch.hub.download_url_to_file(model_url, model_path)
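As an aside: if the checkpoints live under `models/` in the `reefsupport/coral-ai` model repo (which the resolve URL above suggests), `huggingface_hub` offers a cached alternative to the raw-URL download; a sketch under that assumption:

```python
# Hedged alternative download: assumes the checkpoints sit under models/
# in the reefsupport/coral-ai repo, as the resolve URL above implies.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="reefsupport/coral-ai",
    filename="yolov8_xlarge_latest.pt",
    subfolder="models",
)
print(local_path)  # cached locally; reused on subsequent runs
```

The new version continues: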
# Load the initial model
model = YOLO(os.path.join(model_dir, current_model_name))


def coral_ai_inference(image: str, model_name: str):
    """
    Coral AI inference function.

    Args:
        image: Input image filepath
        model_name: Name of the model

    Returns:
        Rendered image as a numpy array
    """
    global model
    global current_model_name
    # Reload only when the dropdown selection differs from the cached model
    if model_name != current_model_name:
        model = YOLO(os.path.join(model_dir, model_name))
        current_model_name = model_name

    # Perform inference; predict() returns a list of ultralytics Results
    results = model.predict(image)

    # Render predictions; plot() returns a BGR array, so flip to RGB for display
    rendered_image = results[0].plot()[..., ::-1]
    return rendered_image
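A quick way to exercise the function outside Gradio, assuming one of the example images listed further down exists locally:

```python
# Smoke test of the inference path; the image path is an assumption
# mirroring the examples list in the interface definition below.
rendered = coral_ai_inference("examples/coral_image1.jpg", "yolov8_xlarge_latest.pt")
print(type(rendered), rendered.shape)  # numpy.ndarray (H, W, 3)
```

The new version continues: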
# Define Gradio inputs and outputs
inputs = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Dropdown(
        model_names,
        value=current_model_name,
        label="Model Type",
    ),
]

outputs = gr.Image(type="filepath", label="Output Image")
title = "Coral AI YOLOv8 Segmentation Demo"

examples = [
    ["examples/coral_image1.jpg", "yolov8_xlarge_latest.pt"],
    ["examples/coral_image2.jpg", "yolov8_xlarge_latest.pt"],
    ["examples/coral_image3.jpg", "yolov8_xlarge_latest.pt"],
]

# Create and launch the Gradio interface
demo_app = gr.Interface(
    fn=coral_ai_inference,
    inputs=inputs,
    outputs=outputs,
    title=title,
    examples=examples,
    cache_examples=True,
    theme="default",
)
demo_app.queue().launch(debug=True)
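One deployment note: `cache_examples=True` makes Gradio run inference on every example at startup, so the three images under `examples/` must be present in the repo for the Space to come up. When iterating locally it can be cheaper to skip caching, e.g.:

```python
# Local-iteration variant (reuses the fn/inputs/outputs defined above);
# no example caching, and the server binds to all interfaces for LAN testing.
demo_app = gr.Interface(fn=coral_ai_inference, inputs=inputs, outputs=outputs)
demo_app.queue().launch(server_name="0.0.0.0", server_port=7860)
```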