Yohan Runhaar committed
Commit 548fbd0 · Parent: 0340c57

Initial commit for Coral AI Demo

Files changed (3)
  1. .idea/workspace.xml +1 -1
  2. README.md +5 -4
  3. app.py +63 -53
.idea/workspace.xml CHANGED
@@ -42,7 +42,7 @@
       <option name="number" value="Default" />
       <option name="presentableId" value="Default" />
       <updated>1733405525071</updated>
-      <workItem from="1733405526612" duration="1184000" />
+      <workItem from="1733405526612" duration="1615000" />
     </task>
     <servers />
   </component>
README.md CHANGED
@@ -1,12 +1,13 @@
 ---
-title: Coral Ai Demo
+title: Coral AI Demo
 emoji: 🐠
 colorFrom: blue
 colorTo: green
-sdk: static
-pinned: false
+sdk: gradio
+app_file: app.py
+pinned: true
 license: mit
-short_description: Demo Spaces for our Coral AI Models
+short_description: Interact with Coral AI models for image analysis and segmentation.
 ---
 
 # Coral AI Demo
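With this change the Space front matter switches from `sdk: static` to `sdk: gradio` with `app_file: app.py`, so Hugging Face Spaces now builds and serves the Gradio app defined in app.py instead of static HTML; `pinned: true` keeps the Space at the top of the owner's profile.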
app.py CHANGED
@@ -1,73 +1,83 @@
 import gradio as gr
 import torch
-import requests
 import os
+from ultralytics import YOLO
 
-# Hugging Face models repository URL
-HF_MODELS_REPO = "https://huggingface.co/reefsupport/coral-ai/resolve/main/models"
-
-# Available models
-MODEL_VERSIONS = [
+# Coral AI model files hosted in the Hugging Face model repository
+model_names = [
     "yolov8_xlarge_latest.pt",
     "yolov8_xlarge_v1.pt",
-    "yolov8_xlarge_v2.pt"
+    "yolov8_xlarge_v2.pt",
 ]
 
+# Set the initial model
+current_model_name = "yolov8_xlarge_latest.pt"
+model_dir = "models"
+os.makedirs(model_dir, exist_ok=True)
 
-# Download the selected model if not present locally
-def download_model(version):
-    model_path = f"models/{version}"
+# Download models if not already present locally
+HF_MODELS_REPO = "https://huggingface.co/reefsupport/coral-ai/resolve/main/models"
+for model_name in model_names:
+    model_path = os.path.join(model_dir, model_name)
     if not os.path.exists(model_path):
-        os.makedirs("models", exist_ok=True)
-        url = f"{HF_MODELS_REPO}/{version}"
-        print(f"Downloading {version} from {url}...")
-        response = requests.get(url, stream=True)
-        with open(model_path, "wb") as f:
-            for chunk in response.iter_content(chunk_size=8192):
-                if chunk:
-                    f.write(chunk)
-        print(f"Downloaded {version} to {model_path}")
-    return model_path
+        print(f"Downloading {model_name}...")
+        model_url = f"{HF_MODELS_REPO}/{model_name}"
+        torch.hub.download_url_to_file(model_url, model_path)
 
+# Load the initial model
+model = YOLO(os.path.join(model_dir, current_model_name))
 
-# Load the model
-def load_model(version):
-    model_path = download_model(version)
-    model = torch.load(model_path, map_location=torch.device("cpu"))
-    model.eval()
-    return model
 
+def coral_ai_inference(image: str, model_name: str):
+    """
+    Coral AI inference function
+    Args:
+        image: Input image filepath
+        model_name: Name of the model
+    Returns:
+        Rendered image
+    """
+    global model
+    global current_model_name
+    if model_name != current_model_name:
+        model = YOLO(os.path.join(model_dir, model_name))
+        current_model_name = model_name
 
-# Perform inference
-def infer(version, image):
-    try:
-        model = load_model(version)
-        # Placeholder logic: replace with actual model inference
-        return f"Model '{version}' processed the uploaded image successfully."
-    except Exception as e:
-        return f"Error during inference: {str(e)}"
+    # Perform inference
+    results = model.predict(image, return_outputs=True)
 
+    # Render results (placeholder logic)
+    rendered_image = results[0].plot()  # Visualization of predictions
+    return rendered_image
 
-# Set up Gradio interface
-with gr.Blocks() as demo:
-    gr.Markdown("# Coral AI Demo")
-    gr.Markdown("Select a model version and upload an image for inference.")
 
-    with gr.Row():
-        version_dropdown = gr.Dropdown(choices=MODEL_VERSIONS, label="Select Model Version")
-        image_input = gr.Image(type="filepath", label="Upload Image")
-
-    with gr.Row():
-        result_output = gr.Textbox(label="Inference Result")
+# Define Gradio inputs and outputs
+inputs = [
+    gr.Image(type="filepath", label="Input Image"),
+    gr.Dropdown(
+        model_names,
+        value=current_model_name,
+        label="Model Type",
+    ),
+]
 
-    with gr.Row():
-        run_button = gr.Button("Run Model")
+outputs = gr.Image(type="filepath", label="Output Image")
+title = "Coral AI YOLOv8 Segmentation Demo"
 
-    # Link components
-    run_button.click(
-        fn=infer,
-        inputs=[version_dropdown, image_input],
-        outputs=result_output
-    )
+examples = [
+    ["examples/coral_image1.jpg", "yolov8_xlarge_latest.pt"],
+    ["examples/coral_image2.jpg", "yolov8_xlarge_latest.pt"],
+    ["examples/coral_image3.jpg", "yolov8_xlarge_latest.pt"],
+]
 
-demo.launch()
+# Create and launch the Gradio interface
+demo_app = gr.Interface(
+    fn=coral_ai_inference,
+    inputs=inputs,
+    outputs=outputs,
+    title=title,
+    examples=examples,
+    cache_examples=True,
+    theme="default",
+)
+demo_app.queue().launch(debug=True)
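A review note on the new inference call: `return_outputs=True` appeared only in very early `ultralytics` 8.0.x builds, where `predict()` yielded raw output dicts without a `.plot()` method, and recent releases do not document it as a `predict()` argument; either way its pairing with `results[0].plot()` is fragile. A minimal sketch against the current documented API (the channel flip is needed because `Results.plot()` returns a BGR array):

```python
from ultralytics import YOLO

model = YOLO("models/yolov8_xlarge_latest.pt")

def coral_ai_inference(image: str, model_name: str):
    # ... model-swapping logic unchanged ...
    # predict() accepts the image path directly and returns a list of Results
    results = model.predict(image)
    # Results.plot() renders masks and boxes as a BGR numpy array;
    # reverse the channel order so Gradio shows the expected colors
    return results[0].plot()[:, :, ::-1]
```

Note also that `cache_examples=True` makes Gradio run the inference function on all three `examples/coral_image*.jpg` entries at startup, so those files must exist in the Space repo or the build will fail.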
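A second note: the download loop hand-builds `resolve/main` URLs and fetches them with `torch.hub.download_url_to_file`, which works for public files but bypasses Hub caching and authentication. A hedged alternative using `huggingface_hub`; the `repo_id` and the `models/` subfolder are inferred from `HF_MODELS_REPO` above, so the exact repo layout is an assumption:

```python
from huggingface_hub import hf_hub_download

model_names = ["yolov8_xlarge_latest.pt", "yolov8_xlarge_v1.pt", "yolov8_xlarge_v2.pt"]

# hf_hub_download verifies and caches each file in the local HF cache and
# returns its path; subfolder="models" mirrors the models/ directory in the URL
model_paths = {
    name: hf_hub_download(
        repo_id="reefsupport/coral-ai",  # assumed from HF_MODELS_REPO
        filename=name,
        subfolder="models",
    )
    for name in model_names
}
```

`YOLO` could then be pointed at `model_paths[model_name]` instead of `os.path.join(model_dir, model_name)`.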
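Finally, because `demo_app.queue()` allows concurrent requests, reassigning the `global model` inside the handler can race when two users select different checkpoints. A sketch of a load-once cache keyed by model name, assuming memory suffices for all three checkpoints:

```python
import os
from ultralytics import YOLO

model_dir = "models"
_models: dict[str, YOLO] = {}

def get_model(model_name: str) -> YOLO:
    # Load each checkpoint at most once and keep it in memory; nothing is
    # reassigned, so concurrent requests never observe a half-swapped model
    if model_name not in _models:
        _models[model_name] = YOLO(os.path.join(model_dir, model_name))
    return _models[model_name]
```

The handler body then reduces to `results = get_model(model_name).predict(image)` with no globals.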