Commit ab62755
1 Parent(s): da548c5
Update README.md

README.md CHANGED

@@ -87,42 +87,41 @@ Users should be informed about the model's limitations and potential biases. Fur

To get started with the YOLOv8s object Detection and Classification model, follow these steps:

```bash
- pip install
```

- Load model and perform prediction:

```python
- from ultralyticsplus import YOLO, render_result
- model.overrides['iou'] = 0.45  # NMS IoU threshold
- model.overrides['agnostic_nms'] = False  # NMS class-agnostic
- model.overrides['max_det'] = 1000  # maximum number of detections per image
- render.show()
- ```
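
Only a few lines of the removed prediction example are visible in this diff. For reference, a typical ultralyticsplus prediction flow looks roughly like the sketch below; the repository id, confidence threshold, and image path are placeholders rather than values recovered from the original README.

```python
from ultralyticsplus import YOLO, render_result

# load model (placeholder repository id, not recovered from the original README)
model = YOLO('foduucom/thermal-image-object-detection')

# set model parameters; iou / agnostic_nms / max_det match the removed lines above,
# the confidence threshold is an assumed default
model.overrides['conf'] = 0.25           # NMS confidence threshold (assumed)
model.overrides['iou'] = 0.45            # NMS IoU threshold
model.overrides['agnostic_nms'] = False  # NMS class-agnostic
model.overrides['max_det'] = 1000        # maximum number of detections per image

# run inference on an image (placeholder path)
image = 'path/to/thermal_image.jpg'
results = model.predict(image)

# inspect and visualize the detections
print(results[0].boxes)
render = render_result(model=model, image=image, result=results[0])
render.show()
```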

- - Finetune the model on your custom dataset:

- ```bash
- yolov8 train --data dataset.yaml --img 640 --batch -1 --weights foduucom/object_detection --epochs 10
- ```
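
The removed command mixes a `yolov8` entry point with YOLOv5-style flags; released ultralytics versions expose a `yolo` entry point with `key=value` arguments, so the line likely would not run as written. As a hedged reference, an equivalent fine-tuning run through the ultralytics Python API would look roughly like this (the starting checkpoint is an assumption; the dataset file, image size, batch setting, and epoch count mirror the removed command):

```python
from ultralytics import YOLO

# start from a pretrained YOLOv8s checkpoint (assumption; the removed command
# pointed --weights at a foduucom repository) and fine-tune on a custom dataset
model = YOLO('yolov8s.pt')
model.train(
    data='dataset.yaml',  # dataset definition, as in the removed command
    imgsz=640,            # --img 640
    batch=-1,             # --batch -1 (auto batch size)
    epochs=10,            # --epochs 10
)
```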

### Compute Infrastructure


To get started with the YOLOv8s object Detection and Classification model, follow these steps:

```bash
+ pip install transformers
```

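One caveat worth noting: the detection snippet below also imports torch, PIL (Pillow), and requests, which `pip install transformers` alone does not necessarily pull in. A small editorial sanity check along these lines, not part of the model card, confirms the environment before running the example:

```python
# Verify that every package the detection example relies on is importable.
import importlib.util

required = ("transformers", "torch", "PIL", "requests")
missing = [name for name in required if importlib.util.find_spec(name) is None]
if missing:
    raise SystemExit(f"Missing dependencies: {', '.join(missing)}")
print("All dependencies for the detection example are available.")
```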

- Load model and perform prediction:

```python
+ from transformers import YolosImageProcessor, YolosForObjectDetection
+ from PIL import Image
+ import torch
+ import requests

+ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ image = Image.open(requests.get(url, stream=True).raw)

+ model = YolosForObjectDetection.from_pretrained('foduucom/thermal-image-object-detection')
+ image_processor = YolosImageProcessor.from_pretrained("foduucom/thermal-image-object-detection")

+ inputs = image_processor(images=image, return_tensors="pt")
+ outputs = model(**inputs)

+ # model predicts bounding boxes and corresponding COCO classes
+ logits = outputs.logits
+ bboxes = outputs.pred_boxes

+ # print results
+ target_sizes = torch.tensor([image.size[::-1]])
+ results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
+ for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
+     box = [round(i, 2) for i in box.tolist()]
+     print(
+         f"Detected {model.config.id2label[label.item()]} with confidence "
+         f"{round(score.item(), 3)} at location {box}"
+     )
+ ```
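
As an optional follow-up to the snippet above (an editorial sketch, not part of the model card), the filtered detections can be drawn onto the image with Pillow, reusing the `image`, `model`, and `results` variables defined there; the output filename is arbitrary:

```python
from PIL import ImageDraw

# Draw each box returned by post_process_object_detection onto a copy of the image.
annotated = image.copy()
draw = ImageDraw.Draw(annotated)
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    x_min, y_min, x_max, y_max = box.tolist()
    draw.rectangle([x_min, y_min, x_max, y_max], outline="red", width=2)
    draw.text(
        (x_min, max(0, y_min - 10)),
        f"{model.config.id2label[label.item()]} {score.item():.2f}",
        fill="red",
    )
annotated.save("detections.jpg")
```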

### Compute Infrastructure