practisebook committed on
Commit
c832915
·
verified ·
1 Parent(s): 27d7d83

Update assets/detection.js

Browse files
Files changed (1) hide show
  1. assets/detection.js +43 -23
assets/detection.js CHANGED
@@ -3,6 +3,16 @@ const canvas = document.getElementById("canvas");
3
  const ctx = canvas.getContext("2d");
4
  const startButton = document.getElementById("start");
5
 
 
 
 
 
 
 
 
 
 
 
6
  function speak(text) {
7
  const synth = window.speechSynthesis;
8
  const utterance = new SpeechSynthesisUtterance(text);
@@ -19,12 +29,10 @@ async function setupCamera() {
19
  });
20
  }
21
 
22
- async function detectObjects() {
23
- console.log("Loading YOLO model...");
24
- const model = await tf.loadGraphModel(
25
- "https://path-to-your-model/model.json"
26
- ); // Replace with your actual model path
27
 
 
28
  canvas.width = video.videoWidth;
29
  canvas.height = video.videoHeight;
30
 
@@ -33,31 +41,43 @@ async function detectObjects() {
33
 
34
  const inputTensor = tf.browser
35
  .fromPixels(video)
36
- .resizeBilinear([640, 480])
37
- .expandDims(0);
 
38
 
39
  const predictions = await model.executeAsync(inputTensor);
40
 
 
 
41
  ctx.clearRect(0, 0, canvas.width, canvas.height);
42
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
43
 
44
- predictions.forEach((prediction) => {
45
- const [x, y, width, height] = prediction.bbox;
46
- const label = prediction.class;
47
- const confidence = prediction.score;
 
 
 
48
 
49
- // Draw bounding box
50
- ctx.strokeStyle = "red";
51
- ctx.lineWidth = 2;
52
- ctx.strokeRect(x, y, width, height);
53
 
54
- // Draw label
55
- ctx.fillStyle = "red";
56
- ctx.font = "18px Arial";
57
- ctx.fillText(`${label} (${(confidence * 100).toFixed(2)}%)`, x, y - 10);
58
 
59
- // Provide audio feedback
60
- speak(`${label} detected with ${(confidence * 100).toFixed(2)}% confidence.`);
 
 
 
 
 
 
61
  });
62
 
63
  requestAnimationFrame(processFrame);
@@ -68,7 +88,7 @@ async function detectObjects() {
68
 
69
  startButton.addEventListener("click", async () => {
70
  startButton.disabled = true;
 
71
  await setupCamera();
72
- video.play();
73
- detectObjects();
74
  });
 
3
  const ctx = canvas.getContext("2d");
4
  const startButton = document.getElementById("start");
5
 
6
/**
 * Loads the SSD MobileNet v2 object-detection graph model from TF Hub.
 *
 * @returns {Promise<tf.GraphModel>} the loaded model, ready for executeAsync.
 * @throws {Error} (with `cause`) when the model cannot be fetched/parsed,
 *   so the caller can surface the failure instead of hanging silently.
 */
async function loadModel() {
  console.log("Loading model...");
  try {
    const model = await tf.loadGraphModel(
      // TF.js-compatible Hub models live under the "tfjs-model" path
      // segment; the bare SavedModel URL has no model.json and fails to load.
      "https://tfhub.dev/tensorflow/tfjs-model/ssd_mobilenet_v2/1/default/1",
      { fromTFHub: true }
    );
    console.log("Model loaded successfully.");
    return model;
  } catch (err) {
    console.error("Model failed to load:", err);
    throw new Error("Could not load detection model", { cause: err });
  }
}
15
+
16
  function speak(text) {
17
  const synth = window.speechSynthesis;
18
  const utterance = new SpeechSynthesisUtterance(text);
 
29
  });
30
  }
31
 
32
+ async function detectObjects(model) {
33
+ console.log("Starting detection...");
 
 
 
34
 
35
+ video.play();
36
  canvas.width = video.videoWidth;
37
  canvas.height = video.videoHeight;
38
 
 
41
 
42
  const inputTensor = tf.browser
43
  .fromPixels(video)
44
+ .resizeBilinear([300, 300])
45
+ .expandDims(0)
46
+ .div(tf.scalar(255));
47
 
48
  const predictions = await model.executeAsync(inputTensor);
49
 
50
+ const [boxes, scores, classes] = predictions;
51
+
52
  ctx.clearRect(0, 0, canvas.width, canvas.height);
53
  ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
54
 
55
+ const detectionThreshold = 0.5;
56
+
57
+ scores.arraySync()[0].forEach((score, index) => {
58
+ if (score > detectionThreshold) {
59
+ const bbox = boxes.arraySync()[0][index];
60
+ const classId = classes.arraySync()[0][index];
61
+ const label = `Object ${classId}`;
62
 
63
+ const x = bbox[1] * canvas.width;
64
+ const y = bbox[0] * canvas.height;
65
+ const width = (bbox[3] - bbox[1]) * canvas.width;
66
+ const height = (bbox[2] - bbox[0]) * canvas.height;
67
 
68
+ // Draw bounding box
69
+ ctx.strokeStyle = "red";
70
+ ctx.lineWidth = 2;
71
+ ctx.strokeRect(x, y, width, height);
72
 
73
+ // Draw label
74
+ ctx.fillStyle = "red";
75
+ ctx.font = "16px Arial";
76
+ ctx.fillText(label, x, y > 10 ? y - 5 : y + 20);
77
+
78
+ // Speak detected object
79
+ speak(`Detected ${label}`);
80
+ }
81
  });
82
 
83
  requestAnimationFrame(processFrame);
 
88
 
89
// Start object detection on click: load the model, open the camera, then
// begin the detection loop. The button is disabled while starting; on any
// failure it is re-enabled so the user can retry, and the error is logged
// instead of becoming an unhandled promise rejection.
startButton.addEventListener("click", async () => {
  startButton.disabled = true;
  try {
    const model = await loadModel();
    await setupCamera();
    detectObjects(model);
  } catch (err) {
    console.error("Failed to start detection:", err);
    startButton.disabled = false;
  }
});