Update index.html
index.html  CHANGED  (+116 -22)
@@ -1,29 +1,123 @@
[removed lines from the old <head> and <body> are not shown in this view]
<!DOCTYPE html>
<html lang="en">
<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>Nearest Sentence Finder</title>
+    <style>
+        body {
+            font-family: Arial, sans-serif;
+            text-align: center;
+            margin-top: 50px;
+        }
+        #nearestSentence {
+            font-size: 1.5em;
+            color: #333;
+        }
+    </style>
</head>
<body>
+    <h1>Nearest Sentence Finder</h1>
+    <p>Sentence closest to the spoken words:</p>
+    <div id="nearestSentence">Loading...</div>
+
+    <script type="module">
+        // @xenova/transformers is published as an ES module, so it is
+        // imported here rather than loaded as a classic script tag.
+        import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+
+        // Check for browser support
+        const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+        if (!SpeechRecognition) {
+            alert("Your browser does not support the Web Speech API. Please use a compatible browser.");
+        } else {
+            const recognition = new SpeechRecognition();
+            recognition.continuous = true;
+            recognition.interimResults = false;
+            recognition.lang = 'en-US';
+
+            let wordBuffer = [];
+            const bufferDuration = 30 * 1000; // 30 seconds
+            const nearestSentenceElement = document.getElementById('nearestSentence');
+
+            // Predefined candidate words
+            const sampleSentences = ['person', 'man', 'woman', 'camera', 'tv'];
+            let sampleEmbeddings = [];
+            let extractor; // feature-extraction pipeline, set once the model has loaded
+
+            // Embed text as a flat, mean-pooled, normalized vector
+            async function embed(text) {
+                const output = await extractor(text, { pooling: 'mean', normalize: true });
+                return Array.from(output.data);
+            }
+
+            // Load the pipeline and compute embeddings for the sample words
+            async function initializePipeline() {
+                extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
+                sampleEmbeddings = await Promise.all(sampleSentences.map(word => embed(word)));
+                nearestSentenceElement.textContent = 'Model loaded. Start speaking!';
+            }
+
+            // Compute Euclidean distance
+            function euclideanDistance(vec1, vec2) {
+                return Math.sqrt(vec1.reduce((sum, val, i) => sum + Math.pow(val - vec2[i], 2), 0));
+            }
+
+            // Find the nearest sample word
+            function findNearestSentence(transcriptEmbedding) {
+                let nearestSentence = '';
+                let minDistance = Infinity;
+
+                sampleEmbeddings.forEach((embedding, index) => {
+                    const distance = euclideanDistance(transcriptEmbedding, embedding);
+                    if (distance < minDistance) {
+                        minDistance = distance;
+                        nearestSentence = sampleSentences[index];
+                    }
+                });
+
+                return nearestSentence;
+            }

+            // Start speech recognition
+            recognition.start();
+
+            recognition.onresult = async (event) => {
+                // With continuous recognition, event.results also holds every
+                // earlier result in the session, so read only the new ones.
+                const transcript = Array.from(event.results)
+                    .slice(event.resultIndex)
+                    .map(result => result[0].transcript)
+                    .join(' ');
+
+                const timestamp = Date.now();
+                console.log({ transcript, timestamp });
+
+                // Add transcript to the buffer with timestamps
+                wordBuffer.push({ transcript, timestamp });
+
+                // Remove transcripts older than 30 seconds
+                wordBuffer = wordBuffer.filter(item => timestamp - item.timestamp <= bufferDuration);
+
+                // Combine all transcripts in the buffer
+                const combinedTranscript = wordBuffer.map(item => item.transcript).join(' ');
+                console.log({ combinedTranscript });
+
+                // Compute embedding for the combined transcript
+                if (extractor) {
+                    const transcriptEmbedding = await embed(combinedTranscript);
+
+                    // Find and display the nearest sample word
+                    const nearestSentence = findNearestSentence(transcriptEmbedding);
+                    nearestSentenceElement.textContent = nearestSentence;
+                }
+            };
+
+            recognition.onerror = (event) => {
+                console.error('Speech recognition error:', event.error);
+                if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
+                    alert('Microphone access is blocked. Please allow microphone access and reload the page.');
+                }
+            };
+
+            recognition.onend = () => {
+                console.warn('Speech recognition stopped. Restarting...');
+                recognition.start(); // Restart recognition if it stops
+            };
+
+            // Initialize the pipeline
+            initializePipeline();
+        }
+    </script>
+</body>
</html>
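
A quick way to sanity-check the matching logic without a microphone is to run the embedding and nearest-neighbour steps on their own. The sketch below is a hypothetical test harness, not part of the commit: it assumes the same CDN build and Xenova/all-MiniLM-L6-v2 model as the page, and the test phrase and the embed/dist helper names are made up. It can be pasted into any <script type="module"> block.

    import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

    const extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
    const words = ['person', 'man', 'woman', 'camera', 'tv'];

    // One mean-pooled, L2-normalized embedding per candidate word
    const embed = async (text) => {
        const out = await extractor(text, { pooling: 'mean', normalize: true });
        return Array.from(out.data);
    };
    const embeddings = await Promise.all(words.map(w => embed(w)));

    // Hypothetical query standing in for a 30-second transcript
    const query = await embed('the lady on the television');

    // Rank candidates by Euclidean distance, nearest first. Because the
    // vectors are L2-normalized, ||a - b||^2 = 2 - 2*cos(a, b), so this
    // ordering is the same as a cosine-similarity ranking.
    const dist = (a, b) => Math.hypot(...a.map((v, i) => v - b[i]));
    console.log(
        words.map((w, i) => [w, dist(query, embeddings[i])])
             .sort((a, b) => a[1] - b[1])
    );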
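
One deployment note: microphone capture is only granted in secure contexts, so the page has to be served over https or from localhost, and Chrome's SpeechRecognition streams audio to a server-side recognizer, so it also needs network access.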