kimhyunwoo committed on
Commit 35c0ea2 · verified · 1 Parent(s): f8a195e

Update index.html

Files changed (1)
  1. index.html +39 -115
index.html CHANGED
@@ -5,6 +5,7 @@
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
  <title>AI Assistant (Gemma 3 1B - v3 Attempt)</title>
7
  <style>
 
8
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
9
  :root {
10
  --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
@@ -19,7 +20,6 @@
19
  --header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
20
  --container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07);
21
  }
22
- /* Keep the rest of the CSS the same as before */
23
  * { box-sizing: border-box; margin: 0; padding: 0; }
24
  html { height: 100%; }
25
  body {
@@ -66,7 +66,7 @@
66
  .control-button:active:not(:disabled) { transform: scale(0.95); }
67
  .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
68
  #toggleSpeakerButton.muted { background-color: #aaa; }
69
- @media (max-width: 600px) { /* Keep responsive styles */
70
  body { padding: 5px; justify-content: flex-start; }
71
  #control-panel { margin-bottom: 5px; padding: 12px; }
72
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
@@ -80,8 +80,6 @@
80
  {
81
  "imports": {
82
  "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest"
83
- /* "/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest" */
84
- /* "/transformers/": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest/" */
85
  }
86
  }
87
  </script>
@@ -97,7 +95,7 @@
97
  <h1 id="chatbot-name">AI Assistant</h1>
98
  <div id="chatbox">
99
  <div id="messages">
100
- <!-- Chat messages will appear here -->
101
  </div>
102
  </div>
103
  <div id="input-area">
@@ -118,11 +116,11 @@
118
 
119
  env.allowLocalModels = false;
120
  env.useBrowserCache = true;
121
- env.backends.onnx.executionProviders = ['webgpu', 'wasm']; // Keep WebGPU priority
122
  console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
123
- // Attempt to leverage WebGPU if available (v3 feature)
124
  env.backends.onnx.prefer_alternative_execution_providers = true;
125
 
 
126
  const chatbox = document.getElementById('messages');
127
  const userInput = document.getElementById('userInput');
128
  const sendButton = document.getElementById('sendButton');
@@ -132,18 +130,21 @@
132
  const modelStatus = document.getElementById('model-status');
133
  const loadModelButton = document.getElementById('loadModelButton');
134
 
 
135
  let generator = null;
136
  let isLoadingModel = false;
137
  let conversationHistory = [];
138
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
139
- const stateKey = 'gemma3_1b_v3_state_v1'; // New key for v3 attempt
140
  const historyKey = 'gemma3_1b_v3_history_v1';
141
 
 
142
  let recognition = null;
143
  let synthesis = window.speechSynthesis;
144
  let targetVoice = null;
145
  let isListening = false;
146
 
 
147
  window.addEventListener('load', () => {
148
  loadState();
149
  chatbotNameElement.textContent = botState.botName;
@@ -154,146 +155,69 @@
154
  if (conversationHistory.length > 0) displayHistory();
155
  setTimeout(loadVoices, 500);
156
  loadModelButton.addEventListener('click', handleLoadModelClick);
157
- // Log the loaded library version (if possible, might not be exposed easily)
158
  console.log("Attempting to use Transformers.js (latest version from CDN)");
159
  displayMessage('system', `Using latest Transformers.js from CDN. Attempting to load ${MODEL_NAME}.`, false);
160
  });
161
 
162
- function loadState() {
 
163
  const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
164
  const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); } catch(e) { conversationHistory = []; } }
165
  }
166
- function saveState() {
167
  localStorage.setItem(stateKey, JSON.stringify(botState));
168
  localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
169
  }
170
- function displayHistory() {
171
  chatbox.innerHTML = ''; conversationHistory.forEach(msg => displayMessage(msg.sender, msg.text, false));
172
  }
173
 
174
- function displayMessage(sender, text, animate = true, isError = false) {
175
- const messageDiv = document.createElement('div');
176
- let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
177
- if (sender === 'system' && isError) messageClass = 'error-message'; // Style system errors
178
- messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none';
179
- text = text.replace(/</g, "&lt;").replace(/>/g, "&gt;"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>');
180
- messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
181
  }
182
-
183
- function updateModelStatus(message, type = 'info') {
184
  modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
185
  }
186
-
187
- function updateChatUIState(isModelLoadedSuccessfully) {
188
- userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel;
189
- sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === '';
190
- speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition;
191
- toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis;
192
- loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully;
193
- if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; }
194
- else if (isLoadingModel) { userInput.placeholder = "Model loading..."; }
195
- else { userInput.placeholder = "Please load the model first..."; }
196
  }
197
-
198
- function updateSpeakerButtonUI() { /* No change */
199
  toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
200
  }
201
  function showSpeechStatus(message) { console.log("Speech Status:", message); }
202
  function setupInputAutosize() { userInput.addEventListener('input', () => { userInput.style.height = 'auto'; userInput.style.height = userInput.scrollHeight + 'px'; updateChatUIState(generator !== null); }); }
203
 
204
- async function handleLoadModelClick() {
205
- if (isLoadingModel || generator) return;
206
- isLoadingModel = true;
207
- generator = null;
208
- updateChatUIState(false);
209
- await initializeModel(MODEL_NAME);
210
- isLoadingModel = false;
211
- updateChatUIState(generator !== null);
212
  }
213
-
214
- async function initializeModel(modelId) {
215
- updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" } using latest Transformers.js...`, 'loading');
216
- displayMessage('system', `Attempting to load ${modelId} using latest library version.`, false);
217
-
218
  try {
219
- // Attempt pipeline creation with Q4 dtype and latest library
220
- generator = await pipeline(TASK, modelId, {
221
- dtype: QUANTIZATION,
222
- progress_callback: (progress) => {
223
- const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
224
- updateModelStatus(msg, 'loading');
225
- }
226
- });
227
-
228
- // If successful (possible with v3+)
229
- updateModelStatus(`${modelId} loaded successfully with latest library!`, 'success');
230
- displayMessage('system', `[SUCCESS] ${modelId} loaded. The newer library version might support it.`, false);
231
-
232
  } catch (error) {
233
- console.error(`Model loading failed for ${modelId} (with latest library):`, error);
234
- let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
235
- if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
236
- errorMsg += " The 'gemma3_text' type is still likely unsupported even in the latest library version, or the ONNX conversion has issues.";
237
- } else if (error.message.includes("split")) {
238
- errorMsg += " A TypeError occurred, possibly related to model config parsing incompatibility.";
239
- }
240
- else {
241
- errorMsg += " Check console for details. Memory limits on HF Spaces could also be a factor.";
242
- }
243
- updateModelStatus(errorMsg, 'error');
244
- displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
245
- generator = null; // Ensure generator is null
246
  }
247
  }
248
-
249
- function buildPrompt() {
250
- const historyLimit = 5; const recentHistory = conversationHistory.slice(-historyLimit);
251
- let prompt = "<start_of_turn>system\nYou are 'AI Assistant', a helpful AI assistant. Answer the user's questions clearly and concisely in English.\n<end_of_turn>\n";
252
- recentHistory.forEach(msg => { const role = msg.sender === 'user' ? 'user' : 'model'; prompt += `<start_of_turn>${role}\n${msg.text}\n<end_of_turn>\n`; });
253
- prompt += "<start_of_turn>model\n";
254
- console.log("Generated Prompt:", prompt); return prompt;
255
  }
256
-
257
- function cleanupResponse(responseText, prompt) {
258
- let cleaned = responseText; if (cleaned.startsWith(prompt)) { cleaned = cleaned.substring(prompt.length); } else { cleaned = cleaned.replace(/^model\n?/, '').trim(); }
259
- cleaned = cleaned.replace(/<end_of_turn>/g, '').trim(); cleaned = cleaned.replace(/<start_of_turn>/g, '').trim(); cleaned = cleaned.replace(/^['"]/, '').replace(/['"]$/, '');
260
- if (!cleaned || cleaned.length < 2) { const fallbacks = [ "Sorry, I didn't quite understand.", "Could you please rephrase that?", "I'm not sure how to respond." ]; return fallbacks[Math.floor(Math.random() * fallbacks.length)]; }
261
- return cleaned;
262
  }
263
-
264
- async function handleUserMessage() {
265
- const userText = userInput.value.trim();
266
- if (!userText || !generator || isLoadingModel) return;
267
- userInput.value = ''; userInput.style.height = 'auto'; updateChatUIState(true);
268
- displayMessage('user', userText); conversationHistory.push({ sender: 'user', text: userText });
269
- updateModelStatus("AI thinking...", "loading");
270
- const prompt = buildPrompt();
271
- try {
272
- const outputs = await generator(prompt, { max_new_tokens: 300, temperature: 0.7, repetition_penalty: 1.1, top_k: 50, top_p: 0.9, do_sample: true });
273
- const rawResponse = Array.isArray(outputs) ? outputs[0].generated_text : outputs.generated_text;
274
- const replyText = cleanupResponse(rawResponse, prompt);
275
- displayMessage('bot', replyText); conversationHistory.push({ sender: 'bot', text: replyText });
276
- if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) { speakText(replyText); }
277
- saveState();
278
- } catch (error) {
279
- console.error("AI response generation error:", error); displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
280
- const errorReply = "Sorry, I encountered an error generating the response."; displayMessage('bot', errorReply); conversationHistory.push({ sender: 'bot', text: errorReply });
281
- } finally {
282
- if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
283
- updateChatUIState(generator !== null); userInput.focus();
284
- }
285
  }
286
 
287
  // --- Speech API Functions ---
288
- function initializeSpeechAPI() {
289
- const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
290
- if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); }
291
- if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); }
292
- updateChatUIState(false);
293
  }
294
- function loadVoices() { if (!synthesis) return; let voices = synthesis.getVoices(); if (voices.length === 0) { synthesis.onvoiceschanged = () => { voices = synthesis.getVoices(); findAndSetVoice(voices); }; } else { findAndSetVoice(voices); } }
295
- function findAndSetVoice(voices) { targetVoice = voices.find(v => v.lang === 'en-US') || voices.find(v => v.lang.startsWith('en-')); if (targetVoice) { console.log("Using English voice:", targetVoice.name, targetVoice.lang); } else { console.warn("No suitable English voice found."); } }
296
- function speakText(text) { if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return; synthesis.cancel(); const utterance = new SpeechSynthesisUtterance(text); utterance.voice = targetVoice; utterance.lang = targetVoice.lang; utterance.rate = 1.0; utterance.pitch = 1.0; synthesis.speak(utterance); }
297
 
298
  // --- Event Listeners ---
299
  sendButton.addEventListener('click', handleUserMessage);
 
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
  <title>AI Assistant (Gemma 3 1B - v3 Attempt)</title>
7
  <style>
8
+ /* CSS styles remain the same as the previous correct version */
9
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
10
  :root {
11
  --primary-color: #007bff; --secondary-color: #6c757d; --text-color: #212529;
 
20
  --header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
21
  --container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07);
22
  }
 
23
  * { box-sizing: border-box; margin: 0; padding: 0; }
24
  html { height: 100%; }
25
  body {
 
66
  .control-button:active:not(:disabled) { transform: scale(0.95); }
67
  .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
68
  #toggleSpeakerButton.muted { background-color: #aaa; }
69
+ @media (max-width: 600px) { /* Responsive styles */
70
  body { padding: 5px; justify-content: flex-start; }
71
  #control-panel { margin-bottom: 5px; padding: 12px; }
72
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
 
80
  {
81
  "imports": {
82
  "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest"
 
 
83
  }
84
  }
85
  </script>
 
95
  <h1 id="chatbot-name">AI Assistant</h1>
96
  <div id="chatbox">
97
  <div id="messages">
98
+ <!-- Chat messages appear here -->
99
  </div>
100
  </div>
101
  <div id="input-area">
 
116
 
117
  env.allowLocalModels = false;
118
  env.useBrowserCache = true;
119
+ env.backends.onnx.executionProviders = ['webgpu', 'wasm'];
120
  console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
 
121
  env.backends.onnx.prefer_alternative_execution_providers = true;
122
 
123
+ // DOM Elements (no changes)
124
  const chatbox = document.getElementById('messages');
125
  const userInput = document.getElementById('userInput');
126
  const sendButton = document.getElementById('sendButton');
 
130
  const modelStatus = document.getElementById('model-status');
131
  const loadModelButton = document.getElementById('loadModelButton');
132
 
133
+ // State Management (no changes)
134
  let generator = null;
135
  let isLoadingModel = false;
136
  let conversationHistory = [];
137
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
138
+ const stateKey = 'gemma3_1b_v3_state_v1';
139
  const historyKey = 'gemma3_1b_v3_history_v1';
140
 
141
+ // Web Speech API (no changes)
142
  let recognition = null;
143
  let synthesis = window.speechSynthesis;
144
  let targetVoice = null;
145
  let isListening = false;
146
 
147
+ // --- Initialization ---
148
  window.addEventListener('load', () => {
149
  loadState();
150
  chatbotNameElement.textContent = botState.botName;
 
155
  if (conversationHistory.length > 0) displayHistory();
156
  setTimeout(loadVoices, 500);
157
  loadModelButton.addEventListener('click', handleLoadModelClick);
 
158
  console.log("Attempting to use Transformers.js (latest version from CDN)");
159
  displayMessage('system', `Using latest Transformers.js from CDN. Attempting to load ${MODEL_NAME}.`, false);
160
  });
161
 
162
+ // --- State Persistence ---
163
+ function loadState() { /* No changes */
164
  const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
165
  const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); } catch(e) { conversationHistory = []; } }
166
  }
167
+ function saveState() { /* No changes */
168
  localStorage.setItem(stateKey, JSON.stringify(botState));
169
  localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
170
  }
171
+ function displayHistory() { /* No changes */
172
  chatbox.innerHTML = ''; conversationHistory.forEach(msg => displayMessage(msg.sender, msg.text, false));
173
  }
174
 
175
+ // --- UI Update Functions ---
176
+ function displayMessage(sender, text, animate = true, isError = false) { /* No changes */
177
+ const messageDiv = document.createElement('div'); let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message'; if (sender === 'system' && isError) messageClass = 'error-message'; messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none'; text = text.replace(/</g, "&lt;").replace(/>/g, "&gt;"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>'); messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
178
  }
179
+ function updateModelStatus(message, type = 'info') { /* No changes */
 
180
  modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
181
  }
182
+ function updateChatUIState(isModelLoadedSuccessfully) { /* No changes */
183
+ userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please load the model first..."; }
184
  }
185
+ function updateSpeakerButtonUI() { /* No changes */
 
186
  toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
187
  }
188
  function showSpeechStatus(message) { console.log("Speech Status:", message); }
189
  function setupInputAutosize() { userInput.addEventListener('input', () => { userInput.style.height = 'auto'; userInput.style.height = userInput.scrollHeight + 'px'; updateChatUIState(generator !== null); }); }
190
 
191
+ // --- Model & AI Logic ---
192
+ async function handleLoadModelClick() { /* No changes */
193
+ if (isLoadingModel || generator) return; isLoadingModel = true; generator = null; updateChatUIState(false); await initializeModel(MODEL_NAME); isLoadingModel = false; updateChatUIState(generator !== null);
194
  }
195
+ async function initializeModel(modelId) { /* No changes */
196
+ updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" } using latest Transformers.js...`, 'loading'); displayMessage('system', `Attempting to load ${modelId} using latest library version.`, false);
197
  try {
198
+ generator = await pipeline(TASK, modelId, { dtype: QUANTIZATION, progress_callback: (progress) => { const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`; updateModelStatus(msg, 'loading'); } });
199
+ updateModelStatus(`${modelId} loaded successfully with latest library!`, 'success'); displayMessage('system', `[SUCCESS] ${modelId} loaded. The newer library version might support it.`, false);
200
  } catch (error) {
201
+ console.error(`Model loading failed for ${modelId} (with latest library):`, error); let errorMsg = `Failed to load ${modelId}: ${error.message}.`; if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) { errorMsg += " The 'gemma3_text' type is still likely unsupported even in the latest library version, or the ONNX conversion has issues."; } else if (error.message.includes("split")) { errorMsg += " A TypeError occurred, possibly related to model config parsing incompatibility."; } else { errorMsg += " Check console for details. Memory limits on HF Spaces could also be a factor."; } updateModelStatus(errorMsg, 'error'); displayMessage('system', `[ERROR] ${errorMsg}`, true, true); generator = null;
202
  }
203
  }
204
+ function buildPrompt() { /* No changes */
205
+ const historyLimit = 5; const recentHistory = conversationHistory.slice(-historyLimit); let prompt = "<start_of_turn>system\nYou are 'AI Assistant', a helpful AI assistant. Answer the user's questions clearly and concisely in English.\n<end_of_turn>\n"; recentHistory.forEach(msg => { const role = msg.sender === 'user' ? 'user' : 'model'; prompt += `<start_of_turn>${role}\n${msg.text}\n<end_of_turn>\n`; }); prompt += "<start_of_turn>model\n"; console.log("Generated Prompt:", prompt); return prompt;
206
  }
207
+ function cleanupResponse(responseText, prompt) { /* No changes */
208
+ let cleaned = responseText; if (cleaned.startsWith(prompt)) { cleaned = cleaned.substring(prompt.length); } else { cleaned = cleaned.replace(/^model\n?/, '').trim(); } cleaned = cleaned.replace(/<end_of_turn>/g, '').trim(); cleaned = cleaned.replace(/<start_of_turn>/g, '').trim(); cleaned = cleaned.replace(/^['"]/, '').replace(/['"]$/, ''); if (!cleaned || cleaned.length < 2) { const fallbacks = [ "Sorry, I didn't quite understand.", "Could you please rephrase that?", "I'm not sure how to respond." ]; return fallbacks[Math.floor(Math.random() * fallbacks.length)]; } return cleaned;
209
  }
210
+ async function handleUserMessage() { /* No changes */
211
+ const userText = userInput.value.trim(); if (!userText || !generator || isLoadingModel) return; userInput.value = ''; userInput.style.height = 'auto'; updateChatUIState(true); displayMessage('user', userText); conversationHistory.push({ sender: 'user', text: userText }); updateModelStatus("AI thinking...", "loading"); const prompt = buildPrompt(); try { const outputs = await generator(prompt, { max_new_tokens: 300, temperature: 0.7, repetition_penalty: 1.1, top_k: 50, top_p: 0.9, do_sample: true }); const rawResponse = Array.isArray(outputs) ? outputs[0].generated_text : outputs.generated_text; const replyText = cleanupResponse(rawResponse, prompt); displayMessage('bot', replyText); conversationHistory.push({ sender: 'bot', text: replyText }); if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) { speakText(replyText); } saveState(); } catch (error) { console.error("AI response generation error:", error); displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true); const errorReply = "Sorry, I encountered an error generating the response."; displayMessage('bot', errorReply); conversationHistory.push({ sender: 'bot', text: errorReply }); } finally { if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success"); updateChatUIState(generator !== null); userInput.focus(); }
212
  }
213
 
214
  // --- Speech API Functions ---
215
+ function initializeSpeechAPI() { /* No changes */
216
+ const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); } if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); } updateChatUIState(false);
217
  }
218
+ function loadVoices() { /* No changes */ if (!synthesis) return; let voices = synthesis.getVoices(); if (voices.length === 0) { synthesis.onvoiceschanged = () => { voices = synthesis.getVoices(); findAndSetVoice(voices); }; } else { findAndSetVoice(voices); } }
219
+ function findAndSetVoice(voices) { /* No changes */ targetVoice = voices.find(v => v.lang === 'en-US') || voices.find(v => v.lang.startsWith('en-')); if (targetVoice) { console.log("Using English voice:", targetVoice.name, targetVoice.lang); } else { console.warn("No suitable English voice found."); } }
220
+ function speakText(text) { /* No changes */ if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return; synthesis.cancel(); const utterance = new SpeechSynthesisUtterance(text); utterance.voice = targetVoice; utterance.lang = targetVoice.lang; utterance.rate = 1.0; utterance.pitch = 1.0; synthesis.speak(utterance); }
221
 
222
  // --- Event Listeners ---
223
  sendButton.addEventListener('click', handleUserMessage);
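
For reference, the model-loading path that this diff wires up can be reduced to the short standalone sketch below. It only uses calls that appear in the diff itself (pipeline, env, progress_callback, and the generation options); MODEL_NAME, TASK, and QUANTIZATION are constants defined elsewhere in index.html whose values are not shown in this diff, so they are stubbed here with clearly marked placeholder values. The dtype option is a Transformers.js v3-style setting; older @xenova/transformers v2 builds expect { quantized: true } instead.

// Minimal sketch, not the exact file contents: the placeholder constants below
// stand in for the MODEL_NAME / TASK / QUANTIZATION values defined elsewhere in index.html.
import { pipeline, env } from '@xenova/transformers';

const MODEL_NAME = '<model-id>';   // placeholder: the real id is not shown in this diff
const TASK = 'text-generation';    // assumed from the generated_text handling above
const QUANTIZATION = 'q4';         // assumed; the diff only shows { dtype: QUANTIZATION }

env.allowLocalModels = false;      // fetch weights from the CDN/Hub, not from local files
env.useBrowserCache = true;        // cache downloaded model files in the browser

async function loadAndGenerate(prompt) {
  // dtype is honored by v3 builds; v2 builds use { quantized: true } instead.
  const generator = await pipeline(TASK, MODEL_NAME, {
    dtype: QUANTIZATION,
    progress_callback: (p) =>
      console.log(`[${p.status}] ${p.file ?? ''} ${Math.round(p.progress ?? 0)}%`),
  });
  const outputs = await generator(prompt, {
    max_new_tokens: 300, temperature: 0.7, repetition_penalty: 1.1,
    top_k: 50, top_p: 0.9, do_sample: true,
  });
  return Array.isArray(outputs) ? outputs[0].generated_text : outputs.generated_text;
}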