kimhyunwoo committed on
Commit
f8a195e
·
verified ·
1 Parent(s): 060ac05

Update index.html

Browse files
Files changed (1) hide show
  1. index.html +88 -112
index.html CHANGED
@@ -3,7 +3,7 @@
3
  <head>
4
  <meta charset="UTF-8">
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
- <title>AI Assistant (Gemma 3 1B Attempt)</title>
7
  <style>
8
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
9
  :root {
@@ -19,6 +19,7 @@
19
  --header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
20
  --container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07);
21
  }
 
22
  * { box-sizing: border-box; margin: 0; padding: 0; }
23
  html { height: 100%; }
24
  body {
@@ -28,10 +29,10 @@
28
  #control-panel {
29
  background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px;
30
  box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color);
31
- text-align: center; /* Center button and status */
32
  }
33
  #loadModelButton {
34
- padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); /* Use primary color */
35
  color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px;
36
  }
37
  #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
@@ -41,7 +42,6 @@
41
  #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
42
  #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
43
  #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
44
- #model-status.tokenizer-only { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); } /* Style for tokenizer only */
45
 
46
  #chat-container {
47
  width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff;
@@ -57,6 +57,7 @@
57
  .bot-message { background-color: var(--bot-msg-bg); border: 1px solid var(--bot-msg-border); align-self: flex-start; border-bottom-left-radius: 4px; margin-right: auto; }
58
  .bot-message a { color: var(--primary-color); text-decoration: none; } .bot-message a:hover { text-decoration: underline; }
59
  .system-message { font-style: italic; color: var(--system-msg-color); text-align: center; font-size: 0.85em; background-color: transparent; box-shadow: none; align-self: center; max-width: 100%; padding: 5px 0; animation: none; }
 
60
  #input-area { display: flex; padding: 10px 12px; border-top: 1px solid var(--border-color); background-color: var(--header-bg); align-items: center; gap: 8px; flex-shrink: 0; }
61
  #userInput { flex-grow: 1; padding: 10px 15px; border: 1px solid var(--input-border); border-radius: 20px; outline: none; font-size: 1em; font-family: 'Roboto', sans-serif; background-color: var(--input-bg); transition: border-color 0.2s ease; min-height: 42px; resize: none; overflow-y: auto; }
62
  #userInput:focus { border-color: var(--primary-color); }
@@ -65,7 +66,7 @@
65
  .control-button:active:not(:disabled) { transform: scale(0.95); }
66
  .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
67
  #toggleSpeakerButton.muted { background-color: #aaa; }
68
- @media (max-width: 600px) {
69
  body { padding: 5px; justify-content: flex-start; }
70
  #control-panel { margin-bottom: 5px; padding: 12px; }
71
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
@@ -76,14 +77,20 @@
76
  }
77
  </style>
78
  <script type="importmap">
79
- { "imports": { "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/[email protected]" } }
 
 
 
 
 
 
80
  </script>
81
  </head>
82
  <body>
83
  <div id="control-panel">
84
  <h2>Model Loader</h2>
85
- <button id="loadModelButton">Load Gemma 3 1B Model</button>
86
- <div id="model-status" class="info">Click the button to load the Gemma 3 1B model. Warning: Loading may fail due to incompatibility.</div>
87
  </div>
88
 
89
  <div id="chat-container">
@@ -102,16 +109,19 @@
102
  </div>
103
 
104
  <script type="module">
105
- import { pipeline, AutoTokenizer, env } from '@xenova/transformers'; // Added AutoTokenizer
 
106
 
107
- const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA'; // Target model
108
  const TASK = 'text-generation';
109
- const QUANTIZATION = 'q4'; // Attempting Q4 as per model card
110
 
111
  env.allowLocalModels = false;
112
  env.useBrowserCache = true;
113
- env.backends.onnx.executionProviders = ['webgpu', 'wasm'];
114
  console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
 
 
115
 
116
  const chatbox = document.getElementById('messages');
117
  const userInput = document.getElementById('userInput');
@@ -122,14 +132,12 @@
122
  const modelStatus = document.getElementById('model-status');
123
  const loadModelButton = document.getElementById('loadModelButton');
124
 
125
- let generator = null; // Full pipeline
126
- let tokenizer = null; // Separate tokenizer instance
127
- let currentModelName = null;
128
  let isLoadingModel = false;
129
  let conversationHistory = [];
130
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
131
- const stateKey = 'gemma3_1b_only_state_v1';
132
- const historyKey = 'gemma3_1b_only_history_v1';
133
 
134
  let recognition = null;
135
  let synthesis = window.speechSynthesis;
@@ -142,10 +150,13 @@
142
  updateSpeakerButtonUI();
143
  initializeSpeechAPI();
144
  setupInputAutosize();
145
- updateChatUIState(false); // Initial state: model not loaded
146
- if (conversationHistory.length > 0) displayHistory();
147
- setTimeout(loadVoices, 500);
148
  loadModelButton.addEventListener('click', handleLoadModelClick);
 
 
 
149
  });
150
 
151
  function loadState() {
@@ -163,102 +174,80 @@
163
  function displayMessage(sender, text, animate = true, isError = false) {
164
  const messageDiv = document.createElement('div');
165
  let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
166
- if (isError) messageClass = 'error-message';
167
  messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none';
168
  text = text.replace(/</g, "<").replace(/>/g, ">"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>');
169
  messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
170
  }
171
 
172
- function updateModelStatus(message, type = 'info') {
173
- modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
174
- }
175
-
176
- function updateChatUIState(isModelLoadedSuccessfully, isTokenizerLoaded = false) {
177
- const isReadyForChat = isModelLoadedSuccessfully; // Only enable full chat if pipeline loaded
178
- userInput.disabled = !isReadyForChat || isLoadingModel;
179
- sendButton.disabled = !isReadyForChat || isLoadingModel || userInput.value.trim() === '';
180
- speechButton.disabled = !isReadyForChat || isLoadingModel || isListening || !recognition;
181
- toggleSpeakerButton.disabled = !isReadyForChat || isLoadingModel || !synthesis;
182
- loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; // Disable load button if loading or successful
183
 
184
- if (isReadyForChat) { userInput.placeholder = "How can I help you today?"; }
185
- else if (isLoadingModel) { userInput.placeholder = "Model loading..."; }
186
- else if (isTokenizerLoaded) { userInput.placeholder = "Tokenizer loaded, but chat unavailable."; } // Tokenizer only state
187
- else { userInput.placeholder = "Please load the model first..."; }
188
- }
 
 
 
 
 
189
 
190
- function updateSpeakerButtonUI() { /* No change */
191
- toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '🔊' : '🔇'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
192
- }
193
- function showSpeechStatus(message) { console.log("Speech Status:", message); }
194
- function setupInputAutosize() { userInput.addEventListener('input', () => { userInput.style.height = 'auto'; userInput.style.height = userInput.scrollHeight + 'px'; updateChatUIState(generator !== null, tokenizer !== null); }); }
195
 
196
  async function handleLoadModelClick() {
197
  if (isLoadingModel || generator) return;
198
  isLoadingModel = true;
199
- currentModelName = MODEL_NAME; // Only one model now
200
- generator = null; // Reset generator
201
- tokenizer = null; // Reset tokenizer
202
- updateChatUIState(false); // Disable UI while loading
203
- await initializeModelAndTokenizer(currentModelName);
204
  isLoadingModel = false;
205
- updateChatUIState(generator !== null, tokenizer !== null); // Update UI based on final state
206
  }
207
 
208
- async function initializeModelAndTokenizer(modelId) {
209
- updateModelStatus(`Loading ${modelId} (Q4)...`, 'loading');
210
- let pipelineLoaded = false;
211
- let tokenizerLoaded = false;
212
- let loadError = null;
213
 
214
- // --- Try loading the full pipeline first ---
215
  try {
 
216
  generator = await pipeline(TASK, modelId, {
217
- dtype: QUANTIZATION, // Using Q4 as per model card
218
  progress_callback: (progress) => {
219
- const msg = `[Loading Pipeline: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
220
  updateModelStatus(msg, 'loading');
221
  }
222
  });
223
- pipelineLoaded = true;
224
- tokenizer = generator.tokenizer; // Get tokenizer from successful pipeline
225
- tokenizerLoaded = true;
226
- updateModelStatus(`${modelId} pipeline loaded successfully!`, 'success');
227
- displayMessage('system', `[SUCCESS] ${modelId} is ready for chat.`, false);
228
 
229
  } catch (error) {
230
- console.error(`Pipeline loading failed for ${modelId}:`, error);
231
- loadError = error; // Store error to check later
 
 
 
 
 
 
 
 
 
 
232
  generator = null; // Ensure generator is null
233
- updateModelStatus(`Pipeline loading failed for ${modelId}. Attempting tokenizer only...`, 'warning');
234
-
235
- // --- If pipeline failed, try loading only the tokenizer ---
236
- try {
237
- updateModelStatus(`Loading Tokenizer for ${modelId}...`, 'loading');
238
- tokenizer = await AutoTokenizer.from_pretrained(modelId, {
239
- progress_callback: (progress) => {
240
- const msg = `[Loading Tokenizer: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
241
- updateModelStatus(msg, 'loading');
242
- }
243
- });
244
- tokenizerLoaded = true;
245
- // Report specific failure for pipeline but success for tokenizer
246
- updateModelStatus(`Model Pipeline failed: ${loadError.message}. Tokenizer loaded successfully. Chat is disabled.`, 'tokenizer-only');
247
- displayMessage('system', `[WARNING] ${modelId} Model Pipeline failed to load. Tokenizer is available, but chat generation will not work.`, true, true);
248
-
249
- } catch (tokenizerError) {
250
- console.error(`Tokenizer loading also failed for ${modelId}:`, tokenizerError);
251
- tokenizer = null; // Ensure tokenizer is null
252
- // Report double failure
253
- updateModelStatus(`FATAL: Failed to load both Pipeline and Tokenizer for ${modelId}. Pipeline Error: ${loadError.message}. Tokenizer Error: ${tokenizerError.message}`, 'error');
254
- displayMessage('system', `[FATAL ERROR] Could not load essential components for ${modelId}. Please check browser console and model compatibility.`, true, true);
255
- }
256
  }
257
  }
258
 
259
  function buildPrompt() {
260
- const historyLimit = 5; // Shorter context for 1B
261
- const recentHistory = conversationHistory.slice(-historyLimit);
262
  let prompt = "<start_of_turn>system\nYou are 'AI Assistant', a helpful AI assistant. Answer the user's questions clearly and concisely in English.\n<end_of_turn>\n";
263
  recentHistory.forEach(msg => { const role = msg.sender === 'user' ? 'user' : 'model'; prompt += `<start_of_turn>${role}\n${msg.text}\n<end_of_turn>\n`; });
264
  prompt += "<start_of_turn>model\n";
@@ -268,52 +257,39 @@
268
  function cleanupResponse(responseText, prompt) {
269
  let cleaned = responseText; if (cleaned.startsWith(prompt)) { cleaned = cleaned.substring(prompt.length); } else { cleaned = cleaned.replace(/^model\n?/, '').trim(); }
270
  cleaned = cleaned.replace(/<end_of_turn>/g, '').trim(); cleaned = cleaned.replace(/<start_of_turn>/g, '').trim(); cleaned = cleaned.replace(/^['"]/, '').replace(/['"]$/, '');
271
- if (!cleaned || cleaned.length < 2) { console.warn("Generated reply empty/short:", cleaned); const fallbacks = [ "Sorry, I didn't quite understand.", "Could you please rephrase that?", "I'm not sure how to respond." ]; return fallbacks[Math.floor(Math.random() * fallbacks.length)]; }
272
  return cleaned;
273
  }
274
 
275
  async function handleUserMessage() {
276
  const userText = userInput.value.trim();
277
- // Check if GENERATOR (full pipeline) is loaded
278
  if (!userText || !generator || isLoadingModel) return;
279
-
280
- userInput.value = ''; userInput.style.height = 'auto';
281
- updateChatUIState(true); // Disable send button immediately
282
-
283
- displayMessage('user', userText);
284
- conversationHistory.push({ sender: 'user', text: userText });
285
-
286
  updateModelStatus("AI thinking...", "loading");
287
-
288
  const prompt = buildPrompt();
289
  try {
290
  const outputs = await generator(prompt, { max_new_tokens: 300, temperature: 0.7, repetition_penalty: 1.1, top_k: 50, top_p: 0.9, do_sample: true });
291
  const rawResponse = Array.isArray(outputs) ? outputs[0].generated_text : outputs.generated_text;
292
  const replyText = cleanupResponse(rawResponse, prompt);
293
-
294
- displayMessage('bot', replyText);
295
- conversationHistory.push({ sender: 'bot', text: replyText });
296
  if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) { speakText(replyText); }
297
  saveState();
298
  } catch (error) {
299
- console.error("AI response generation error:", error);
300
- displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
301
- const errorReply = "Sorry, I encountered an error generating the response.";
302
- displayMessage('bot', errorReply);
303
- conversationHistory.push({ sender: 'bot', text: errorReply });
304
  } finally {
305
- updateModelStatus(`${currentModelName} ready.`, "success");
306
- updateChatUIState(true);
307
- userInput.focus();
308
  }
309
  }
310
 
311
  // --- Speech API Functions ---
312
  function initializeSpeechAPI() {
313
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
314
- if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null, tokenizer !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${currentModelName} ready.` : (tokenizer ? 'Tokenizer loaded.' : 'No model loaded.'), generator ? 'success' : (tokenizer ? 'tokenizer-only' : 'info')), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null, tokenizer !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); }
315
  if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); }
316
- updateChatUIState(false); // Initial state
317
  }
318
  function loadVoices() { if (!synthesis) return; let voices = synthesis.getVoices(); if (voices.length === 0) { synthesis.onvoiceschanged = () => { voices = synthesis.getVoices(); findAndSetVoice(voices); }; } else { findAndSetVoice(voices); } }
319
  function findAndSetVoice(voices) { targetVoice = voices.find(v => v.lang === 'en-US') || voices.find(v => v.lang.startsWith('en-')); if (targetVoice) { console.log("Using English voice:", targetVoice.name, targetVoice.lang); } else { console.warn("No suitable English voice found."); } }
@@ -322,7 +298,7 @@
322
  // --- Event Listeners ---
323
  sendButton.addEventListener('click', handleUserMessage);
324
  userInput.addEventListener('keypress', (e) => { if (e.key === 'Enter' && !e.shiftKey) { e.preventDefault(); handleUserMessage(); } });
325
- speechButton.addEventListener('click', () => { if (recognition && !isListening && generator && !isLoadingModel) { try { recognition.start(); } catch (error) { console.error("Rec start fail:", error); updateModelStatus(`Failed to start recognition`, 'error'); setTimeout(() => updateModelStatus(generator ? `${currentModelName} ready.` : (tokenizer ? 'Tokenizer loaded.' : 'No model loaded.'), generator ? 'success' : (tokenizer ? 'tokenizer-only' : 'info')), 2000); isListening = false; updateChatUIState(generator !== null, tokenizer !== null); } } });
326
 
327
  </script>
328
  </body>
 
3
  <head>
4
  <meta charset="UTF-8">
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
+ <title>AI Assistant (Gemma 3 1B - v3 Attempt)</title>
7
  <style>
8
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
9
  :root {
 
19
  --header-bg: #ffffff; --header-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
20
  --container-shadow: 0 4px 15px rgba(0, 0, 0, 0.07);
21
  }
22
+ /* 나머지 CSS는 이전과 동일하게 유지 */
23
  * { box-sizing: border-box; margin: 0; padding: 0; }
24
  html { height: 100%; }
25
  body {
 
29
  #control-panel {
30
  background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px;
31
  box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color);
32
+ text-align: center;
33
  }
34
  #loadModelButton {
35
+ padding: 10px 20px; font-size: 1em; background-color: var(--primary-color);
36
  color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px;
37
  }
38
  #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
 
42
  #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
43
  #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
44
  #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
 
45
 
46
  #chat-container {
47
  width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff;
 
57
  .bot-message { background-color: var(--bot-msg-bg); border: 1px solid var(--bot-msg-border); align-self: flex-start; border-bottom-left-radius: 4px; margin-right: auto; }
58
  .bot-message a { color: var(--primary-color); text-decoration: none; } .bot-message a:hover { text-decoration: underline; }
59
  .system-message { font-style: italic; color: var(--system-msg-color); text-align: center; font-size: 0.85em; background-color: transparent; box-shadow: none; align-self: center; max-width: 100%; padding: 5px 0; animation: none; }
60
+ .error-message { color: var(--error-color); font-weight: 500; background-color: var(--error-bg); border: 1px solid var(--error-border); padding: 10px 15px; border-radius: 8px; align-self: stretch; text-align: left; }
61
  #input-area { display: flex; padding: 10px 12px; border-top: 1px solid var(--border-color); background-color: var(--header-bg); align-items: center; gap: 8px; flex-shrink: 0; }
62
  #userInput { flex-grow: 1; padding: 10px 15px; border: 1px solid var(--input-border); border-radius: 20px; outline: none; font-size: 1em; font-family: 'Roboto', sans-serif; background-color: var(--input-bg); transition: border-color 0.2s ease; min-height: 42px; resize: none; overflow-y: auto; }
63
  #userInput:focus { border-color: var(--primary-color); }
 
66
  .control-button:active:not(:disabled) { transform: scale(0.95); }
67
  .control-button:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; transform: none; box-shadow: none; }
68
  #toggleSpeakerButton.muted { background-color: #aaa; }
69
+ @media (max-width: 600px) { /* 반응형 스타일 유지 */
70
  body { padding: 5px; justify-content: flex-start; }
71
  #control-panel { margin-bottom: 5px; padding: 12px; }
72
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
 
77
  }
78
  </style>
79
  <script type="importmap">
80
+ {
81
+ "imports": {
82
+ "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest"
83
+ /* "/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest" */
84
+ /* "/transformers/": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest/" */
85
+ }
86
+ }
87
  </script>
88
  </head>
89
  <body>
90
  <div id="control-panel">
91
  <h2>Model Loader</h2>
92
+ <button id="loadModelButton">Load Gemma 3 1B Model (Q4)</button>
93
+ <div id="model-status" class="info">Click button to load Gemma 3 1B (using Transformers.js v3+). Success depends on library support for 'gemma3_text'.</div>
94
  </div>
95
 
96
  <div id="chat-container">
 
109
  </div>
110
 
111
  <script type="module">
112
+ // Importing from the latest version specified in import map
113
+ import { pipeline, env } from '@xenova/transformers';
114
 
115
+ const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA';
116
  const TASK = 'text-generation';
117
+ const QUANTIZATION = 'q4';
118
 
119
  env.allowLocalModels = false;
120
  env.useBrowserCache = true;
121
+ env.backends.onnx.executionProviders = ['webgpu', 'wasm']; // Keep WebGPU priority
122
  console.log('Using Execution Providers:', env.backends.onnx.executionProviders);
123
+ // Attempt to leverage WebGPU if available (v3 feature)
124
+ env.backends.onnx.prefer_alternative_execution_providers = true;
125
 
126
  const chatbox = document.getElementById('messages');
127
  const userInput = document.getElementById('userInput');
 
132
  const modelStatus = document.getElementById('model-status');
133
  const loadModelButton = document.getElementById('loadModelButton');
134
 
135
+ let generator = null;
 
 
136
  let isLoadingModel = false;
137
  let conversationHistory = [];
138
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
139
+ const stateKey = 'gemma3_1b_v3_state_v1'; // New key for v3 attempt
140
+ const historyKey = 'gemma3_1b_v3_history_v1';
141
 
142
  let recognition = null;
143
  let synthesis = window.speechSynthesis;
 
150
  updateSpeakerButtonUI();
151
  initializeSpeechAPI();
152
  setupInputAutosize();
153
+ updateChatUIState(false);
154
+ if (conversationHistory.length > 0) displayHistory();
155
+ setTimeout(loadVoices, 500);
156
  loadModelButton.addEventListener('click', handleLoadModelClick);
157
+ // Log the loaded library version (if possible, might not be exposed easily)
158
+ console.log("Attempting to use Transformers.js (latest version from CDN)");
159
+ displayMessage('system', `Using latest Transformers.js from CDN. Attempting to load ${MODEL_NAME}.`, false);
160
  });
161
 
162
  function loadState() {
 
174
// Renders one chat message bubble into the chatbox.
// sender:  'user' | 'bot' | 'system' — selects the CSS class.
// animate: disables the entry animation and uses instant scroll when false.
// isError: system messages flagged as errors get the error style.
function displayMessage(sender, text, animate = true, isError = false) {
    const messageDiv = document.createElement('div');
    let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message';
    if (sender === 'system' && isError) messageClass = 'error-message'; // Style system errors
    messageDiv.classList.add(messageClass);
    if (!animate) messageDiv.style.animation = 'none';
    // Escape HTML metacharacters BEFORE building markup for innerHTML.
    // '&' must be escaped first so the entities produced below are not
    // double-escaped. (The previous replace(/</g, "<") was a no-op and
    // left raw model/user text injectable into the DOM.)
    text = text.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
    // Minimal markdown: [label](url) links, **bold**, *italic*, newlines.
    text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>');
    text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>');
    text = text.replace(/\n/g, '<br>');
    messageDiv.innerHTML = text;
    chatbox.appendChild(messageDiv);
    chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
}
182
 
183
// Writes a status line into the model-status banner and mirrors it to the console.
// type maps to a CSS modifier class: 'info' | 'loading' | 'success' | 'error'.
function updateModelStatus(message, type = 'info') {
    modelStatus.textContent = message;
    modelStatus.className = `model-status ${type}`;
    console.log(`Model Status (${type}): ${message}`);
}
 
 
 
 
 
 
 
 
186
 
187
// Enables/disables the chat controls to match model readiness and load state.
function updateChatUIState(isModelLoadedSuccessfully) {
    // Everything except the load button is unusable until the pipeline is ready.
    const notReady = !isModelLoadedSuccessfully || isLoadingModel;
    userInput.disabled = notReady;
    sendButton.disabled = notReady || userInput.value.trim() === '';
    speechButton.disabled = notReady || isListening || !recognition;
    toggleSpeakerButton.disabled = notReady || !synthesis;
    // Loading the model is a one-shot action: lock the button while loading
    // and once the model is in place.
    loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully;
    if (isModelLoadedSuccessfully) {
        userInput.placeholder = "How can I help you today?";
    } else if (isLoadingModel) {
        userInput.placeholder = "Model loading...";
    } else {
        userInput.placeholder = "Please load the model first...";
    }
}
197
 
198
// Reflects the current speech-output setting on the speaker toggle button.
function updateSpeakerButtonUI() {
    const speechOn = botState.botSettings.useSpeechOutput;
    toggleSpeakerButton.textContent = speechOn ? '🔊' : '🔇';
    toggleSpeakerButton.title = speechOn ? 'Turn off AI speech' : 'Turn on AI speech';
    toggleSpeakerButton.classList.toggle('muted', !speechOn);
}

// Debug hook: speech status changes are only logged, never shown in the UI.
function showSpeechStatus(message) { console.log("Speech Status:", message); }

// Grows the textarea with its content and re-evaluates button state on input.
function setupInputAutosize() {
    userInput.addEventListener('input', () => {
        userInput.style.height = 'auto';
        userInput.style.height = `${userInput.scrollHeight}px`;
        updateChatUIState(generator !== null);
    });
}
203
 
204
// Click handler for the "Load Model" button.
// Ignores re-entrant clicks while a load is in flight or after success.
async function handleLoadModelClick() {
    if (isLoadingModel || generator) return; // already loading or loaded
    isLoadingModel = true;
    generator = null;
    updateChatUIState(false); // lock the chat UI while the download runs
    await initializeModel(MODEL_NAME);
    isLoadingModel = false;
    // initializeModel leaves `generator` null on failure, so this re-enables
    // chat only when loading actually succeeded.
    updateChatUIState(generator !== null);
}
213
 
214
// Loads the text-generation pipeline for `modelId` into the module-level
// `generator`. Never throws: on failure `generator` stays null and a
// diagnostic message is rendered instead.
async function initializeModel(modelId) {
    updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" } using latest Transformers.js...`, 'loading');
    displayMessage('system', `Attempting to load ${modelId} using latest library version.`, false);

    try {
        // Attempt pipeline creation with the Q4 dtype; per-file download
        // progress is surfaced in the status banner.
        generator = await pipeline(TASK, modelId, {
            dtype: QUANTIZATION,
            progress_callback: (progress) => {
                const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
                updateModelStatus(msg, 'loading');
            }
        });

        updateModelStatus(`${modelId} loaded successfully with latest library!`, 'success');
        displayMessage('system', `[SUCCESS] ${modelId} loaded. The newer library version might support it.`, false);

    } catch (error) {
        console.error(`Model loading failed for ${modelId} (with latest library):`, error);
        // Non-Error throws (strings, plain objects) have no .message; guard so
        // the diagnostics below cannot themselves throw inside the catch.
        const detail = error?.message ?? String(error);
        let errorMsg = `Failed to load ${modelId}: ${detail}.`;
        if (detail.includes("Unsupported model type") || detail.includes("gemma3_text")) {
            errorMsg += " The 'gemma3_text' type is still likely unsupported even in the latest library version, or the ONNX conversion has issues.";
        } else if (detail.includes("split")) {
            errorMsg += " A TypeError occurred, possibly related to model config parsing incompatibility.";
        } else {
            errorMsg += " Check console for details. Memory limits on HF Spaces could also be a factor.";
        }
        updateModelStatus(errorMsg, 'error');
        displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
        generator = null; // Keep the UI in the "not loaded" state.
    }
}
248
 
249
  function buildPrompt() {
250
+ const historyLimit = 5; const recentHistory = conversationHistory.slice(-historyLimit);
 
251
  let prompt = "<start_of_turn>system\nYou are 'AI Assistant', a helpful AI assistant. Answer the user's questions clearly and concisely in English.\n<end_of_turn>\n";
252
  recentHistory.forEach(msg => { const role = msg.sender === 'user' ? 'user' : 'model'; prompt += `<start_of_turn>${role}\n${msg.text}\n<end_of_turn>\n`; });
253
  prompt += "<start_of_turn>model\n";
 
257
// Post-processes raw model output: strips the echoed prompt (or a stray
// leading "model" role marker), removes Gemma turn delimiters, trims one
// surrounding quote character, and falls back to a canned reply when the
// result is empty or too short to be meaningful.
function cleanupResponse(responseText, prompt) {
    let reply = responseText.startsWith(prompt)
        ? responseText.substring(prompt.length)
        : responseText.replace(/^model\n?/, '').trim();
    reply = reply.replace(/<end_of_turn>/g, '').trim();
    reply = reply.replace(/<start_of_turn>/g, '').trim();
    reply = reply.replace(/^['"]/, '').replace(/['"]$/, '');
    if (!reply || reply.length < 2) {
        const fallbacks = [
            "Sorry, I didn't quite understand.",
            "Could you please rephrase that?",
            "I'm not sure how to respond.",
        ];
        return fallbacks[Math.floor(Math.random() * fallbacks.length)];
    }
    return reply;
}
263
 
264
// Sends the current input text to the loaded pipeline and renders the reply.
async function handleUserMessage() {
    const userText = userInput.value.trim();
    // Require non-empty text and a fully loaded pipeline.
    if (!userText || !generator || isLoadingModel) return;

    userInput.value = '';
    userInput.style.height = 'auto';
    updateChatUIState(true); // empty input disables the send button immediately

    displayMessage('user', userText);
    conversationHistory.push({ sender: 'user', text: userText });
    updateModelStatus("AI thinking...", "loading");

    const prompt = buildPrompt();
    try {
        const outputs = await generator(prompt, { max_new_tokens: 300, temperature: 0.7, repetition_penalty: 1.1, top_k: 50, top_p: 0.9, do_sample: true });
        // The pipeline may return a single result object or an array of them.
        const rawResponse = Array.isArray(outputs) ? outputs[0].generated_text : outputs.generated_text;
        const replyText = cleanupResponse(rawResponse, prompt);
        displayMessage('bot', replyText);
        conversationHistory.push({ sender: 'bot', text: replyText });
        if (botState.botSettings.useSpeechOutput && synthesis && targetVoice) { speakText(replyText); }
        saveState();
    } catch (error) {
        console.error("AI response generation error:", error);
        displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
        const errorReply = "Sorry, I encountered an error generating the response.";
        displayMessage('bot', errorReply);
        conversationHistory.push({ sender: 'bot', text: errorReply });
    } finally {
        if (generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
        updateChatUIState(generator !== null);
        userInput.focus();
    }
}
286
 
287
  // --- Speech API Functions ---
288
// Wires up speech recognition (voice input) and the speech-output toggle.
function initializeSpeechAPI() {
    // Prefixed constructor is needed on WebKit browsers.
    const SpeechRecognitionCtor = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (SpeechRecognitionCtor) {
        recognition = new SpeechRecognitionCtor();
        recognition.lang = 'en-US';
        recognition.continuous = false;
        recognition.interimResults = false;
        recognition.onstart = () => {
            isListening = true;
            updateChatUIState(generator !== null);
            console.log('Listening...');
        };
        recognition.onresult = (event) => {
            // Push the transcript through the normal input path and send it.
            userInput.value = event.results[0][0].transcript;
            userInput.dispatchEvent(new Event('input'));
            handleUserMessage();
        };
        recognition.onerror = (event) => {
            console.error("Speech error:", event.error);
            updateModelStatus(`Speech recognition error (${event.error})`, 'error');
            // Restore the model status line after a short delay.
            setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000);
        };
        recognition.onend = () => {
            isListening = false;
            updateChatUIState(generator !== null);
            console.log('Stopped listening.');
        };
    } else {
        console.warn("Speech Recognition not supported.");
    }
    if (!synthesis) {
        console.warn("Speech Synthesis not supported.");
    } else {
        toggleSpeakerButton.addEventListener('click', () => {
            botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput;
            updateSpeakerButtonUI();
            saveState();
            // Stop any utterance in progress when muting.
            if (!botState.botSettings.useSpeechOutput) synthesis.cancel();
        });
    }
    updateChatUIState(false);
}
294
// Fetches the available TTS voices. Some browsers populate the list
// asynchronously, in which case we wait for the voiceschanged event.
function loadVoices() {
    if (!synthesis) return;
    let voices = synthesis.getVoices();
    if (voices.length === 0) {
        synthesis.onvoiceschanged = () => {
            voices = synthesis.getVoices();
            findAndSetVoice(voices);
        };
    } else {
        findAndSetVoice(voices);
    }
}
295
// Picks an English TTS voice (exact en-US preferred, then any en-* locale)
// and stores it in the module-level `targetVoice`.
function findAndSetVoice(voices) {
    const exactUS = voices.find((v) => v.lang === 'en-US');
    targetVoice = exactUS || voices.find((v) => v.lang.startsWith('en-'));
    if (targetVoice) {
        console.log("Using English voice:", targetVoice.name, targetVoice.lang);
    } else {
        console.warn("No suitable English voice found.");
    }
}
 
298
// --- Event Listeners ---
sendButton.addEventListener('click', handleUserMessage);
// Enter sends the message; Shift+Enter keeps the default newline behavior.
userInput.addEventListener('keypress', (e) => {
    if (e.key === 'Enter' && !e.shiftKey) {
        e.preventDefault();
        handleUserMessage();
    }
});
speechButton.addEventListener('click', () => {
    // Voice input only makes sense when recognition exists, is idle, and the
    // model pipeline is ready.
    if (!recognition || isListening || !generator || isLoadingModel) return;
    try {
        recognition.start();
    } catch (error) {
        console.error("Rec start fail:", error);
        updateModelStatus(`Failed to start recognition`, 'error');
        setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 2000);
        isListening = false;
        updateChatUIState(generator !== null);
    }
});
302
 
303
  </script>
304
  </body>