kimhyunwoo commited on
Commit
4ec33a2
·
verified ·
1 Parent(s): 6b44370

Update index.html

Browse files
Files changed (1) hide show
  1. index.html +97 -90
index.html CHANGED
@@ -3,7 +3,7 @@
3
  <head>
4
  <meta charset="UTF-8">
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
- <title>AI Assistant (Gemma 3 1B ONNX Attempt)</title> {/* Updated Title */}
7
  <style>
8
  /* CSS는 이전과 동일 (CSS unchanged from previous version) */
9
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
@@ -23,16 +23,21 @@
23
  * { box-sizing: border-box; margin: 0; padding: 0; }
24
  html { height: 100%; }
25
  body { font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start; min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none; }
26
- #control-panel { background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px; box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color); text-align: center; }
27
- #loadModelButton { padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-bottom: 10px; }
 
 
 
 
28
  #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
29
  #loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; }
30
- #model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; }
31
  #model-status.info { background-color: #e2e3e5; border: 1px solid #d6d8db; color: #383d41; }
32
  #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
33
  #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
34
  #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
35
- #chat-container { width: 100%; max-width: 600px; height: 75vh; max-height: 700px; background-color: #ffffff; border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column; overflow: hidden; border: 1px solid var(--border-color); }
 
36
  h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; }
37
  #chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); }
38
  #chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; }
@@ -53,6 +58,7 @@
53
  #toggleSpeakerButton.muted { background-color: #aaa; }
54
  @media (max-width: 600px) { /* Responsive styles */
55
  body { padding: 5px; justify-content: flex-start; } #control-panel { margin-bottom: 5px; padding: 12px; }
 
56
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
57
  h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; }
58
  #messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;}
@@ -60,32 +66,34 @@
60
  .control-button { width: 40px; height: 40px; font-size: 1.2em; }
61
  }
62
  </style>
63
- <!-- Using LATEST version of Transformers.js via CDN -->
64
  <script type="importmap">
65
- {
66
- "imports": {
67
- "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest"
68
- }
69
- }
70
  </script>
71
  </head>
72
  <body>
73
  <div id="control-panel">
74
  <h2>Model Loader</h2>
75
- <!-- Button to explicitly trigger model loading -->
76
- <button id="loadModelButton">Load Gemma 3 1B ONNX (Q4)</button> {/* Updated Button Text */}
77
- <div id="model-status" class="info">Click button to load `onnx-community/gemma-3-1b-it-ONNX` (Q4). **Warning:** Loading may still fail due to library incompatibility.</div>
 
 
 
 
 
 
 
 
 
78
  </div>
79
 
80
  <div id="chat-container">
81
  <h1 id="chatbot-name">AI Assistant</h1>
82
  <div id="chatbox">
83
- <div id="messages">
84
- <!-- Chat messages appear here -->
85
- </div>
86
  </div>
87
  <div id="input-area">
88
- <textarea id="userInput" placeholder="Please attempt to load the model first..." rows="1" disabled></textarea>
89
  <button id="speechButton" class="control-button" title="Speak message" disabled>๐ŸŽค</button>
90
  <button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>๐Ÿ”Š</button>
91
  <button id="sendButton" class="control-button" title="Send message" disabled>โžค</button>
@@ -95,10 +103,9 @@
95
  <script type="module">
96
  import { pipeline, env } from '@xenova/transformers';
97
 
98
- // Configuration based on the LATEST user request
99
- const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX'; // Using the non-GQA version
100
  const TASK = 'text-generation';
101
- const QUANTIZATION = 'q4'; // Using Q4 as specified in the example structure
102
 
103
  // Environment setup
104
  env.allowRemoteModels = true;
@@ -116,14 +123,15 @@
116
  const toggleSpeakerButton = document.getElementById('toggleSpeakerButton');
117
  const modelStatus = document.getElementById('model-status');
118
  const loadModelButton = document.getElementById('loadModelButton');
 
119
 
120
  // State
121
  let generator = null;
122
  let isLoadingModel = false;
123
- let conversationHistory = [];
124
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
125
- const stateKey = 'gemma3_1b_onnx_state_v1'; // New key for this specific model
126
- const historyKey = 'gemma3_1b_onnx_history_v1';
127
 
128
  // Speech API
129
  let recognition = null;
@@ -133,43 +141,37 @@
133
 
134
  // --- Initialization ---
135
  window.addEventListener('load', () => {
136
- loadState();
137
- chatbotNameElement.textContent = botState.botName;
138
- updateSpeakerButtonUI();
139
- initializeSpeechAPI();
140
- setupInputAutosize();
141
- updateChatUIState(false);
142
- displayHistory();
143
- setTimeout(loadVoices, 500);
144
  loadModelButton.addEventListener('click', handleLoadModelClick);
145
- console.log("Attempting to use Transformers.js (latest) loaded via import map.");
146
- displayMessage('system', `Using latest Transformers.js. Ready to load ${MODEL_NAME}.`, false);
147
  });
148
 
149
  // --- State Persistence ---
150
- function loadState() {
151
  const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
152
  const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); if (!Array.isArray(conversationHistory)) conversationHistory = []; } catch(e) { conversationHistory = []; } }
153
  }
154
- function saveState() {
155
- localStorage.setItem(stateKey, JSON.stringify(botState));
156
- localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
157
  }
158
- function displayHistory() {
159
  chatbox.innerHTML = ''; conversationHistory.forEach(msg => { if (msg.role === 'user' || msg.role === 'assistant') { displayMessage(msg.role === 'user' ? 'user' : 'bot', msg.content, false); } });
160
  }
161
 
162
  // --- UI Update Functions ---
163
- function displayMessage(sender, text, animate = true, isError = false) {
164
  const messageDiv = document.createElement('div'); let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message'; if (sender === 'system' && isError) messageClass = 'error-message'; messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none'; text = text.replace(/</g, "<").replace(/>/g, ">"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>'); messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
165
  }
166
- function updateModelStatus(message, type = 'info') {
167
  modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
168
  }
169
- function updateChatUIState(isModelLoadedSuccessfully) {
170
- userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please attempt to load the model first..."; }
171
  }
172
- function updateSpeakerButtonUI() {
173
  toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '๐Ÿ”Š' : '๐Ÿ”‡'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
174
  }
175
  function showSpeechStatus(message) { console.log("Speech Status:", message); }
@@ -179,41 +181,46 @@
179
  async function handleLoadModelClick() {
180
  if (isLoadingModel || generator) return;
181
  isLoadingModel = true; generator = null;
 
182
  updateChatUIState(false);
183
- // Pass the specific model name requested by the user
184
- await initializeModel(MODEL_NAME);
185
  isLoadingModel = false;
186
  updateChatUIState(generator !== null);
187
  }
188
 
189
- // Initialize model using the exact parameters from the latest example structure
190
- async function initializeModel(modelId) {
191
- updateModelStatus(`Loading ${modelId} with { dtype: "${QUANTIZATION}" }... (Strict doc example)`, 'loading');
192
- displayMessage('system', `Attempting to load ${modelId} using documented method (dtype: ${QUANTIZATION})...`, false);
 
 
 
 
 
 
 
 
 
 
 
193
 
194
  try {
195
- // Pipeline creation EXACTLY as per the example structure provided by user
196
- generator = await pipeline(TASK, modelId, {
197
- dtype: QUANTIZATION, // Explicitly use q4
198
- progress_callback: (progress) => {
199
- const msg = `[Loading: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
200
- updateModelStatus(msg, 'loading');
201
- }
202
- });
203
 
204
- updateModelStatus(`${modelId} loaded successfully!`, 'success');
205
- displayMessage('system', `[SUCCESS] ${modelId} loaded.`, false);
 
206
 
207
  } catch (error) {
208
- console.error(`Model loading failed for ${modelId} (Strict Attempt):`, error);
209
  let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
210
- // Provide specific feedback based on likely errors
211
  if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
212
- errorMsg += " The 'gemma3_text' model type is likely unsupported by this library version.";
213
  } else if (error.message.includes("split is not a function")) {
214
- errorMsg += " A TypeError occurred, likely due to config parsing incompatibility.";
215
  } else {
216
- errorMsg += " Unknown error. Check console/network/memory.";
217
  }
218
  updateModelStatus(errorMsg, 'error');
219
  displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
@@ -221,46 +228,46 @@
221
  }
222
  }
223
 
224
- // Build messages array as per documentation example
225
  function buildMessages(newUserMessage) {
226
  let messages = [{ role: "system", content: "You are a helpful assistant." }];
227
  messages = messages.concat(conversationHistory);
228
  messages.push({ role: "user", content: newUserMessage });
229
- console.log("Input Messages for Pipeline:", messages);
230
- return messages;
231
  }
232
 
233
- // Cleanup response as per documentation example structure
234
  function cleanupResponse(output) {
235
- try {
236
- if (output && output.length > 0 && output[0].generated_text && Array.isArray(output[0].generated_text)) {
237
- const lastMessage = output[0].generated_text.at(-1);
238
- if (lastMessage && (lastMessage.role === 'assistant' || lastMessage.role === 'model') && typeof lastMessage.content === 'string') {
239
- let content = lastMessage.content.trim();
240
- content = content.replace(/<end_of_turn>/g, '').trim();
241
- if (content.length > 0) return content;
242
- }
243
- }
244
- } catch (e) { console.error("Error parsing generator output with .at(-1):", e, "Output:", output); }
245
- console.warn("Could not extract response using output[0].generated_text.at(-1).content. Output structure might differ or generation failed.", output);
246
- const fallbacks = [ "Sorry, response format was unexpected.", "My response might be garbled.", "Error processing the AI answer." ];
247
- return fallbacks[Math.floor(Math.random() * fallbacks.length)];
248
  }
249
 
 
250
  // --- Main Interaction Logic ---
251
  async function handleUserMessage() {
252
  const userText = userInput.value.trim();
253
- if (!userText || !generator || isLoadingModel) return;
254
  userInput.value = ''; userInput.style.height = 'auto'; updateChatUIState(true);
255
  displayMessage('user', userText); conversationHistory.push({ role: 'user', content: userText });
256
  updateModelStatus("AI thinking...", "loading");
257
  const messages = buildMessages(userText); // Use messages format
258
 
259
  try {
260
- // Call generator EXACTLY as in the example
261
  const outputs = await generator(messages, {
262
  max_new_tokens: 512,
263
- do_sample: false // From example
 
 
264
  });
265
  const replyText = cleanupResponse(outputs);
266
  console.log("Cleaned AI Output:", replyText);
@@ -270,18 +277,18 @@
270
  } catch (error) {
271
  console.error("AI response generation error:", error); displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
272
  } finally {
273
- if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success");
274
  updateChatUIState(generator !== null); userInput.focus();
275
  }
276
  }
277
 
278
  // --- Speech API Functions ---
279
- function initializeSpeechAPI() { /* No changes needed */
280
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition; if (SpeechRecognition) { recognition = new SpeechRecognition(); recognition.lang = 'en-US'; recognition.continuous = false; recognition.interimResults = false; recognition.onstart = () => { isListening = true; updateChatUIState(generator !== null); console.log('Listening...'); }; recognition.onresult = (event) => { userInput.value = event.results[0][0].transcript; userInput.dispatchEvent(new Event('input')); handleUserMessage(); }; recognition.onerror = (event) => { console.error("Speech error:", event.error); updateModelStatus(`Speech recognition error (${event.error})`, 'error'); setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000); }; recognition.onend = () => { isListening = false; updateChatUIState(generator !== null); console.log('Stopped listening.'); }; } else { console.warn("Speech Recognition not supported."); } if (!synthesis) { console.warn("Speech Synthesis not supported."); } else { toggleSpeakerButton.addEventListener('click', () => { botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput; updateSpeakerButtonUI(); saveState(); if (!botState.botSettings.useSpeechOutput) synthesis.cancel(); }); } updateChatUIState(false);
281
  }
282
- function loadVoices() { /* No changes needed */ if (!synthesis) return; let voices = synthesis.getVoices(); if (voices.length === 0) { synthesis.onvoiceschanged = () => { voices = synthesis.getVoices(); findAndSetVoice(voices); }; } else { findAndSetVoice(voices); } }
283
- function findAndSetVoice(voices) { /* No changes needed */ targetVoice = voices.find(v => v.lang === 'en-US') || voices.find(v => v.lang.startsWith('en-')); if (targetVoice) { console.log("Using English voice:", targetVoice.name, targetVoice.lang); } else { console.warn("No suitable English voice found."); } }
284
- function speakText(text) { /* No changes needed */ if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return; synthesis.cancel(); const utterance = new SpeechSynthesisUtterance(text); utterance.voice = targetVoice; utterance.lang = targetVoice.lang; utterance.rate = 1.0; utterance.pitch = 1.0; synthesis.speak(utterance); }
285
 
286
  // --- Event Listeners ---
287
  sendButton.addEventListener('click', handleUserMessage);
 
3
  <head>
4
  <meta charset="UTF-8">
5
  <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no">
6
+ <title>AI Assistant (Gemma 3 1B - Quantization Select)</title>
7
  <style>
8
  /* CSS는 이전과 동일 (CSS unchanged from previous version) */
9
  @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300;400;500;700&display=swap');
 
23
  * { box-sizing: border-box; margin: 0; padding: 0; }
24
  html { height: 100%; }
25
  body { font-family: 'Roboto', sans-serif; display: flex; flex-direction: column; align-items: center; justify-content: flex-start; min-height: 100vh; background-color: var(--bg-color); color: var(--text-color); padding: 10px; overscroll-behavior: none; }
26
+ #control-panel { background: var(--header-bg); padding: 15px; border-radius: 8px; margin-bottom: 10px; box-shadow: var(--header-shadow); width: 100%; max-width: 600px; border: 1px solid var(--border-color); }
27
+ #control-panel h2 { font-size: 1.1em; margin-bottom: 10px; color: var(--primary-color); font-weight: 500; text-align: center; }
28
+ .option-group { margin-bottom: 10px; text-align: center; }
29
+ .option-group label { margin-right: 15px; font-size: 0.95em; }
30
+ .option-group select { padding: 5px 8px; border-radius: 4px; border: 1px solid var(--input-border); font-size: 0.95em; }
31
+ #loadModelButton { padding: 10px 20px; font-size: 1em; background-color: var(--primary-color); color: white; border: none; border-radius: 5px; cursor: pointer; transition: background-color 0.2s; margin-top: 10px; display: block; margin-left: auto; margin-right: auto; }
32
  #loadModelButton:hover:not(:disabled) { background-color: var(--button-hover-bg); }
33
  #loadModelButton:disabled { background-color: var(--button-disabled-bg); cursor: not-allowed; }
34
+ #model-status { font-size: 0.9em; padding: 10px; border-radius: 4px; text-align: center; min-height: 40px; line-height: 1.4; margin-top: 10px; }
35
  #model-status.info { background-color: #e2e3e5; border: 1px solid #d6d8db; color: #383d41; }
36
  #model-status.loading { background-color: var(--warning-bg); border: 1px solid var(--warning-border); color: var(--warning-color); }
37
  #model-status.success { background-color: var(--success-bg); border: 1px solid var(--success-border); color: var(--success-color); }
38
  #model-status.error { background-color: var(--error-bg); border: 1px solid var(--error-border); color: var(--error-color); }
39
+
40
+ #chat-container { width: 100%; max-width: 600px; height: 70vh; /* Adjusted height */ max-height: 650px; background-color: #ffffff; border-radius: 12px; box-shadow: var(--container-shadow); display: flex; flex-direction: column; overflow: hidden; border: 1px solid var(--border-color); }
41
  h1 { text-align: center; color: var(--primary-color); padding: 15px; background-color: var(--header-bg); border-bottom: 1px solid var(--border-color); font-size: 1.2em; font-weight: 500; flex-shrink: 0; box-shadow: var(--header-shadow); position: relative; z-index: 10; }
42
  #chatbox { flex-grow: 1; overflow-y: auto; padding: 15px; display: flex; flex-direction: column; gap: 12px; scrollbar-width: thin; scrollbar-color: var(--scrollbar-thumb) var(--scrollbar-track); background-color: var(--bg-color); }
43
  #chatbox::-webkit-scrollbar { width: 6px; } #chatbox::-webkit-scrollbar-track { background: var(--scrollbar-track); border-radius: 3px; } #chatbox::-webkit-scrollbar-thumb { background-color: var(--scrollbar-thumb); border-radius: 3px; }
 
58
  #toggleSpeakerButton.muted { background-color: #aaa; }
59
  @media (max-width: 600px) { /* Responsive styles */
60
  body { padding: 5px; justify-content: flex-start; } #control-panel { margin-bottom: 5px; padding: 12px; }
61
+ .option-group { text-align: left; } .option-group label { display: block; margin-bottom: 8px; margin-right: 0; }
62
  #chat-container { width: 100%; height: auto; flex-grow: 1; border-radius: 12px; max-height: none; margin-bottom: 5px; }
63
  h1 { font-size: 1.1em; padding: 12px; } #chatbox { padding: 12px 8px; gap: 10px; }
64
  #messages div { max-width: 90%; font-size: 0.95em; padding: 9px 14px;}
 
66
  .control-button { width: 40px; height: 40px; font-size: 1.2em; }
67
  }
68
  </style>
 
69
  <script type="importmap">
70
+ { "imports": { "@xenova/transformers": "https://cdn.jsdelivr.net/npm/@xenova/transformers@latest" } }
 
 
 
 
71
  </script>
72
  </head>
73
  <body>
74
  <div id="control-panel">
75
  <h2>Model Loader</h2>
76
+ <div class="option-group">
77
+ <label for="quantization-select">Select Quantization (dtype):</label>
78
+ <select id="quantization-select">
79
+ <option value="q4" selected>Q4 (Recommended by Doc)</option>
80
+ <option value="fp16">FP16 (Half Precision)</option>
81
+ <option value="int8">INT8 (Integer Quantized)</option>
82
+ <!-- Add other dtypes if supported/tested, e.g., 'fp32' -->
83
+ <option value="">Default (No dtype specified)</option>
84
+ </select>
85
+ </div>
86
+ <button id="loadModelButton">Load Gemma 3 1B Model</button>
87
+ <div id="model-status" class="info">Select quantization and click load. **Warning:** Loading `gemma3_text` models is expected to fail due to library incompatibility.</div>
88
  </div>
89
 
90
  <div id="chat-container">
91
  <h1 id="chatbot-name">AI Assistant</h1>
92
  <div id="chatbox">
93
+ <div id="messages"></div>
 
 
94
  </div>
95
  <div id="input-area">
96
+ <textarea id="userInput" placeholder="Please load a model first..." rows="1" disabled></textarea>
97
  <button id="speechButton" class="control-button" title="Speak message" disabled>๐ŸŽค</button>
98
  <button id="toggleSpeakerButton" class="control-button" title="Toggle AI speech output" disabled>๐Ÿ”Š</button>
99
  <button id="sendButton" class="control-button" title="Send message" disabled>โžค</button>
 
103
  <script type="module">
104
  import { pipeline, env } from '@xenova/transformers';
105
 
106
+ // Using the GQA version as consistently referred to in user prompts
107
+ const MODEL_NAME = 'onnx-community/gemma-3-1b-it-ONNX-GQA';
108
  const TASK = 'text-generation';
 
109
 
110
  // Environment setup
111
  env.allowRemoteModels = true;
 
123
  const toggleSpeakerButton = document.getElementById('toggleSpeakerButton');
124
  const modelStatus = document.getElementById('model-status');
125
  const loadModelButton = document.getElementById('loadModelButton');
126
+ const quantizationSelect = document.getElementById('quantization-select'); // Dropdown
127
 
128
  // State
129
  let generator = null;
130
  let isLoadingModel = false;
131
+ let conversationHistory = []; // Stores { role: 'user' | 'assistant', content: '...' }
132
  let botState = { botName: "AI Assistant", userName: "User", botSettings: { useSpeechOutput: true } };
133
+ const stateKey = 'gemma3_1b_quant_state_v1';
134
+ const historyKey = 'gemma3_1b_quant_history_v1';
135
 
136
  // Speech API
137
  let recognition = null;
 
141
 
142
  // --- Initialization ---
143
  window.addEventListener('load', () => {
144
+ loadState(); chatbotNameElement.textContent = botState.botName;
145
+ updateSpeakerButtonUI(); initializeSpeechAPI(); setupInputAutosize();
146
+ updateChatUIState(false); displayHistory(); setTimeout(loadVoices, 500);
 
 
 
 
 
147
  loadModelButton.addEventListener('click', handleLoadModelClick);
148
+ console.log("Using latest Transformers.js. Ready to load model.");
149
+ displayMessage('system', `Ready to load ${MODEL_NAME}. Select quantization type.`, false);
150
  });
151
 
152
  // --- State Persistence ---
153
+ function loadState() { /* No changes */
154
  const savedState = localStorage.getItem(stateKey); if (savedState) { try { const loaded = JSON.parse(savedState); botState = { ...botState, ...loaded, botSettings: { ...botState.botSettings, ...(loaded.botSettings || {}) } }; } catch(e) {} }
155
  const savedHistory = localStorage.getItem(historyKey); if (savedHistory) { try { conversationHistory = JSON.parse(savedHistory); if (!Array.isArray(conversationHistory)) conversationHistory = []; } catch(e) { conversationHistory = []; } }
156
  }
157
// Persist the current bot settings and conversation history to localStorage.
function saveState() {
    localStorage.setItem(stateKey, JSON.stringify(botState));
    localStorage.setItem(historyKey, JSON.stringify(conversationHistory));
}
160
// Re-render the chat log from the saved conversation history.
// Only user/assistant turns are shown; system entries are skipped.
function displayHistory() {
    chatbox.innerHTML = '';
    for (const msg of conversationHistory) {
        if (msg.role === 'user' || msg.role === 'assistant') {
            const sender = msg.role === 'user' ? 'user' : 'bot';
            displayMessage(sender, msg.content, false);
        }
    }
}
163
 
164
  // --- UI Update Functions ---
165
+ function displayMessage(sender, text, animate = true, isError = false) { /* No changes */
166
  const messageDiv = document.createElement('div'); let messageClass = sender === 'user' ? 'user-message' : sender === 'bot' ? 'bot-message' : 'system-message'; if (sender === 'system' && isError) messageClass = 'error-message'; messageDiv.classList.add(messageClass); if (!animate) messageDiv.style.animation = 'none'; text = text.replace(/</g, "<").replace(/>/g, ">"); text = text.replace(/\[(.*?)\]\((.*?)\)/g, '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'); text = text.replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>').replace(/\*(.*?)\*/g, '<em>$1</em>'); text = text.replace(/\n/g, '<br>'); messageDiv.innerHTML = text; chatbox.appendChild(messageDiv); chatbox.scrollTo({ top: chatbox.scrollHeight, behavior: animate ? 'smooth' : 'auto' });
167
  }
168
+ function updateModelStatus(message, type = 'info') { /* No changes */
169
  modelStatus.textContent = message; modelStatus.className = 'model-status ' + type; console.log(`Model Status (${type}): ${message}`);
170
  }
171
+ function updateChatUIState(isModelLoadedSuccessfully) { /* No changes */
172
+ userInput.disabled = !isModelLoadedSuccessfully || isLoadingModel; sendButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || userInput.value.trim() === ''; speechButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || isListening || !recognition; toggleSpeakerButton.disabled = !isModelLoadedSuccessfully || isLoadingModel || !synthesis; loadModelButton.disabled = isLoadingModel || isModelLoadedSuccessfully; quantizationSelect.disabled = isLoadingModel || isModelLoadedSuccessfully; if (isModelLoadedSuccessfully) { userInput.placeholder = "How can I help you today?"; } else if (isLoadingModel) { userInput.placeholder = "Model loading..."; } else { userInput.placeholder = "Please load a model first..."; }
173
  }
174
+ function updateSpeakerButtonUI() { /* No changes */
175
  toggleSpeakerButton.textContent = botState.botSettings.useSpeechOutput ? '๐Ÿ”Š' : '๐Ÿ”‡'; toggleSpeakerButton.title = botState.botSettings.useSpeechOutput ? 'Turn off AI speech' : 'Turn on AI speech'; toggleSpeakerButton.classList.toggle('muted', !botState.botSettings.useSpeechOutput);
176
  }
177
  function showSpeechStatus(message) { console.log("Speech Status:", message); }
 
181
// Kick off model loading with the quantization (dtype) chosen in the dropdown.
// Guards against re-entry while a load is in progress or a model is already loaded.
async function handleLoadModelClick() {
    if (isLoadingModel || generator) return;
    isLoadingModel = true;
    generator = null;
    // An empty <select> value means "no dtype option" -> pass null through.
    const selectedQuantization = quantizationSelect.value || null;
    updateChatUIState(false);
    await initializeModel(MODEL_NAME, selectedQuantization);
    isLoadingModel = false;
    // Re-enable the chat UI only if the pipeline was created successfully.
    updateChatUIState(generator !== null);
}
190
 
191
+ // Initialize model with selected quantization type (dtype)
192
+ async function initializeModel(modelId, quantizationType) {
193
+ let options = {
194
+ progress_callback: (progress) => {
195
+ const msg = `[Loading ${quantizationType || 'Default'}]: ${progress.status}] ${progress.file ? progress.file.split('/').pop() : ''} (${Math.round(progress.progress || 0)}%)`;
196
+ updateModelStatus(msg, 'loading');
197
+ }
198
+ };
199
+ if (quantizationType) {
200
+ options.dtype = quantizationType; // Set dtype only if selected
201
+ }
202
+
203
+ const loadingMsg = `Loading ${modelId} ${quantizationType ? `with dtype: "${quantizationType}"` : '(default dtype)'}... (Still expected to fail)`;
204
+ updateModelStatus(loadingMsg, 'loading');
205
+ displayMessage('system', `Attempting to load ${modelId} with ${quantizationType ? `dtype: ${quantizationType}` : 'default dtype'}...`, false);
206
 
207
  try {
208
+ // Create pipeline with selected options
209
+ generator = await pipeline(TASK, modelId, options);
 
 
 
 
 
 
210
 
211
+ // UNLIKELY TO REACH HERE
212
+ updateModelStatus(`${modelId} (dtype: ${quantizationType || 'auto'}) loaded successfully!`, 'success');
213
+ displayMessage('system', `[SUCCESS] ${modelId} loaded with ${quantizationType || 'auto'} dtype.`, false);
214
 
215
  } catch (error) {
216
+ console.error(`Model loading failed for ${modelId} (dtype: ${quantizationType || 'auto'}):`, error);
217
  let errorMsg = `Failed to load ${modelId}: ${error.message}.`;
 
218
  if (error.message.includes("Unsupported model type") || error.message.includes("gemma3_text")) {
219
+ errorMsg += " The 'gemma3_text' model type is likely unsupported regardless of dtype.";
220
  } else if (error.message.includes("split is not a function")) {
221
+ errorMsg += " TypeError during config parsing, likely due to unsupported type.";
222
  } else {
223
+ errorMsg += " Check console/network/memory.";
224
  }
225
  updateModelStatus(errorMsg, 'error');
226
  displayMessage('system', `[ERROR] ${errorMsg}`, true, true);
 
228
  }
229
  }
230
 
231
// Assemble the chat-template messages array for the pipeline:
// system prompt, then all prior turns, then the new user message.
function buildMessages(newUserMessage) {
    const messages = [
        { role: "system", content: "You are a helpful assistant." },
        ...conversationHistory,
        { role: "user", content: newUserMessage },
    ];
    console.log("Input Messages:", messages);
    return messages;
}
238
 
239
// Extract the assistant's reply from the pipeline output.
// Expected shape: [{ generated_text: [ ...input messages, { role, content } ] }],
// where the last entry is the model's turn. Returns a random fallback string
// when the structure does not match or the extracted content is empty.
function cleanupResponse(output) {
    try {
        const turns = output?.[0]?.generated_text;
        if (Array.isArray(turns)) {
            const last = turns.at(-1);
            const isModelTurn = last && (last.role === 'assistant' || last.role === 'model');
            if (isModelTurn && typeof last.content === 'string') {
                // Strip the Gemma end-of-turn marker and surrounding whitespace.
                const cleaned = last.content.trim().replace(/<end_of_turn>/g, '').trim();
                if (cleaned.length > 0) return cleaned;
            }
        }
    } catch (e) { console.error("Error parsing output:", e, output); }
    console.warn("Could not extract response using standard structure.", output);
    const fallbacks = [ "Sorry, response format was unexpected.", "My response might be garbled.", "Error processing the AI answer." ];
    return fallbacks[Math.floor(Math.random() * fallbacks.length)];
}
253
 
254
+
255
// --- Main Interaction Logic ---
// Handle a single user turn: validate/clear the input box, record the user
// message in the transcript and history, run the generation pipeline with
// sampling parameters, and restore the UI/status state in `finally`.
async function handleUserMessage() {
    const userText = userInput.value.trim();
    if (!userText || !generator || isLoadingModel) return; // Check if generator is ready
    userInput.value = ''; userInput.style.height = 'auto'; updateChatUIState(true);
    displayMessage('user', userText); conversationHistory.push({ role: 'user', content: userText });
    updateModelStatus("AI thinking...", "loading");
    const messages = buildMessages(userText); // Use messages format
    try {
        // Call generator with messages array and parameters
        const outputs = await generator(messages, {
            max_new_tokens: 512,
            do_sample: true, // Example: enable sampling
            temperature: 0.7,
            top_k: 50
        });
        const replyText = cleanupResponse(outputs);
        console.log("Cleaned AI Output:", replyText);
        // NOTE(review): this view is a diff that hides a few unchanged lines
        // here (original lines 274-276) — presumably the reply is displayed,
        // pushed to history, and/or spoken; confirm against the full file.
    } catch (error) {
        console.error("AI response generation error:", error); displayMessage('system', `[ERROR] Failed to generate response: ${error.message}`, true, true);
    } finally {
        // Restore the ready banner and re-enable input only if a model is loaded.
        if(generator) updateModelStatus(`${MODEL_NAME} ready.`, "success"); // Assuming MODEL_NAME hasn't changed
        updateChatUIState(generator !== null); userInput.focus();
    }
}
284
 
285
// --- Speech API Functions ---
// Wire up the Web Speech APIs: speech recognition for voice input and the
// speaker toggle for speech synthesis output. Chat controls start disabled
// (no model loaded yet).
function initializeSpeechAPI() {
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (SpeechRecognition) {
        recognition = new SpeechRecognition();
        recognition.lang = 'en-US';
        recognition.continuous = false;
        recognition.interimResults = false;
        recognition.onstart = () => {
            isListening = true;
            updateChatUIState(generator !== null);
            console.log('Listening...');
        };
        recognition.onresult = (evt) => {
            // Copy the transcript into the input box and submit it immediately.
            userInput.value = evt.results[0][0].transcript;
            userInput.dispatchEvent(new Event('input'));
            handleUserMessage();
        };
        recognition.onerror = (evt) => {
            console.error("Speech error:", evt.error);
            updateModelStatus(`Speech recognition error (${evt.error})`, 'error');
            // Restore the normal status banner after a short delay.
            setTimeout(() => updateModelStatus(generator ? `${MODEL_NAME} ready.` : 'Model not loaded.', generator ? 'success' : 'error'), 3000);
        };
        recognition.onend = () => {
            isListening = false;
            updateChatUIState(generator !== null);
            console.log('Stopped listening.');
        };
    } else {
        console.warn("Speech Recognition not supported.");
    }
    if (!synthesis) {
        console.warn("Speech Synthesis not supported.");
    } else {
        toggleSpeakerButton.addEventListener('click', () => {
            botState.botSettings.useSpeechOutput = !botState.botSettings.useSpeechOutput;
            updateSpeakerButtonUI();
            saveState();
            if (!botState.botSettings.useSpeechOutput) synthesis.cancel();
        });
    }
    updateChatUIState(false);
}
289
// Fetch the available TTS voices; some browsers populate the list
// asynchronously, in which case we wait for the voiceschanged event
// before selecting one.
function loadVoices() {
    if (!synthesis) return;
    let voices = synthesis.getVoices();
    if (voices.length === 0) {
        synthesis.onvoiceschanged = () => {
            voices = synthesis.getVoices();
            findAndSetVoice(voices);
        };
    } else {
        findAndSetVoice(voices);
    }
}
290
// Pick an English voice for TTS and store it in the module-level
// `targetVoice`: prefer an exact en-US match, otherwise any en-* variant.
function findAndSetVoice(voices) {
    const exactMatch = voices.find((voice) => voice.lang === 'en-US');
    targetVoice = exactMatch || voices.find((voice) => voice.lang.startsWith('en-'));
    if (targetVoice) {
        console.log("Using English voice:", targetVoice.name, targetVoice.lang);
    } else {
        console.warn("No suitable English voice found.");
    }
}
291
// Speak `text` aloud via the Web Speech synthesis API. No-op unless TTS is
// available, enabled in settings, and an English voice has been selected.
function speakText(text) {
    if (!synthesis || !botState.botSettings.useSpeechOutput || !targetVoice) return;
    // Cancel any utterance already in progress before starting a new one.
    synthesis.cancel();
    const utterance = new SpeechSynthesisUtterance(text);
    Object.assign(utterance, {
        voice: targetVoice,
        lang: targetVoice.lang,
        rate: 1.0,
        pitch: 1.0,
    });
    synthesis.speak(utterance);
}
292
 
293
  // --- Event Listeners ---
294
  sendButton.addEventListener('click', handleUserMessage);