Update index.html
index.html (+7 -12)
@@ -140,7 +140,7 @@
     const device = document.getElementById("device").value;
     const imageUrl = document.getElementById("image-url").value;
     const maxTokens = parseInt(document.getElementById("max-tokens").value) || 32;
-    const numRuns = parseInt(document.getElementById("num-runs").value) ||
+    const numRuns = parseInt(document.getElementById("num-runs").value) || 3;
     const doImageSplitting = document.getElementById("do-split").checked;
 
     const dtypeSettings = { decoder: decoder_dtype, embed: embed_dtype, vision: vision_dtype };
@@ -183,28 +183,23 @@
     const [processor, model] = await SmolVLM.getInstance(modelId, dtypeSettings, device);
     const text = processor.apply_chat_template(messages, { add_generation_prompt: true });
     const inputs = await processor(text, [image], { do_image_splitting: doImageSplitting });
-
-
-    let startTime;
-    let tps = 0;
-    const token_callback_function = () => {
-      startTime = startTime || performance.now();
-      tps = (numTokens++ / (performance.now() - startTime)) * 1000;
-    };
+
+    const start = performance.now();
     const streamer = new TextStreamer(processor.tokenizer, {
       skip_prompt: true,
       skip_special_tokens: true,
-      token_callback_function,
     });
-
     await model.generate({
       ...inputs,
       max_new_tokens: maxTokens,
       min_new_tokens: maxTokens,
       streamer,
     });
+    const end = performance.now();
+    const elapsed = end - start;
+
+    const tps = maxTokens / (elapsed / 1000); // accurate TPS
 
-    const elapsed = performance.now() - start;
     totalTime += elapsed;
     totalTps += tps;
     runsResults.push({
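For context, a minimal sketch of how the new timing presumably sits inside the surrounding benchmark loop. Only numRuns, maxTokens, elapsed, tps, totalTime, totalTps, and runsResults appear in the diff; the loop structure, the shape of the pushed record, and the final averaging below are illustrative assumptions, not code from this commit.

// Hypothetical benchmark loop around the generate() call shown above.
// model, inputs, and streamer are assumed to be set up per run as in the diff;
// identifiers not present in the diff are placeholders.
let totalTime = 0;
let totalTps = 0;
const runsResults = [];

for (let run = 0; run < numRuns; run++) {
  const start = performance.now();
  await model.generate({ ...inputs, max_new_tokens: maxTokens, min_new_tokens: maxTokens, streamer });
  const end = performance.now();

  const elapsed = end - start;               // wall-clock time for this run, in ms
  const tps = maxTokens / (elapsed / 1000);  // tokens per second over the whole call

  totalTime += elapsed;
  totalTps += tps;
  runsResults.push({ run: run + 1, elapsed, tps }); // per-run record (field names assumed)
}

const avgTime = totalTime / numRuns;         // average latency per run (ms)
const avgTps = totalTps / numRuns;           // average throughput across runs

Since min_new_tokens equals max_new_tokens, each run generates exactly maxTokens tokens, so maxTokens / (elapsed / 1000) is well defined; measured this way the figure includes prompt and image prefill inside generate(), whereas the removed token_callback_function timer only started at the first generated token.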