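// Streams text generation from the Hugging Face Inference API into a small
// Vue 3 app: the user supplies an access token, picks a model, tunes the
// response length and temperature, and can stop a generation mid-stream.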
import { HfInference } from "https://cdn.skypack.dev/@huggingface/inference@latest";

// Vue is used as a global here, so it is presumably loaded from a CDN
// <script> tag before this module runs.
const { createApp, ref, onMounted, computed } = Vue;
const app = createApp({
  setup() {
    // Reactive UI state; the token is persisted across visits in localStorage.
    const token = ref(localStorage.getItem("token") || "");
    const userPrompt = ref("Write about the difference between AI Engineering");
    const currentGeneratedText = ref("");
    const models = ref([
      "mistralai/Mistral-7B-v0.1",
      "google/flan-t5-xxl",
    ]);
    const selectedModel = ref("mistralai/Mistral-7B-v0.1");
    const isRunning = ref(false);
    // Kept as strings because they bind to form inputs; parsed before use.
    const responseLength = ref("150");
    const temperature = ref("1.0");
    const generating = ref(false);
    let controller;
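
    // Human-readable status string derived from the generating flag.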
    const statusMessage = computed(() => {
      if (generating.value) return "Generating...";
      return "Ready";
    });
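
    // Opens a token-by-token stream from the Inference API for the selected
    // model, forwarding the abort signal so stop() can cancel the request.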
    const createTextGenerationStream = (hfInstance, prompt, abortControllerSignal) => {
      return hfInstance.textGenerationStream(
        {
          model: selectedModel.value,
          inputs: prompt,
          parameters: {
            max_new_tokens: parseInt(responseLength.value),
            temperature: parseFloat(temperature.value),
          },
        },
        {
          use_cache: false, // always query the model rather than a cached result
          signal: abortControllerSignal,
        }
      );
    };
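
    // Async generator that accumulates streamed tokens and yields the running
    // text after each one, so the template can render output progressively.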
    const generateTextStream = async function* (hfInstance, abortSignal, prompt) {
      let generatedText = "";
      generating.value = true;
      try {
        for await (const output of createTextGenerationStream(hfInstance, prompt, abortSignal)) {
          generatedText += output.token.text;
          yield generatedText;
        }
      } finally {
        // Reset even if the stream aborts or errors, so the status message
        // does not stay stuck on "Generating...".
        generating.value = false;
      }
    };
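
    // Starts a generation: persists the token, creates a fresh AbortController,
    // and writes each partial result into the reactive output as it arrives.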
    const run = async () => {
      isRunning.value = true;
      currentGeneratedText.value = "";
      controller = new AbortController();
      localStorage.setItem("token", token.value);
      const hfInstance = new HfInference(token.value);
      try {
        for await (const textStream of generateTextStream(
          hfInstance,
          controller.signal,
          userPrompt.value
        )) {
          currentGeneratedText.value = textStream;
        }
      } catch (e) {
        // An AbortError from stop() lands here, as do network and API errors.
        console.log(e);
      } finally {
        isRunning.value = false;
      }
    };
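
    // Cancels an in-flight generation via the AbortController.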
    const stop = () => {
      if (controller) {
        controller.abort();
      }
      isRunning.value = false;
    };
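
    // Restore a previously saved token once the component mounts.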
    onMounted(async () => {
      const localStorageToken = localStorage.getItem("token");
      if (localStorageToken) {
        token.value = localStorageToken;
      }
    });
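
    // Everything the template binds to.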
    return {
      token,
      userPrompt,
      currentGeneratedText,
      run,
      stop,
      models,
      selectedModel,
      isRunning,
      responseLength,
      temperature,
      statusMessage,
    };
  },
});

app.mount("#app");