Sarah Ciston committed
Commit · 473e493
1 Parent(s): 1cff79d
try inference with Oauth

sketch.js CHANGED
@@ -1,10 +1,24 @@
// connect to API via module

// import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
-import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+// import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
+
+const oauthResult = await oauthHandleRedirectIfPresent();
+
+if (!oauthResult) {
+  // If the user is not logged in, redirect to the login page
+  window.location.href = await oauthLoginUrl();
+}
+
+// You can use oauthResult.accessToken, oauthResult.accessTokenExpiresAt and oauthResult.userInfo
+console.log(oauthResult);
+const HF_TOKEN = oauthResult
+
// import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
// import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
-
+import { HfInference } from 'https://esm.sh/@huggingface/inference';
+const inference = new HfInference(HF_TOKEN);

// PIPELINE MODELS
// models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
@@ -12,7 +26,7 @@ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers


// Since we will download the model from the Hugging Face Hub, we can skip the local model check
-env.allowLocalModels = false;
+// env.allowLocalModels = false;

///////// VARIABLES

@@ -157,72 +171,82 @@ new p5(function (p5) {

///// MODEL STUFF

-async function runModel(PREPROMPT, PROMPT){
-
+// async function runModel(PREPROMPT, PROMPT){
+// // // Chat completion API

-
+// let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'

-
-
-
-
-
-
+// // 'meta-llama/Meta-Llama-3-70B-Instruct'
+// // 'openai-community/gpt2'
+// // 'Xenova/gpt-3.5-turbo'
+// // , 'Xenova/distilgpt2'
+// // 'mistralai/Mistral-7B-Instruct-v0.2'
+// // 'HuggingFaceH4/zephyr-7b-beta'

-
-
-
+// // pipeline/transformers version
+// let pipe = await pipeline('text-generation', MODELNAME);
+// // seems to work with default model distilgpt2 ugh


-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+// // let out = await pipe(inputText, {
+// // max_tokens: 250,
+// // return_full_text: false
+// // // repetition_penalty: 1.5,
+// // // num_return_sequences: 1 //must be 1 for greedy search
+// // })
+
+// // let inputText = PREPROMPT + PROMPT
+
+// // let out = await pipe(inputText)
+
+// let out = await pipe({
+// messages: [{
+// role: "system",
+// content: PREPROMPT
+// },{
+// role: "user",
+// content: PROMPT
+// }],
+// max_new_tokens: 100
+// });

-
+// console.log(out)

-
-
+// var modelResult = await out[0].generated_text
+// console.log(modelResult)

-
-
-}
+// return modelResult

+// }

+async function runModel(PREPROMPT, PROMPT){
// inference API version, not working in spaces
-// const out = await inference.chatCompletion({
-// model: MODELNAME,
-// messages: [{ role: "user", content: PREPROMPT + PROMPT }],
-// max_tokens: 100
-// });

-//
+// let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+let MODELNAME = "mistralai/Mistral-7B-Instruct-v0.2"
+
+let out = await inference.chatCompletion({
+  model: MODELNAME,
+  messages: [{
+    role: "system",
+    content: PREPROMPT
+  },{
+    role: "user",
+    content: PROMPT
+  }],
+  max_tokens: 100
+});

-
+console.log(out)

-//
-// // var modelResult = await out[0].generated_text
-// console.log(modelResult);
+// modelResult = await out.messages[0].content

-
+var modelResult = await out.choices[0].message.content
+// var modelResult = await out[0].generated_text
+console.log(modelResult);

+return modelResult
+}


//inference.fill_mask({
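For reference, below is a minimal standalone sketch of the flow this commit tries out: OAuth login via @huggingface/hub followed by a chat completion via @huggingface/inference. It reuses the same ESM builds and model name that appear in the diff; the one deliberate difference is that it passes oauthResult.accessToken (the token string) to HfInference, whereas the commit assigns the whole oauthResult object to HF_TOKEN.

// Standalone sketch (not part of the commit): OAuth login + chat completion.
import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
import { HfInference } from 'https://esm.sh/@huggingface/inference';

// Handle the OAuth redirect if we are returning from the Hugging Face login page.
const oauthResult = await oauthHandleRedirectIfPresent();
if (!oauthResult) {
  // Not logged in yet: send the user to the Hugging Face login page.
  window.location.href = await oauthLoginUrl();
}

// The access token itself lives on oauthResult.accessToken.
const inference = new HfInference(oauthResult.accessToken);

async function runModel(PREPROMPT, PROMPT) {
  const out = await inference.chatCompletion({
    model: 'mistralai/Mistral-7B-Instruct-v0.2',
    messages: [
      { role: 'system', content: PREPROMPT },
      { role: 'user', content: PROMPT },
    ],
    max_tokens: 100,
  });
  // The response follows the OpenAI-style shape: choices[0].message.content.
  return out.choices[0].message.content;
}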