Sarah Ciston committed
Commit 473e493 · 1 Parent(s): 1cff79d

try inference with Oauth

Files changed (1)
  1. sketch.js +77 -53
sketch.js CHANGED
@@ -1,10 +1,24 @@
  // connect to API via module

  // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
- import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
  // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
  // import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
- // const inference = new HfInference();

  // PIPELINE MODELS
  // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
@@ -12,7 +26,7 @@ import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers

  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
- env.allowLocalModels = false;

  ///////// VARIABLES

@@ -157,72 +171,82 @@ new p5(function (p5) {

  ///// MODEL STUFF

- async function runModel(PREPROMPT, PROMPT){
-   // // Chat completion API

-   let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
-   // 'meta-llama/Meta-Llama-3-70B-Instruct'
-   // 'openai-community/gpt2'
-   // 'Xenova/gpt-3.5-turbo'
-   // , 'Xenova/distilgpt2'
-   // 'mistralai/Mistral-7B-Instruct-v0.2'
-   // 'HuggingFaceH4/zephyr-7b-beta'

-   // pipeline/transformers version
-   let pipe = await pipeline('text-generation', MODELNAME);
-   // seems to work with default model distilgpt2 ugh

-   // let out = await pipe(inputText, {
-   //   max_tokens: 250,
-   //   return_full_text: false
-   //   // repetition_penalty: 1.5,
-   //   // num_return_sequences: 1 //must be 1 for greedy search
-   // })

-   // let inputText = PREPROMPT + PROMPT
-   // let out = await pipe(inputText)

-   let out = await pipe({
-     messages: [{
-       role: "system",
-       content: PREPROMPT
-     },{
-       role: "user",
-       content: PROMPT
-     }],
-     max_new_tokens: 100
-   });

-   console.log(out)

-   var modelResult = await out[0].generated_text
-   console.log(modelResult)

-   return modelResult
- }

  // inference API version, not working in spaces
- // const out = await inference.chatCompletion({
- //   model: MODELNAME,
- //   messages: [{ role: "user", content: PREPROMPT + PROMPT }],
- //   max_tokens: 100
- // });

- // console.log(out)

- // // modelResult = await out.messages[0].content
- // var modelResult = await out.choices[0].message.content
- // // var modelResult = await out[0].generated_text
- // console.log(modelResult);

- // return modelResult

  //inference.fill_mask({
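
A note on the deleted pipeline version above: in @xenova/transformers, the text-generation pipeline is called with a prompt string (or an array of strings) plus an options object, not with a { messages: ... } wrapper, which is likely why it only "seemed to work" with the default model. A minimal sketch of a string-prompt call under that assumption, reusing one of the candidate models the sketch itself lists (the prompt values are placeholders):

import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';

// Placeholder stand-ins for the sketch's PREPROMPT and PROMPT values
const PREPROMPT = 'Continue this story in one kind sentence: ';
const PROMPT = 'The kitten climbed the bookshelf';

// 'Xenova/distilgpt2' appears in the sketch's own model list
const generator = await pipeline('text-generation', 'Xenova/distilgpt2');

// The pipeline takes a plain string; generation options go in the second argument
const out = await generator(PREPROMPT + PROMPT, { max_new_tokens: 100 });
console.log(out[0].generated_text);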
 
sketch.js (after changes):

  // connect to API via module

  // import { AutoTokenizer, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+ // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
+ import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
+
+ const oauthResult = await oauthHandleRedirectIfPresent();
+
+ if (!oauthResult) {
+   // If the user is not logged in, redirect to the login page
+   window.location.href = await oauthLoginUrl();
+ }
+
+ // You can use oauthResult.accessToken, oauthResult.accessTokenExpiresAt and oauthResult.userInfo
+ console.log(oauthResult);
+ const HF_TOKEN = oauthResult
+
  // import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
  // import { HfInference } from 'https://cdn.jsdelivr.net/npm/@huggingface/[email protected]/+esm';
+ import { HfInference } from 'https://esm.sh/@huggingface/inference';
+ const inference = new HfInference(HF_TOKEN);

  // PIPELINE MODELS
  // models('Xenova/gpt2', 'Xenova/gpt-3.5-turbo', 'mistralai/Mistral-7B-Instruct-v0.2', 'Xenova/llama-68m', 'meta-llama/Meta-Llama-3-8B', 'Xenova/bloom-560m', 'Xenova/distilgpt2')
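
One likely bug to flag in the new OAuth wiring: oauthHandleRedirectIfPresent() resolves to an object carrying accessToken, accessTokenExpiresAt, and userInfo (or false when no redirect happened), so assigning the whole result to HF_TOKEN hands HfInference an object instead of a token string. A minimal sketch of the presumably intended setup; the .accessToken fix is the assumption being made here:

import { oauthLoginUrl, oauthHandleRedirectIfPresent } from 'https://esm.sh/@huggingface/hub';
import { HfInference } from 'https://esm.sh/@huggingface/inference';

const oauthResult = await oauthHandleRedirectIfPresent();
if (!oauthResult) {
  // Not logged in yet: send the user through the Hugging Face OAuth flow
  window.location.href = await oauthLoginUrl();
}

// The token string itself lives on .accessToken, not on the result object
const HF_TOKEN = oauthResult.accessToken;
const inference = new HfInference(HF_TOKEN);

Because this all runs at the top level with await, the Space's HTML must load sketch.js as an ES module for the handshake to execute.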
 

  // Since we will download the model from the Hugging Face Hub, we can skip the local model check
+ // env.allowLocalModels = false;

  ///////// VARIABLES
 
 

  ///// MODEL STUFF

+ // async function runModel(PREPROMPT, PROMPT){
+ //   // Chat completion API
+
+ //   let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+ //   // 'meta-llama/Meta-Llama-3-70B-Instruct'
+ //   // 'openai-community/gpt2'
+ //   // 'Xenova/gpt-3.5-turbo'
+ //   // , 'Xenova/distilgpt2'
+ //   // 'mistralai/Mistral-7B-Instruct-v0.2'
+ //   // 'HuggingFaceH4/zephyr-7b-beta'
+
+ //   // pipeline/transformers version
+ //   let pipe = await pipeline('text-generation', MODELNAME);
+ //   // seems to work with default model distilgpt2 ugh
+
+ //   // let out = await pipe(inputText, {
+ //   //   max_tokens: 250,
+ //   //   return_full_text: false
+ //   //   // repetition_penalty: 1.5,
+ //   //   // num_return_sequences: 1 //must be 1 for greedy search
+ //   // })
+
+ //   // let inputText = PREPROMPT + PROMPT
+ //   // let out = await pipe(inputText)
+
+ //   let out = await pipe({
+ //     messages: [{
+ //       role: "system",
+ //       content: PREPROMPT
+ //     },{
+ //       role: "user",
+ //       content: PROMPT
+ //     }],
+ //     max_new_tokens: 100
+ //   });
+
+ //   console.log(out)
+
+ //   var modelResult = await out[0].generated_text
+ //   console.log(modelResult)
+
+ //   return modelResult
+ // }

+ async function runModel(PREPROMPT, PROMPT){
    // inference API version, not working in spaces

+   // let MODELNAME = 'mistralai/Mistral-Nemo-Instruct-2407'
+   let MODELNAME = "mistralai/Mistral-7B-Instruct-v0.2"
+
+   let out = await inference.chatCompletion({
+     model: MODELNAME,
+     messages: [{
+       role: "system",
+       content: PREPROMPT
+     },{
+       role: "user",
+       content: PROMPT
+     }],
+     max_tokens: 100
+   });
+
+   console.log(out)
+
+   // modelResult = await out.messages[0].content
+   var modelResult = await out.choices[0].message.content
+   // var modelResult = await out[0].generated_text
+   console.log(modelResult);
+
+   return modelResult
+ }

  //inference.fill_mask({
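
For reference, a hypothetical call into the new runModel once the OAuth handshake has completed; the prompt strings are placeholders, and in the sketch they presumably come from the p5 interface:

const PREPROMPT = 'You are a helpful writing assistant.';
const PROMPT = 'Describe a kitten in one sentence.';

// runModel resolves to the assistant text from out.choices[0].message.content
const reply = await runModel(PREPROMPT, PROMPT);
console.log(reply);

Note that chatCompletion caps output with max_tokens (here 100), while the transformers.js pipeline uses max_new_tokens; the commit keeps that distinction correct for each API.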