Update app.py
app.py
CHANGED
@@ -20,22 +20,8 @@ tokenizer = AutoTokenizer.from_pretrained(llm_model)
 #import numpy as np
 
 datasetiter = load_dataset("Namitg02/Test", split='train', streaming=False)
-#dataset = np.array(list(datasetiter))
-
-#dataset = np.dataset(np.array(list(datasetiter)))
 dataset = to_map_style_dataset(datasetiter)
 
-llm_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
-tokenizer = AutoTokenizer.from_pretrained(llm_model)
-# pulling tokeinzer for text generation model
-
-import numpy as np
-
-datasetiter = load_dataset("Namitg02/Test", split='train', streaming=False)
-#dataset = np.array(list(datasetiter))
-
-dataset = np.dataset(np.array(list(datasetiter)))
-
 
 #dataset = load_dataset("not-lain/wikipedia",revision = "embedded")
 #dataset = load_dataset("epfl-llm/guidelines", split='train')