Upload 12 files
Browse files- app.py +24 -0
- quantization_model.ipynb +202 -0
- quantization_model.zip +3 -0
- quantization_model/config.json +54 -0
- quantization_model/generation_config.json +6 -0
- quantization_model/merges.txt +0 -0
- quantization_model/model.safetensors +3 -0
- quantization_model/special_tokens_map.json +5 -0
- quantization_model/tokenizer.json +0 -0
- quantization_model/tokenizer_config.json +20 -0
- quantization_model/vocab.json +0 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch

st.title("quantization_Generator Fine tunning model")

# Directory holding the quantized model artifacts saved by the notebook.
MODEL_DIR = "quantization_model"


@st.cache_resource
def _load_generator(model_dir: str):
    """Load the model/tokenizer and build a text-generation pipeline.

    Cached with ``st.cache_resource`` so the (expensive) load from disk
    happens once per process instead of on every Streamlit rerun — without
    caching, each widget interaction reloaded the full model.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model = AutoModelForCausalLM.from_pretrained(model_dir)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


code_generator = _load_generator(MODEL_DIR)

inputs_text = st.text_input("Please enter the text", value="def quicksort(arr):")

if st.button("submit"):
    # Show progress feedback: generation can take several seconds on CPU.
    with st.spinner("Generating..."):
        generated_code = code_generator(
            inputs_text, max_length=200, num_return_sequences=1
        )
    st.write(generated_code[0]["generated_text"])
quantization_model.ipynb
ADDED
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"id": "b7c4923e-d406-4a06-a847-b068a1f8154a",
|
7 |
+
"metadata": {},
|
8 |
+
"outputs": [
|
9 |
+
{
|
10 |
+
"name": "stdout",
|
11 |
+
"output_type": "stream",
|
12 |
+
"text": [
|
13 |
+
"Requirement already satisfied: U in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (1.0)\n",
|
14 |
+
"Requirement already satisfied: bitsandbytes in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (0.45.3)\n",
|
15 |
+
"Requirement already satisfied: revel>=0.9.0 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from U) (0.9.1)\n",
|
16 |
+
"Requirement already satisfied: typing-extensions in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from U) (4.12.2)\n",
|
17 |
+
"Requirement already satisfied: torch<3,>=2.0 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from bitsandbytes) (2.2.1+cu121)\n",
|
18 |
+
"Requirement already satisfied: numpy>=1.17 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from bitsandbytes) (1.26.4)\n",
|
19 |
+
"Requirement already satisfied: blessed<2.0.0,>=1.19.1 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from revel>=0.9.0->U) (1.20.0)\n",
|
20 |
+
"Requirement already satisfied: colorama<0.5.0,>=0.4.6 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from revel>=0.9.0->U) (0.4.6)\n",
|
21 |
+
"Requirement already satisfied: readchar<5.0.0,>=4.0.3 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from revel>=0.9.0->U) (4.2.1)\n",
|
22 |
+
"Requirement already satisfied: filelock in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (3.17.0)\n",
|
23 |
+
"Requirement already satisfied: sympy in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (1.13.3)\n",
|
24 |
+
"Requirement already satisfied: networkx in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (3.4.2)\n",
|
25 |
+
"Requirement already satisfied: jinja2 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (3.1.5)\n",
|
26 |
+
"Requirement already satisfied: fsspec in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (2024.12.0)\n",
|
27 |
+
"Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.1.105 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (12.1.105)\n",
|
28 |
+
"Requirement already satisfied: nvidia-cuda-runtime-cu12==12.1.105 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (12.1.105)\n",
|
29 |
+
"Requirement already satisfied: nvidia-cuda-cupti-cu12==12.1.105 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (12.1.105)\n",
|
30 |
+
"Requirement already satisfied: nvidia-cudnn-cu12==8.9.2.26 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (8.9.2.26)\n",
|
31 |
+
"Requirement already satisfied: nvidia-cublas-cu12==12.1.3.1 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (12.1.3.1)\n",
|
32 |
+
"Requirement already satisfied: nvidia-cufft-cu12==11.0.2.54 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (11.0.2.54)\n",
|
33 |
+
"Requirement already satisfied: nvidia-curand-cu12==10.3.2.106 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (10.3.2.106)\n",
|
34 |
+
"Requirement already satisfied: nvidia-cusolver-cu12==11.4.5.107 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (11.4.5.107)\n",
|
35 |
+
"Requirement already satisfied: nvidia-cusparse-cu12==12.1.0.106 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (12.1.0.106)\n",
|
36 |
+
"Requirement already satisfied: nvidia-nccl-cu12==2.19.3 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (2.19.3)\n",
|
37 |
+
"Requirement already satisfied: nvidia-nvtx-cu12==12.1.105 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (12.1.105)\n",
|
38 |
+
"Requirement already satisfied: triton==2.2.0 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from torch<3,>=2.0->bitsandbytes) (2.2.0)\n",
|
39 |
+
"Requirement already satisfied: nvidia-nvjitlink-cu12 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from nvidia-cusolver-cu12==11.4.5.107->torch<3,>=2.0->bitsandbytes) (12.8.61)\n",
|
40 |
+
"Requirement already satisfied: wcwidth>=0.1.4 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from blessed<2.0.0,>=1.19.1->revel>=0.9.0->U) (0.2.13)\n",
|
41 |
+
"Requirement already satisfied: six>=1.9.0 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from blessed<2.0.0,>=1.19.1->revel>=0.9.0->U) (1.17.0)\n",
|
42 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from jinja2->torch<3,>=2.0->bitsandbytes) (3.0.2)\n",
|
43 |
+
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in /system/conda/miniconda3/envs/cloudspace/lib/python3.10/site-packages (from sympy->torch<3,>=2.0->bitsandbytes) (1.3.0)\n"
|
44 |
+
]
|
45 |
+
}
|
46 |
+
],
|
47 |
+
"source": [
|
48 |
+
"!pip install -U bitsandbytes"
|
49 |
+
]
|
50 |
+
},
|
51 |
+
{
|
52 |
+
"cell_type": "code",
|
53 |
+
"execution_count": 6,
|
54 |
+
"id": "27fd8559-b9a2-4c1f-bebd-eb19e7acac39",
|
55 |
+
"metadata": {},
|
56 |
+
"outputs": [
|
57 |
+
{
|
58 |
+
"name": "stderr",
|
59 |
+
"output_type": "stream",
|
60 |
+
"text": [
|
61 |
+
"The `load_in_4bit` and `load_in_8bit` arguments are deprecated and will be removed in the future versions. Please, pass a `BitsAndBytesConfig` object in `quantization_config` argument instead.\n"
|
62 |
+
]
|
63 |
+
}
|
64 |
+
],
|
65 |
+
"source": [
|
66 |
+
"from transformers import AutoModelForCausalLM, AutoTokenizer\n",
|
67 |
+
"import torch\n",
|
68 |
+
"\n",
|
69 |
+
"model_name = \"gpt2\"\n",
|
70 |
+
"\n",
|
71 |
+
"model = AutoModelForCausalLM.from_pretrained(\n",
|
72 |
+
" model_name,\n",
|
73 |
+
" load_in_4bit=True, # Enable 4-bit quantization\n",
|
74 |
+
" device_map=\"auto\"\n",
|
75 |
+
")\n",
|
76 |
+
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n"
|
77 |
+
]
|
78 |
+
},
|
79 |
+
{
|
80 |
+
"cell_type": "code",
|
81 |
+
"execution_count": 8,
|
82 |
+
"id": "16061d86-4874-4f90-98b6-890ac640fde8",
|
83 |
+
"metadata": {},
|
84 |
+
"outputs": [
|
85 |
+
{
|
86 |
+
"name": "stderr",
|
87 |
+
"output_type": "stream",
|
88 |
+
"text": [
|
89 |
+
"Device set to use cuda:0\n",
|
90 |
+
"Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
|
91 |
+
]
|
92 |
+
},
|
93 |
+
{
|
94 |
+
"name": "stdout",
|
95 |
+
"output_type": "stream",
|
96 |
+
"text": [
|
97 |
+
"how is the time required to make a long shot out of it when you have no idea when the money may be better spent on a small percentage of it which has to be purchased or sold without making a mistake?\n",
|
98 |
+
"\n",
|
99 |
+
"How should we know if a purchase has a minimum amount of effort and which is the right amount? This is a good place to start a review.\n",
|
100 |
+
"\n",
|
101 |
+
"One simple example of a money buyer looking to spend an extra $1 is when you can find a used car on eBay. It is likely the best starting point to consider as the one for the car you are buying for it at the time you get your check – and maybe even your retirement check even before you pay the $1,000 purchase deposit on it for your car. You may need to carefully evaluate your car before you buy it; check before you buy something for what you may want for your next purchase.\n",
|
102 |
+
"\n",
|
103 |
+
"Another way to look at it is when you sell it at a value around $\n"
|
104 |
+
]
|
105 |
+
}
|
106 |
+
],
|
107 |
+
"source": [
|
108 |
+
"from transformers import pipeline\n",
|
109 |
+
"\n",
|
110 |
+
"code_generator = pipeline(\"text-generation\", model=model, tokenizer=tokenizer)\n",
|
111 |
+
"\n",
|
112 |
+
"prompt = \"how is the time\"\n",
|
113 |
+
"generated_code = code_generator(prompt, max_length=200, num_return_sequences=1)\n",
|
114 |
+
"\n",
|
115 |
+
"print(generated_code[0][\"generated_text\"])"
|
116 |
+
]
|
117 |
+
},
|
118 |
+
{
|
119 |
+
"cell_type": "code",
|
120 |
+
"execution_count": 9,
|
121 |
+
"id": "f8cf6fc1-8fd9-4f68-b9c5-4b58e3f57293",
|
122 |
+
"metadata": {},
|
123 |
+
"outputs": [
|
124 |
+
{
|
125 |
+
"data": {
|
126 |
+
"text/plain": [
|
127 |
+
"('quantization_model/tokenizer_config.json',\n",
|
128 |
+
" 'quantization_model/special_tokens_map.json',\n",
|
129 |
+
" 'quantization_model/vocab.json',\n",
|
130 |
+
" 'quantization_model/merges.txt',\n",
|
131 |
+
" 'quantization_model/added_tokens.json',\n",
|
132 |
+
" 'quantization_model/tokenizer.json')"
|
133 |
+
]
|
134 |
+
},
|
135 |
+
"execution_count": 9,
|
136 |
+
"metadata": {},
|
137 |
+
"output_type": "execute_result"
|
138 |
+
}
|
139 |
+
],
|
140 |
+
"source": [
|
141 |
+
"model.save_pretrained(\"quantization_model\")\n",
|
142 |
+
"tokenizer.save_pretrained(\"quantization_model\")"
|
143 |
+
]
|
144 |
+
},
|
145 |
+
{
|
146 |
+
"cell_type": "code",
|
147 |
+
"execution_count": 10,
|
148 |
+
"id": "f3078dbe-a264-4e11-af3a-8e670ac6f7af",
|
149 |
+
"metadata": {},
|
150 |
+
"outputs": [
|
151 |
+
{
|
152 |
+
"name": "stdout",
|
153 |
+
"output_type": "stream",
|
154 |
+
"text": [
|
155 |
+
"Folder 'quantization_model' has been zipped as 'quantization_model.zip'.\n"
|
156 |
+
]
|
157 |
+
}
|
158 |
+
],
|
159 |
+
"source": [
|
160 |
+
"import shutil\n",
|
161 |
+
"\n",
|
162 |
+
"# Specify the folder to be zipped\n",
|
163 |
+
"folder_path = \"quantization_model\" # Replace with your actual folder name\n",
|
164 |
+
"zip_name = \"quantization_model.zip\" # Desired zip file name\n",
|
165 |
+
"\n",
|
166 |
+
"# Create a zip archive\n",
|
167 |
+
"shutil.make_archive(zip_name.replace('.zip', ''), 'zip', folder_path)\n",
|
168 |
+
"\n",
|
169 |
+
"print(f\"Folder '{folder_path}' has been zipped as '{zip_name}'.\")"
|
170 |
+
]
|
171 |
+
},
|
172 |
+
{
|
173 |
+
"cell_type": "code",
|
174 |
+
"execution_count": null,
|
175 |
+
"id": "f7f877b3-b7c3-4d68-a43f-21be464f3be8",
|
176 |
+
"metadata": {},
|
177 |
+
"outputs": [],
|
178 |
+
"source": []
|
179 |
+
}
|
180 |
+
],
|
181 |
+
"metadata": {
|
182 |
+
"kernelspec": {
|
183 |
+
"display_name": "Python 3",
|
184 |
+
"language": "python",
|
185 |
+
"name": "python3"
|
186 |
+
},
|
187 |
+
"language_info": {
|
188 |
+
"codemirror_mode": {
|
189 |
+
"name": "ipython",
|
190 |
+
"version": 3
|
191 |
+
},
|
192 |
+
"file_extension": ".py",
|
193 |
+
"mimetype": "text/x-python",
|
194 |
+
"name": "python",
|
195 |
+
"nbconvert_exporter": "python",
|
196 |
+
"pygments_lexer": "ipython3",
|
197 |
+
"version": "3.10.10"
|
198 |
+
}
|
199 |
+
},
|
200 |
+
"nbformat": 4,
|
201 |
+
"nbformat_minor": 5
|
202 |
+
}
|
quantization_model.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d10830fff9d9211bd03a1a808ac1b1e01c342a2f0379dd3924fde5c095d8cbdf
|
3 |
+
size 116323207
|
quantization_model/config.json
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "gpt2",
|
3 |
+
"activation_function": "gelu_new",
|
4 |
+
"architectures": [
|
5 |
+
"GPT2LMHeadModel"
|
6 |
+
],
|
7 |
+
"attn_pdrop": 0.1,
|
8 |
+
"bos_token_id": 50256,
|
9 |
+
"embd_pdrop": 0.1,
|
10 |
+
"eos_token_id": 50256,
|
11 |
+
"initializer_range": 0.02,
|
12 |
+
"layer_norm_epsilon": 1e-05,
|
13 |
+
"model_type": "gpt2",
|
14 |
+
"n_ctx": 1024,
|
15 |
+
"n_embd": 768,
|
16 |
+
"n_head": 12,
|
17 |
+
"n_inner": null,
|
18 |
+
"n_layer": 12,
|
19 |
+
"n_positions": 1024,
|
20 |
+
"quantization_config": {
|
21 |
+
"_load_in_4bit": true,
|
22 |
+
"_load_in_8bit": false,
|
23 |
+
"bnb_4bit_compute_dtype": "float32",
|
24 |
+
"bnb_4bit_quant_storage": "uint8",
|
25 |
+
"bnb_4bit_quant_type": "fp4",
|
26 |
+
"bnb_4bit_use_double_quant": false,
|
27 |
+
"llm_int8_enable_fp32_cpu_offload": false,
|
28 |
+
"llm_int8_has_fp16_weight": false,
|
29 |
+
"llm_int8_skip_modules": null,
|
30 |
+
"llm_int8_threshold": 6.0,
|
31 |
+
"load_in_4bit": true,
|
32 |
+
"load_in_8bit": false,
|
33 |
+
"quant_method": "bitsandbytes"
|
34 |
+
},
|
35 |
+
"reorder_and_upcast_attn": false,
|
36 |
+
"resid_pdrop": 0.1,
|
37 |
+
"scale_attn_by_inverse_layer_idx": false,
|
38 |
+
"scale_attn_weights": true,
|
39 |
+
"summary_activation": null,
|
40 |
+
"summary_first_dropout": 0.1,
|
41 |
+
"summary_proj_to_labels": true,
|
42 |
+
"summary_type": "cls_index",
|
43 |
+
"summary_use_proj": true,
|
44 |
+
"task_specific_params": {
|
45 |
+
"text-generation": {
|
46 |
+
"do_sample": true,
|
47 |
+
"max_length": 50
|
48 |
+
}
|
49 |
+
},
|
50 |
+
"torch_dtype": "float16",
|
51 |
+
"transformers_version": "4.49.0",
|
52 |
+
"use_cache": true,
|
53 |
+
"vocab_size": 50257
|
54 |
+
}
|
quantization_model/generation_config.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_from_model_config": true,
|
3 |
+
"bos_token_id": 50256,
|
4 |
+
"eos_token_id": 50256,
|
5 |
+
"transformers_version": "4.49.0"
|
6 |
+
}
|
quantization_model/merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
quantization_model/model.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7fea19c66e89f08ca30f8c04f0ad1f4dc3f02ef5a6da601b6fcac98af3196e93
|
3 |
+
size 126824188
|
quantization_model/special_tokens_map.json
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"bos_token": "<|endoftext|>",
|
3 |
+
"eos_token": "<|endoftext|>",
|
4 |
+
"unk_token": "<|endoftext|>"
|
5 |
+
}
|
quantization_model/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
quantization_model/tokenizer_config.json
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"add_prefix_space": false,
|
3 |
+
"added_tokens_decoder": {
|
4 |
+
"50256": {
|
5 |
+
"content": "<|endoftext|>",
|
6 |
+
"lstrip": false,
|
7 |
+
"normalized": true,
|
8 |
+
"rstrip": false,
|
9 |
+
"single_word": false,
|
10 |
+
"special": true
|
11 |
+
}
|
12 |
+
},
|
13 |
+
"bos_token": "<|endoftext|>",
|
14 |
+
"clean_up_tokenization_spaces": false,
|
15 |
+
"eos_token": "<|endoftext|>",
|
16 |
+
"extra_special_tokens": {},
|
17 |
+
"model_max_length": 1024,
|
18 |
+
"tokenizer_class": "GPT2Tokenizer",
|
19 |
+
"unk_token": "<|endoftext|>"
|
20 |
+
}
|
quantization_model/vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# NOTE: unpinned "transformers" removed — it duplicated the pinned
# "transformers==4.48.3" below and made pip fail with "Double requirement given".
|
2 |
+
pandas==2.2.2
|
3 |
+
torch==2.5.1
|
4 |
+
transformers==4.48.3
|
5 |
+
streamlit==1.41.1
|