Commit 766fb2f · "new changes"
Parent: b40e24a
.gitignore
CHANGED
@@ -1 +1,3 @@
-hf_env/
+hf_env/
+Y
+Y.pub
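Y and Y.pub read like an ed25519 keypair, presumably from the ssh-keygen cell committed in app.ipynb below; ignoring them is the right call, since private key material should never enter a Space's git history. A minimal sketch (assuming it runs from the repository root) to confirm the new rules actually cover those paths:

import subprocess

# "git check-ignore" exits 0 when a path matches an ignore rule;
# "-v" prints which .gitignore line matched.
for name in ["hf_env/", "Y", "Y.pub"]:
    result = subprocess.run(["git", "check-ignore", "-v", name],
                            capture_output=True, text=True)
    status = "ignored" if result.returncode == 0 else "NOT ignored"
    print(f"{name}: {status} {result.stdout.strip()}")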
0.26.0
ADDED
@@ -0,0 +1,28 @@
+Collecting accelerate
+Using cached accelerate-1.5.2-py3-none-any.whl.metadata (19 kB)
+Requirement already satisfied: numpy<3.0.0,>=1.17 in f:\gemma testing\gemma_env\lib\site-packages (from accelerate) (2.2.4)
+Requirement already satisfied: packaging>=20.0 in f:\gemma testing\gemma_env\lib\site-packages (from accelerate) (24.2)
+Requirement already satisfied: psutil in f:\gemma testing\gemma_env\lib\site-packages (from accelerate) (7.0.0)
+Requirement already satisfied: pyyaml in f:\gemma testing\gemma_env\lib\site-packages (from accelerate) (6.0.2)
+Requirement already satisfied: torch>=2.0.0 in f:\gemma testing\gemma_env\lib\site-packages (from accelerate) (2.6.0+cu118)
+Requirement already satisfied: huggingface-hub>=0.21.0 in f:\gemma testing\gemma_env\lib\site-packages (from accelerate) (0.29.3)
+Requirement already satisfied: safetensors>=0.4.3 in f:\gemma testing\gemma_env\lib\site-packages (from accelerate) (0.5.3)
+Requirement already satisfied: filelock in f:\gemma testing\gemma_env\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (3.18.0)
+Requirement already satisfied: fsspec>=2023.5.0 in f:\gemma testing\gemma_env\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (2025.3.0)
+Requirement already satisfied: requests in f:\gemma testing\gemma_env\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (2.32.3)
+Requirement already satisfied: tqdm>=4.42.1 in f:\gemma testing\gemma_env\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (4.67.1)
+Requirement already satisfied: typing-extensions>=3.7.4.3 in f:\gemma testing\gemma_env\lib\site-packages (from huggingface-hub>=0.21.0->accelerate) (4.12.2)
+Requirement already satisfied: networkx in f:\gemma testing\gemma_env\lib\site-packages (from torch>=2.0.0->accelerate) (3.3)
+Requirement already satisfied: jinja2 in f:\gemma testing\gemma_env\lib\site-packages (from torch>=2.0.0->accelerate) (3.1.6)
+Requirement already satisfied: setuptools in f:\gemma testing\gemma_env\lib\site-packages (from torch>=2.0.0->accelerate) (77.0.3)
+Requirement already satisfied: sympy==1.13.1 in f:\gemma testing\gemma_env\lib\site-packages (from torch>=2.0.0->accelerate) (1.13.1)
+Requirement already satisfied: mpmath<1.4,>=1.1.0 in f:\gemma testing\gemma_env\lib\site-packages (from sympy==1.13.1->torch>=2.0.0->accelerate) (1.3.0)
+Requirement already satisfied: colorama in f:\gemma testing\gemma_env\lib\site-packages (from tqdm>=4.42.1->huggingface-hub>=0.21.0->accelerate) (0.4.6)
+Requirement already satisfied: MarkupSafe>=2.0 in f:\gemma testing\gemma_env\lib\site-packages (from jinja2->torch>=2.0.0->accelerate) (3.0.2)
+Requirement already satisfied: charset-normalizer<4,>=2 in f:\gemma testing\gemma_env\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (3.4.1)
+Requirement already satisfied: idna<4,>=2.5 in f:\gemma testing\gemma_env\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (3.10)
+Requirement already satisfied: urllib3<3,>=1.21.1 in f:\gemma testing\gemma_env\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (2.3.0)
+Requirement already satisfied: certifi>=2017.4.17 in f:\gemma testing\gemma_env\lib\site-packages (from requests->huggingface-hub>=0.21.0->accelerate) (2025.1.31)
+Using cached accelerate-1.5.2-py3-none-any.whl (345 kB)
+Installing collected packages: accelerate
+Successfully installed accelerate-1.5.2
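A file literally named 0.26.0 that contains pip's install log is almost certainly an accident: in a shell, an unquoted requirement such as accelerate>=0.26.0 lets ">" act as output redirection, dumping the log into a version-named file. A minimal sketch of an install that cannot be mangled this way (the intended command is an assumption; bypassing the shell via subprocess means the specifier is passed as a single argument):

import subprocess
import sys

# Invoking pip through the running interpreter: the shell never sees ">",
# so no stray log file is created and the correct environment is targeted.
subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate>=0.26.0"])

The interactive-shell equivalent is pip install "accelerate>=0.26.0", with the specifier quoted. Either way, the log file itself should probably be deleted from the repo rather than committed.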
app.ipynb
ADDED
@@ -0,0 +1,526 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer\n",
+    "import torch"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model_id = \"google/gemma-3-12b-it\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "\n",
+    "\n",
+    "processor = AutoProcessor.from_pretrained(model_id, padding_side=\"left\")\n",
+    "model = Gemma3ForConditionalGeneration.from_pretrained(\n",
+    "    model_id, device_map=\"auto\", torch_dtype=torch.bfloat16, attn_implementation=\"eager\"\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from transformers import pipeline\n",
+    "import torch\n",
+    "\n",
+    "pipe = pipeline(\n",
+    "    \"image-text-to-text\",\n",
+    "    model=\"google/gemma-3-4b-it\",\n",
+    "    device=\"cuda\",\n",
+    "    torch_dtype=torch.bfloat16,\n",
+    "    cache_dir=\"F:\\\\huggingface_cache\"\n",
+    ")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# pip install accelerate\n",
+    "print(\"Hi\")\n",
+    "from transformers import AutoProcessor, Gemma3ForConditionalGeneration\n",
+    "import requests\n",
+    "import torch\n",
+    "from PIL import Image\n",
+    "\n",
+    "print(\"Done\")\n",
+    "model_id = \"google/gemma-3-4b-it\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "pip install bitsandbytes\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Using device: cuda\n"
+     ]
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "70d5eaba90854abfb20ffc43970c226c",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Some parameters are on the meta device because they were offloaded to the cpu.\n",
+      "Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.50, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n"
+     ]
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "from transformers import Gemma3ForConditionalGeneration, AutoProcessor\n",
+    "from transformers import BitsAndBytesConfig\n",
+    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
+    "print(f\"Using device: {device}\")\n",
+    "\n",
+    "model_id = \"google/gemma-3-4b-it\"\n",
+    "\n",
+    "\n",
+    "quantization_config = BitsAndBytesConfig(load_in_8bit=True,  # Load in 8-bit mode\n",
+    "    llm_int8_enable_fp32_cpu_offload=True  # Allow CPU offloading for layers that don't fit in VRAM\n",
+    ")\n",
+    "# Load the model and move it to the correct device\n",
+    "model = Gemma3ForConditionalGeneration.from_pretrained(\n",
+    "    model_id,\n",
+    "    cache_dir=\"F:\\\\huggingface_cache\",\n",
+    "    device_map=\"auto\",  # Automatically assigns layers to available devices\n",
+    "    quantization_config=quantization_config\n",
+    ").eval()\n",
+    "\n",
+    "# Load the processor\n",
+    "processor = AutoProcessor.from_pretrained(model_id, cache_dir=\"F:\\\\huggingface_cache\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "messages = [\n",
+    "    {\n",
+    "        \"role\": \"system\",\n",
+    "        \"content\": [{\"type\": \"text\", \"text\": \"You are a helpful assistant.\"}]\n",
+    "    },\n",
+    "    {\n",
+    "        \"role\": \"user\",\n",
+    "        \"content\": [\n",
+    "\n",
+    "            {\"type\": \"text\", \"text\": \"Whats the color of sky?.\"}\n",
+    "        ]\n",
+    "    }\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "inputs = processor.apply_chat_template(\n",
+    "    messages, add_generation_prompt=True, tokenize=True,\n",
+    "    return_dict=True, return_tensors=\"pt\"\n",
+    ").to(model.device, dtype=torch.bfloat16)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "input_len = inputs[\"input_ids\"].shape[-1]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "F:\\HF\\gemma-examples\\hf_env\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:633: UserWarning: `do_sample` is set to `False`. However, `top_p` is set to `0.95` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_p`.\n",
+      "  warnings.warn(\n",
+      "F:\\HF\\gemma-examples\\hf_env\\Lib\\site-packages\\transformers\\generation\\configuration_utils.py:650: UserWarning: `do_sample` is set to `False`. However, `top_k` is set to `64` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `top_k`.\n",
+      "  warnings.warn(\n",
+      "\n",
+      "KeyboardInterrupt\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "with torch.inference_mode():\n",
+    "    generation = model.generate(**inputs, max_new_tokens=100, do_sample=False)\n",
+    "    generation = generation[0][input_len:]\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "with torch.inference_mode():\n",
+    "    generation = model.generate(**inputs, max_new_tokens=100, do_sample=False)\n",
+    "    generation = generation[0][input_len:]\n",
+    "\n",
+    "decoded = processor.decode(generation, skip_special_tokens=True)\n",
+    "print(decoded)\n",
+    "\n",
+    "# **Overall Impression:** The image is a close-up shot of a vibrant garden scene, \n",
+    "# focusing on a cluster of pink cosmos flowers and a busy bumblebee. \n",
+    "# It has a slightly soft, natural feel, likely captured in daylight.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "ssh-keygen -t ed25519 -C \"[email protected]\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "access_token='hf_********'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "SHA256:5m1PnI3v38H/z5cecFQQKtSAveRgolJirSjhjixvkVs [email protected]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Use a pipeline as a high-level helper\n",
+    "from transformers import pipeline\n",
+    "\n",
+    "messages = [\n",
+    "    {\"role\": \"user\", \"content\": \"Who are you?\"},\n",
+    "]\n",
+    "pipe = pipeline(\"image-text-to-text\", model=\"google/gemma-3-4b-it\")\n",
+    "pipe(messages)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Model ID\n",
+    "model_id = \"gemma3:latest\"\n",
+    "\n",
+    "from ollama import chat\n",
+    "from ollama import ChatResponse\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "That's a fantastic question, and one that's fascinated people for centuries! The short answer is: **it's due to a phenomenon called Rayleigh scattering.** Here's a more detailed explanation:\n",
+      "\n",
+      "**1. Sunlight and Colors:**\n",
+      "\n",
+      "* Sunlight appears white, but it's actually made up of *all* the colors of the rainbow – red, orange, yellow, green, blue, indigo, and violet. Think of a prism splitting light.\n",
+      "\n",
+      "**2. The Atmosphere and Molecules:**\n",
+      "\n",
+      "* The Earth's atmosphere is filled with tiny particles – mostly nitrogen and oxygen molecules.\n",
+      "* These molecules are much smaller than the wavelengths of visible light.\n",
+      "\n",
+      "**3. Rayleigh Scattering:**\n",
+      "\n",
+      "* When sunlight enters the atmosphere, it collides with these tiny air molecules.\n",
+      "* **Rayleigh scattering** is the scattering of electromagnetic radiation (like light) by particles of a much smaller wavelength.\n",
+      "* **Crucially, shorter wavelengths of light (blue and violet) are scattered *much* more strongly than longer wavelengths (red and orange).** This is because the amount of scattering is inversely proportional to the fourth power of the wavelength. (That's a fancy way of saying that blue light gets scattered *way* more intensely than red light.)\n",
+      "\n",
+      "**4. Why Blue Specifically?**\n",
+      "\n",
+      "* Violet light is scattered even *more* than blue light. However, there are a couple of reasons why we see a blue sky instead of a violet one:\n",
+      "    * **Sunlight emits less violet light than blue light.**\n",
+      "    * **Our eyes are more sensitive to blue light than violet light.**\n",
+      "\n",
+      "\n",
+      "**In simpler terms:**\n",
+      "\n",
+      "Imagine throwing a small ball (blue light) and a large ball (red light) at a bumpy surface (the atmosphere). The small ball is more likely to bounce off in all directions, while the large ball is more likely to continue straight on. Blue light is \"bounced around\" more in the atmosphere, spreading it across the sky.\n",
+      "\n",
+      "**Why sunsets are red/orange:**\n",
+      "\n",
+      "At sunset (and sunrise), the sunlight has to travel through *much* more of the atmosphere to reach our eyes. During this longer journey, most of the blue light has already been scattered away. The longer wavelengths – red and orange – are less affected and can make it through, giving us those beautiful sunset colors.\n",
+      "\n",
+      "\n",
+      "\n",
+      "**Resources for further learning:**\n",
+      "\n",
+      "* **NASA - Why is the sky blue?:** [https://science.nasa.gov/sky-facts/why-is-the-sky-blue/](https://science.nasa.gov/sky-facts/why-is-the-sky-blue/)\n",
+      "* **Wikipedia - Rayleigh scattering:** [https://en.wikipedia.org/wiki/Rayleigh_scattering](https://en.wikipedia.org/wiki/Rayleigh_scattering)\n",
+      "\n",
+      "\n",
+      "Do you want me to delve into any specific aspect of this explanation, such as:\n",
+      "\n",
+      "* The math behind Rayleigh scattering?\n",
+      "* How this affects other celestial phenomena?\n",
+      "That's a fantastic question, and one that's fascinated people for centuries! The short answer is: **it's due to a phenomenon called Rayleigh scattering.** Here's a more detailed explanation:\n",
+      "\n",
+      "**1. Sunlight and Colors:**\n",
+      "\n",
+      "* Sunlight appears white, but it's actually made up of *all* the colors of the rainbow – red, orange, yellow, green, blue, indigo, and violet. Think of a prism splitting light.\n",
+      "\n",
+      "**2. The Atmosphere and Molecules:**\n",
+      "\n",
+      "* The Earth's atmosphere is filled with tiny particles – mostly nitrogen and oxygen molecules.\n",
+      "* These molecules are much smaller than the wavelengths of visible light.\n",
+      "\n",
+      "**3. Rayleigh Scattering:**\n",
+      "\n",
+      "* When sunlight enters the atmosphere, it collides with these tiny air molecules.\n",
+      "* **Rayleigh scattering** is the scattering of electromagnetic radiation (like light) by particles of a much smaller wavelength.\n",
+      "* **Crucially, shorter wavelengths of light (blue and violet) are scattered *much* more strongly than longer wavelengths (red and orange).** This is because the amount of scattering is inversely proportional to the fourth power of the wavelength. (That's a fancy way of saying that blue light gets scattered *way* more intensely than red light.)\n",
+      "\n",
+      "**4. Why Blue Specifically?**\n",
+      "\n",
+      "* Violet light is scattered even *more* than blue light. However, there are a couple of reasons why we see a blue sky instead of a violet one:\n",
+      "    * **Sunlight emits less violet light than blue light.**\n",
+      "    * **Our eyes are more sensitive to blue light than violet light.**\n",
+      "\n",
+      "\n",
+      "**In simpler terms:**\n",
+      "\n",
+      "Imagine throwing a small ball (blue light) and a large ball (red light) at a bumpy surface (the atmosphere). The small ball is more likely to bounce off in all directions, while the large ball is more likely to continue straight on. Blue light is \"bounced around\" more in the atmosphere, spreading it across the sky.\n",
+      "\n",
+      "**Why sunsets are red/orange:**\n",
+      "\n",
+      "At sunset (and sunrise), the sunlight has to travel through *much* more of the atmosphere to reach our eyes. During this longer journey, most of the blue light has already been scattered away. The longer wavelengths – red and orange – are less affected and can make it through, giving us those beautiful sunset colors.\n",
+      "\n",
+      "\n",
+      "\n",
+      "**Resources for further learning:**\n",
+      "\n",
+      "* **NASA - Why is the sky blue?:** [https://science.nasa.gov/sky-facts/why-is-the-sky-blue/](https://science.nasa.gov/sky-facts/why-is-the-sky-blue/)\n",
+      "* **Wikipedia - Rayleigh scattering:** [https://en.wikipedia.org/wiki/Rayleigh_scattering](https://en.wikipedia.org/wiki/Rayleigh_scattering)\n",
+      "\n",
+      "\n",
+      "Do you want me to delve into any specific aspect of this explanation, such as:\n",
+      "\n",
+      "* The math behind Rayleigh scattering?\n",
+      "* How this affects other celestial phenomena?\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "response: ChatResponse = chat(model=model_id, messages=[\n",
+    "    {\n",
+    "        'role': 'user',\n",
+    "        'content': 'Why is the sky blue?',\n",
+    "    },\n",
+    "])\n",
+    "print(response['message']['content'])\n",
+    "# or access fields directly from the response object\n",
+    "print(response.message.content)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "hf_env",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
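app.ipynb imports TextIteratorStreamer but never exercises it. For reference, a minimal streaming sketch reusing the notebook's own names (model, processor, inputs are assumed to exist from the cells above): generation runs on a worker thread while the main thread prints tokens as they arrive.

from threading import Thread
from transformers import TextIteratorStreamer

# skip_prompt drops the echoed input; decode kwargs strip special tokens.
streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True,
                                skip_special_tokens=True)
thread = Thread(target=model.generate,
                kwargs=dict(**inputs, max_new_tokens=100, streamer=streamer))
thread.start()
for piece in streamer:
    print(piece, end="", flush=True)
thread.join()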
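The commented-out sample output in app.ipynb (pink cosmos flowers, a busy bumblebee) suggests an image prompt was dropped from the final messages list. A sketch of how the committed image-text-to-text pipeline would accept one, assuming the public documentation image that the description matches:

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
image_messages = [
    {"role": "user", "content": [
        {"type": "image", "url": url},
        {"type": "text", "text": "Describe this image in detail."},
    ]},
]
# pipe is the image-text-to-text pipeline built earlier in the notebook;
# the last chat turn of the result holds the model's reply.
result = pipe(text=image_messages, max_new_tokens=100)
print(result[0]["generated_text"][-1]["content"])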
app2.py
ADDED
@@ -0,0 +1,27 @@
+from PIL import Image
+import requests
+import torch
+import os
+# Set the cache directory
+cache_dir = "F:\\huggingface_cache"
+
+# Set environment variables for good measure
+# os.environ["TRANSFORMERS_CACHE"] = cache_dir
+# os.environ["HF_HOME"] = cache_dir
+# os.environ["HUGGINGFACE_HUB_CACHE"] = cache_dir
+
+# Model ID
+model_id = "google/gemma-3-4b-it"
+
+from ollama import chat
+from ollama import ChatResponse
+
+response: ChatResponse = chat(model='llama3.2', messages=[
+    {
+        'role': 'user',
+        'content': 'Why is the sky blue?',
+    },
+])
+print(response['message']['content'])
+# or access fields directly from the response object
+print(response.message.content)