{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer\n", "import torch" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model_id = \"google/gemma-3-12b-it\"" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [ "\n", "\n", "processor = AutoProcessor.from_pretrained(model_id, padding_side=\"left\")\n", "model = Gemma3ForConditionalGeneration.from_pretrained(\n", " model_id, device_map=\"auto\", torch_dtype=torch.bfloat16, attn_implementation=\"eager\"\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from transformers import pipeline\n", "import torch\n", "\n", "pipe = pipeline(\n", " \"image-text-to-text\",\n", " model=\"google/gemma-3-4b-it\",\n", " device=\"cuda\",\n", " torch_dtype=torch.bfloat16,\n", " cache_dir=\"F:\\\\huggingface_cache\"\n", ")\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# pip install accelerate\n", "print(\"Hi\")\n", "from transformers import AutoProcessor, Gemma3ForConditionalGeneration\n", "import requests\n", "import torch\n", "from PIL import Image\n", "\n", "print(\"Done\")\n", "model_id = \"google/gemma-3-4b-it\"" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [ "pip install bitsandbytes\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Using device: cuda\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "70d5eaba90854abfb20ffc43970c226c", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Loading checkpoint shards: 0%| | 0/2 [00:00