Spaces:
Running
on
Zero
Running
on
Zero
mjavaid
committed on
Commit
·
642f587
1
Parent(s):
964cebe
first commit
Browse files
app.py
CHANGED
@@ -2,6 +2,9 @@ import spaces
|
|
2 |
import gradio as gr
|
3 |
from transformers import pipeline
|
4 |
import torch
|
|
|
|
|
|
|
5 |
|
6 |
# Load the Gemma 3 pipeline.
|
7 |
# Gemma 3 is a multimodal model that accepts text and image inputs.
|
@@ -9,7 +12,8 @@ pipe = pipeline(
|
|
9 |
"image-text-to-text",
|
10 |
model="google/gemma-3-4b-it",
|
11 |
device="cuda" if torch.cuda.is_available() else "cpu",
|
12 |
-
torch_dtype=torch.bfloat16 if torch.cuda.is_available() else None
|
|
|
13 |
)
|
14 |
@spaces.GPU
|
15 |
def generate_response(user_text, user_image, history):
|
|
|
2 |
import gradio as gr
|
3 |
from transformers import pipeline
|
4 |
import torch
|
5 |
+
import os
|
6 |
+
|
7 |
+
hf_token = os.environ["HF_TOKEN"]
|
8 |
|
9 |
# Load the Gemma 3 pipeline.
|
10 |
# Gemma 3 is a multimodal model that accepts text and image inputs.
|
|
|
12 |
"image-text-to-text",
|
13 |
model="google/gemma-3-4b-it",
|
14 |
device="cuda" if torch.cuda.is_available() else "cpu",
|
15 |
+
torch_dtype=torch.bfloat16 if torch.cuda.is_available() else None,
|
16 |
+
use_auth_token=hf_token
|
17 |
)
|
18 |
@spaces.GPU
|
19 |
def generate_response(user_text, user_image, history):
|