Update app.py
app.py CHANGED
@@ -24,8 +24,8 @@ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
 
 # Download gguf model files
 hf_hub_download(
-    repo_id="
-    filename="
+    repo_id="bartowski/google_gemma-3-1b-it-GGUF",
+    filename="google_gemma-3-1b-it-Q6_K.gguf",
     local_dir="./models",
 )
 hf_hub_download(
@@ -170,10 +170,10 @@ demo = gr.ChatInterface(
     additional_inputs=[
         gr.Dropdown(
             choices=[
-                "
+                "google_gemma-3-1b-it-Q6_K.gguf",
                 "google_gemma-3-1b-it-Q5_K_M.gguf",
             ],
             ],
-            value="
+            value="google_gemma-3-1b-it-Q5_K_M.gguf",
             label="Model",
             info="Select the AI model to use for chat",
         ),
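
For readers following the change: a minimal sketch of what the updated download block feeds into, assuming the Space loads the GGUF file with llama-cpp-python (the loading code is not part of this diff, and the n_ctx value is a hypothetical choice).

# Sketch only, not the Space's actual app.py: fetch the newly referenced
# Q6_K quant and load it with llama-cpp-python (an assumed backend).
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # assumption: llama-cpp-python is the GGUF runtime

model_path = hf_hub_download(
    repo_id="bartowski/google_gemma-3-1b-it-GGUF",
    filename="google_gemma-3-1b-it-Q6_K.gguf",
    local_dir="./models",
)  # returns the local path, i.e. ./models/google_gemma-3-1b-it-Q6_K.gguf

llm = Llama(model_path=model_path, n_ctx=4096)  # n_ctx is a hypothetical value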
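
The quant added to choices also has to reach the chat callback: gr.ChatInterface passes each entry of additional_inputs as an extra positional argument after message and history. A hedged, self-contained sketch of that wiring follows; the respond function is illustrative, not the Space's implementation.

import gradio as gr

def respond(message, history, model_filename):
    # model_filename receives the Dropdown selection, e.g. the new
    # "google_gemma-3-1b-it-Q6_K.gguf" or the default "google_gemma-3-1b-it-Q5_K_M.gguf".
    yield f"Selected model: {model_filename}"

demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(
            choices=[
                "google_gemma-3-1b-it-Q6_K.gguf",
                "google_gemma-3-1b-it-Q5_K_M.gguf",
            ],
            value="google_gemma-3-1b-it-Q5_K_M.gguf",
            label="Model",
            info="Select the AI model to use for chat",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()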