Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -8,18 +8,14 @@ from transformers import pipeline
 
 # List of available models
 available_models = {
-    "Qwen/Qwen2-1.5B-Instruct": "Qwen 2 (1.5B)",
     "Qwen/Qwen2.5-72B-Instruct": "Qwen 2.5 (72B)",
     "Hermes-3-Llama-3.1-8B": "Hermes 3 Llama 3.1 (8B)",
     "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen 2.5 Coder (32B)",
-    "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "Nvidia Nemotron (70B)",
-    "CohereForAI/c4ai-command-r-plus-08-2024": "Cohere Command R+ (08/2024)",
     "mistralai/Mistral-Small-3.1-24B-Instruct-2503": "Mistral Small 3.1 (24B)",
     "google/gemma-3-27b-it": "Google Gemma 3 (27B)",
     "gemma-3-27b-it-abliterated": "Gemma 3 Abliterated (27B)",
     "nvidia/Llama-3.1-Nemotron-Nano-8B-v1": "Nvidia Nemotron Nano (8B)",
     "bartowski/mistralai_Mistral-Small-3.1-24B-Instruct-2503-GGUF": "Mistral Small GGUF (24B)",
-    "CohereForAI/c4ai-command-a-03-2025": "Cohere Command A (03/2025)",
     "open-r1/OlympicCoder-32B": "Olympic Coder (32B)"
 }
 
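The rest of app.py is not shown in this hunk, so the following is only a minimal sketch of how a model-selection dict like available_models might feed a transformers pipeline. The get_pipeline helper, the caching dict, and the label-to-ID lookup are illustrative assumptions, not the Space's actual code; only the model IDs and labels come from the diff above.

from transformers import pipeline

# Trimmed to two entries from the diff for brevity.
available_models = {
    "Qwen/Qwen2.5-72B-Instruct": "Qwen 2.5 (72B)",
    "open-r1/OlympicCoder-32B": "Olympic Coder (32B)",
}

_pipelines = {}  # hypothetical cache so each model is loaded at most once


def get_pipeline(model_id: str):
    """Return a cached text-generation pipeline for a listed model (illustrative helper)."""
    if model_id not in available_models:
        raise ValueError(f"Unknown model: {model_id}")
    if model_id not in _pipelines:
        _pipelines[model_id] = pipeline("text-generation", model=model_id)
    return _pipelines[model_id]


if __name__ == "__main__":
    # A UI would typically show the friendly labels, so map label -> model ID.
    label_to_id = {label: mid for mid, label in available_models.items()}
    pipe = get_pipeline(label_to_id["Olympic Coder (32B)"])
    print(pipe("Write a haiku about GPUs.", max_new_tokens=32)[0]["generated_text"])

Note that loading the larger checkpoints in the list this way requires substantial GPU memory; a Space may instead call a hosted inference endpoint, which this sketch does not attempt to reproduce.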