phi3
app.py
CHANGED
@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
 
 app = FastAPI()
 
-MODEL_NAME = '
+MODEL_NAME = 'phi3:mini'
 
 @lru_cache()
 def get_llm():
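The diff shows only the first line of get_llm(), but a zero-argument function under @lru_cache() is the standard idiom for building an expensive resource once and sharing it across requests. Below is a minimal sketch of how this app plausibly fits together, assuming it talks to the Ollama server over its default HTTP API; the /ask route, the Prompt schema, and OLLAMA_URL are illustrative assumptions, not taken from the diff.

import logging
from functools import lru_cache

import httpx
from fastapi import FastAPI
from pydantic import BaseModel

logger = logging.getLogger(__name__)

app = FastAPI()

MODEL_NAME = 'phi3:mini'
OLLAMA_URL = 'http://localhost:11434'  # assumed: Ollama's default address

@lru_cache()
def get_llm() -> httpx.Client:
    # Zero-argument function + lru_cache() means one shared client for
    # the whole process; the connection pool is built on first use only.
    logger.info("creating Ollama client for %s", MODEL_NAME)
    return httpx.Client(base_url=OLLAMA_URL, timeout=120.0)

class Prompt(BaseModel):
    text: str

@app.post('/ask')  # hypothetical route; not shown in the diff
def ask(prompt: Prompt):
    resp = get_llm().post('/api/generate', json={
        'model': MODEL_NAME,
        'prompt': prompt.text,
        'stream': False,  # ask Ollama for one JSON object, not a stream
    })
    resp.raise_for_status()
    return {'answer': resp.json().get('response', '')}

Because get_llm() takes no arguments, lru_cache() memoizes a single client instance, so every request reuses the same connection pool instead of rebuilding it per call.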
start.sh
CHANGED
@@ -9,8 +9,8 @@ export CUDA_VISIBLE_DEVICES=0 # Use the first GPU if available
 ollama serve & # Use GPU 0 if available
 
 # Pull the model if not already present
-if ! ollama list | grep -q "
-  ollama pull
+if ! ollama list | grep -q "phi3:mini"; then
+  ollama pull phi3:mini
 fi
 
 # Wait for Ollama to start up (use a more robust check)
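The last context line ("use a more robust check") points at the startup wait. The script already gates work on state it can observe (`ollama list | grep -q "phi3:mini"` skips the pull when the model is cached), and the natural readiness probe follows the same idea: poll the Ollama HTTP API until it answers rather than sleeping a fixed interval. A minimal sketch of such a check, assuming Ollama's default port 11434 and a 30-second cap, neither of which appears in the diff:

# Sketch of the "more robust check": poll the Ollama API instead of
# sleeping a fixed amount. Port 11434 (Ollama's default) and the
# 30-attempt cap are assumptions, not values from the diff.
for i in $(seq 1 30); do
  if curl -sf http://localhost:11434/api/tags > /dev/null; then
    echo "Ollama is ready"
    break
  fi
  echo "Waiting for Ollama... ($i/30)"
  sleep 1
done

Polling /api/tags (the endpoint behind `ollama list`) exits the loop as soon as the server actually responds, which also covers slow first boots.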