deepseek-r1 / start.sh
#!/bin/bash
# Set environment variables for optimization
export OMP_NUM_THREADS=4
export MKL_NUM_THREADS=4
export CUDA_VISIBLE_DEVICES=0
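# Note (added explanation, not in the original script): OMP_NUM_THREADS and
# MKL_NUM_THREADS cap the CPU threads used by the inference math libraries,
# and CUDA_VISIBLE_DEVICES=0 exposes only the first GPU; on a CPU-only host
# Ollama should simply fall back to CPU.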
# Start Ollama in the background
ollama serve &

# Wait for Ollama to start up (the server must be reachable before any pull)
max_attempts=30
attempt=0
while ! curl -s http://localhost:11434/api/tags >/dev/null; do
    sleep 1
    attempt=$((attempt + 1))
    if [ "$attempt" -eq "$max_attempts" ]; then
        echo "Ollama failed to start within 30 seconds. Exiting."
        exit 1
    fi
done
echo "Ollama is ready."

# Pull the models if not already present
if ! ollama list | grep -q "tinyllama"; then
    ollama pull tinyllama
fi
ollama pull smollm:135m
ollama pull deepseek-r1:1.5b

# Print the app URL
echo "App is running on: http://0.0.0.0:7860"
# Start the Streamlit app (blocks; serves the UI on port 7860)
streamlit run app.py --server.port 7860
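
# Optional sanity check (a sketch, not part of the original script): once the
# pulls finish, the Ollama HTTP API on port 11434 can be queried directly to
# confirm that a pulled model responds, for example:
#   curl -s http://localhost:11434/api/tags
#   curl -s http://localhost:11434/api/generate \
#     -d '{"model": "deepseek-r1:1.5b", "prompt": "Hello", "stream": false}'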