#!/bin/bash

# Start the Ollama server in the background
ollama serve &

# Wait for the server to be ready
sleep 5

# Pull the necessary models
#ollama pull deepseek-r1:1.5b
#ollama pull llama3.2:1b
ollama pull granite-embedding
ollama pull nomic-embed-text
ollama pull granite3-moe
ollama pull granite3-moe:3b

# Keep the container running
tail -f /dev/null
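
# Note: the fixed "sleep 5" above is a simple heuristic and can race with a
# slow server start. A sketch of an explicit readiness poll, assuming curl is
# available in the image and the server listens on the default
# localhost:11434 (a GET on the root path returns "Ollama is running"):
#
#   until curl -sf http://localhost:11434/ > /dev/null; do
#     sleep 1
#   done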