#!/bin/bash

# Start llama-server in background
cd /llama.cpp/build
./bin/llama-server --host 0.0.0.0 --port 8081 --model /models/model.q8_0.gguf --ctx-size 32768 &

# Wait for llama-server to initialize
echo "Waiting for llama-server to start..."
until curl -s "http://localhost:8081/v1/models" >/dev/null; do
    sleep 1
done

echo "llama-server is ready."

# Start Go application (main service)
echo "Starting Go application..."
cd /app
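# exec replaces this shell with ./main, so the Go binary becomes the main
# process and receives signals (e.g. SIGTERM) directly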
exec ./main