Update app.py

app.py CHANGED
@@ -3,21 +3,29 @@ import requests
 import streamlit as st
 from llama_cpp import Llama
 
-# ✅ Define model path
+# ✅ Define model path
 MODEL_PATH = "./Phi-3-mini-4k-instruct-q4.gguf"
 MODEL_URL = "https://huggingface.co/YourModelRepo/Phi-3-mini-4k-instruct-q4.gguf"
 
+# ✅ Get Hugging Face API token from environment variable
+HF_TOKEN = os.getenv("HF_TOKEN")  # Set this securely in your Hugging Face Space
+HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"} if HF_TOKEN else {}
+
 # ✅ Check if model exists, otherwise download
 if not os.path.exists(MODEL_PATH):
     st.info("Downloading the model file. Please wait...")
-
-
-
-
-
-
-
-
+    try:
+        with requests.get(MODEL_URL, headers=HEADERS, stream=True) as response:
+            response.raise_for_status()  # Stops the script if download fails
+            with open(MODEL_PATH, "wb") as f:
+                for chunk in response.iter_content(chunk_size=8192):
+                    f.write(chunk)
+        st.success("Model downloaded successfully!")
+    except requests.exceptions.HTTPError as e:
+        st.error(f"🚨 Model download failed: {e}")
+        st.stop()
+
+# ✅ Load model
 if "model" not in st.session_state:
     st.session_state["model"] = Llama(model_path=MODEL_PATH, n_ctx=4096)
 
@@ -54,7 +62,8 @@ if st.button("Send") and user_input:
     st.session_state["messages"].append(("assistant", response))
     st.chat_message("assistant").write(response)
 
-# Run the app with: streamlit run
+# Run the app with: streamlit run app.py
+
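For reference, the download flow this commit introduces can be exercised outside Streamlit. The sketch below is a minimal standalone version of the same logic (HF_TOKEN read from the environment, optional Bearer header, streamed chunked write), assuming the placeholder MODEL_URL from the diff; the ensure_model helper name is illustrative and not part of app.py.

import os
import requests

MODEL_PATH = "./Phi-3-mini-4k-instruct-q4.gguf"
# Placeholder from the diff; point this at a real resolve URL for a .gguf file.
MODEL_URL = "https://huggingface.co/YourModelRepo/Phi-3-mini-4k-instruct-q4.gguf"

def ensure_model(path=MODEL_PATH, url=MODEL_URL):
    """Download the GGUF file once, sending a Bearer token when HF_TOKEN is set."""
    if os.path.exists(path):
        return path
    token = os.getenv("HF_TOKEN")  # store as a secret in the Space settings, not in code
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    with requests.get(url, headers=headers, stream=True, timeout=60) as response:
        response.raise_for_status()  # surfaces 401/403 from gated or private repos
        with open(path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
    return path

if __name__ == "__main__":
    print(f"Model available at {ensure_model()}")

Public files download fine with empty headers; gated or private repositories need the token, which is why the commit sends it only when HF_TOKEN is present.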
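The diff shows the assistant response being appended and rendered but not how it is generated. Assuming app.py calls llama-cpp-python's OpenAI-style chat API on the Llama instance kept in st.session_state, the generation step might look like this sketch; the actual prompt handling in the app may differ.

from llama_cpp import Llama

# Same constructor arguments as in the diff.
llm = Llama(model_path="./Phi-3-mini-4k-instruct-q4.gguf", n_ctx=4096)

# llama-cpp-python exposes an OpenAI-style chat completion method.
result = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(result["choices"][0]["message"]["content"])

As the final comment in the diff notes, the app itself is started with: streamlit run app.py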