Update app.py
app.py CHANGED
@@ -218,20 +218,23 @@ class RAGPipeline:
         self.initialize_model()


-
-    def initialize_model(self):
-        """Initialize the model with proper error handling and verification"""
+    @st.cache_resource
+    def initialize_model(_self):
+        """Initialize the model with proper error handling and verification
+
+        Note: Using _self instead of self for Streamlit caching compatibility
+        """
         try:
-            if not os.path.exists(self.model_path):
+            if not os.path.exists(_self.model_path):
                 direct_url = "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_K_M.gguf"
-                download_file_with_progress(direct_url, self.model_path)
+                download_file_with_progress(direct_url, _self.model_path)

             # Verify file exists and has content
-            if not os.path.exists(self.model_path):
-                raise FileNotFoundError(f"Model file {self.model_path} not found after download attempts")
+            if not os.path.exists(_self.model_path):
+                raise FileNotFoundError(f"Model file {_self.model_path} not found after download attempts")

-            if os.path.getsize(self.model_path) < 1000000:  # Less than 1MB
-                os.remove(self.model_path)
+            if os.path.getsize(_self.model_path) < 1000000:  # Less than 1MB
+                os.remove(_self.model_path)
                 raise ValueError("Downloaded model file is too small, likely corrupted")

             llm_config = {
@@ -242,13 +245,12 @@ class RAGPipeline:
                 "verbose": False
             }

-            self.llm = Llama(model_path=self.model_path, **llm_config)
+            _self.llm = Llama(model_path=_self.model_path, **llm_config)
             st.success("Model loaded successfully!")

         except Exception as e:
             st.error(f"Error initializing model: {str(e)}")
             raise
-

     @log_function
     @st.cache_data
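The substance of the commit is the caching change: initialize_model gains st.cache_resource, and self is renamed to _self because Streamlit builds a cache key by hashing a cached function's arguments, and a leading underscore is the documented way to exclude an argument from that hash (a pipeline instance is not hashable). A minimal sketch of the same pattern, assuming streamlit and llama-cpp-python are installed; the load_llm name and the n_ctx setting are illustrative only, since the Space's actual llm_config is mostly outside the visible hunk:

import streamlit as st
from llama_cpp import Llama

class RAGPipeline:
    def __init__(self, model_path: str):
        self.model_path = model_path
        self.llm = self.load_llm()

    @st.cache_resource
    def load_llm(_self):
        # st.cache_resource stores the returned object once per cache key and
        # hands the same instance back on every rerun, so the model loads
        # only once per server process instead of on every script run.
        return Llama(model_path=_self.model_path, n_ctx=2048, verbose=False)

Because _self is excluded from the key and there are no other parameters, every pipeline instance shares one cached model, which is normally the intent when a Space serves a single LLM.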
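download_file_with_progress is called in the hunk but defined elsewhere in app.py, so its body is not visible here. The stand-in below is a hypothetical sketch of such a helper using requests plus st.progress, not the Space's actual implementation:

import requests
import streamlit as st

def download_file_with_progress(url: str, dest_path: str) -> None:
    # Stream the response so a multi-gigabyte GGUF file is never held
    # fully in memory, updating a progress bar as chunks arrive.
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()
    total = int(response.headers.get("content-length", 0))
    bar = st.progress(0.0)
    received = 0
    with open(dest_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=1 << 20):
            f.write(chunk)
            received += len(chunk)
            if total:
                bar.progress(min(received / total, 1.0))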
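The trailing context lines show the next function keeping @st.cache_data, and the split matches Streamlit's API: cache_data is for serializable return values, which it copies on every hit, while cache_resource is for shared, unhashable objects like the Llama handle. A hypothetical illustration of the cache_data side:

import streamlit as st

@st.cache_data
def load_documents(path: str) -> list[str]:
    # cache_data returns a fresh copy on each call, so callers can mutate
    # the list without corrupting the cached value.
    with open(path, encoding="utf-8") as f:
        return f.read().splitlines()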