Updated warning for gated models
app.py

@@ -114,7 +114,7 @@ def _run(path,genes,N,progress_bar):
             return None
         except OSError as e:
             print(f"Error loading model '{path}': {e}")
-            gr.Warning("Model doesn't seem to exist on the HuggingFace Hub. Please check the model name and try again.")
+            gr.Warning("Model doesn't seem to exist on the HuggingFace Hub or might be gated. Please check the model name and try again.")
             return None
         except RuntimeError as e:
             if 'out of memory' in str(e):
@@ -143,11 +143,11 @@ def run(path,progress_bar):
         try:
             # Download the model to cache
             if download_llm_to_cache(path) is None:
-                gr.Warning("Model not found on Hugging Face Hub. Please check the model name and try again.")
+                gr.Warning("Model not found on Hugging Face Hub or might be gated. Please check the model name and try again.")
                 return None
         except OSError as e:
             print(f"Error downloading model: {e}")
-            gr.Warning("Model not found on Hugging Face Hub. Please check the model name and try again.")
+            gr.Warning("Model not found on Hugging Face Hub or might be gated. Please check the model name and try again.")
             return None
 
     # Load the model
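The updated messages fold two failure modes into one warning: the repo id may not exist, or it may exist but be gated. For reference, here is a minimal, hypothetical sketch of how a helper like download_llm_to_cache could tell those cases apart using huggingface_hub's dedicated exceptions; the helper's actual implementation is not shown in this diff, so the structure and names below are assumptions.

# Hypothetical sketch (not part of this commit): distinguishing a gated repo
# from a missing one instead of relying on a catch-all OSError.
from huggingface_hub import snapshot_download
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError

def download_llm_to_cache(path):
    """Download the model repo to the local HF cache; return its path or None."""
    try:
        return snapshot_download(repo_id=path)
    except GatedRepoError:
        # Repo exists but access must be requested or a token with permission supplied.
        print(f"Model '{path}' is gated on the Hugging Face Hub.")
        return None
    except RepositoryNotFoundError:
        # Repo id does not exist (or is private and invisible without authentication).
        print(f"Model '{path}' was not found on the Hugging Face Hub.")
        return None

Note that GatedRepoError is a subclass of RepositoryNotFoundError in huggingface_hub, so the gated case has to be caught first for the distinction to take effect.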