Commit 697b1f4 (parent: 912015d): transformers fix?

Files changed: app.py (+1 -1), requirements.txt (+1 -1)
app.py
CHANGED
@@ -53,7 +53,7 @@ def load_model():
         device_map="auto",
         torch_dtype=torch.float16,
         # quantization_config=quantization_config,
-        # attn_implementation="flash_attention_2",
+        # attn_implementation="flash_attention_2",
         trust_remote_code = True
     )
     # model.to("cpu")
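For context, a minimal sketch of how the surrounding load_model() plausibly reads after this commit. Only the hunk above is visible, so the AutoModelForCausalLM import, the model_id placeholder, and the return statement are assumptions:

import torch
from transformers import AutoModelForCausalLM

def load_model():
    # Hypothetical placeholder; the real checkpoint id is not shown in this diff.
    model_id = "org/model-name"
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",            # let accelerate place weights across available devices
        torch_dtype=torch.float16,    # half precision to reduce memory use
        # quantization_config=quantization_config,  # disabled in this Space
        # attn_implementation="flash_attention_2",  # disabled; flash-attn likely unavailable here
        trust_remote_code=True,       # allow model repos that ship custom modeling code
    )
    # model.to("cpu")                 # left commented; device_map already handles placement
    return model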
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
 streamlit
-transformers
+git+https://github.com/huggingface/transformers.git
 torch
 accelerate
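Pointing the requirement at the git main branch pulls in unreleased transformers changes instead of the latest PyPI wheel, which is presumably the fix the commit message is asking about. A quick sanity check after the Space rebuilds (assuming, as on transformers main, that dev builds carry a ".dev0" version suffix):

import transformers

# A released wheel prints something like "4.44.2";
# an install from git main prints a dev version such as "4.45.0.dev0".
print(transformers.__version__)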